#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

/*
 * Per-board register access hooks; each controller family below supplies
 * its own implementation of these four operations.
 */
struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
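
/*
 * Illustrative only (a sketch, not code from this header): callers are
 * expected to dispatch through h->access rather than call a board-specific
 * routine directly, e.g.:
 *
 *	h->access.submit_command(h, c);
 *	h->access.set_intr_mask(h, HPSA_INTR_ON);
 *	if (h->access.intr_pending(h))
 *		tag = h->access.command_completed(h, q);
 */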

struct hpsa_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;
};

struct hpsa_sas_port {
	struct list_head port_list_entry;
	u64 sas_address;
	struct sas_port *port;
	int next_phy_index;
	struct list_head phy_list_head;
	struct hpsa_sas_node *parent_node;
	struct sas_rphy *rphy;
};

struct hpsa_sas_phy {
	struct list_head phy_list_entry;
	struct sas_phy *phy;
	struct hpsa_sas_port *parent_port;
	bool added_to_port;
};
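
/*
 * The three structures above form a small hierarchy: one hpsa_sas_node
 * per host, holding a list of hpsa_sas_ports, each holding a list of
 * hpsa_sas_phys.  A minimal traversal sketch (illustrative only;
 * handle_unattached_phy() is a hypothetical helper):
 *
 *	struct hpsa_sas_port *port;
 *	struct hpsa_sas_phy *phy;
 *
 *	list_for_each_entry(port, &node->port_list_head, port_list_entry)
 *		list_for_each_entry(phy, &port->phy_list_head,
 *				    phy_list_entry)
 *			if (!phy->added_to_port)
 *				handle_unattached_phy(phy);
 */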

#define EXTERNAL_QD 128
struct hpsa_scsi_dev_t {
	unsigned int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;
	u8 expose_device;
	u8 removed : 1;			/* device removed during rescan */
	u8 was_removed : 1;
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	u64 sas_address;
	u64 eli;
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char rev;		/* byte 2 of inquiry data */
	unsigned char raid_level;
	unsigned char volume_offline;
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t commands_outstanding;	/* track commands sent to device */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices;
					 * counts commands sent to physical
					 * device via "ioaccel" path.
					 */
	bool in_reset;
	u32 ioaccel_handle;
	u8 active_path_index;
	u8 path_map;
	u8 bay;
	u8 box[8];
	u16 phys_connector[8];
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive.
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make up those logical drives.  Note, multiple logical drives may
	 * share physical drives.  These pointers are needed to count I/Os
	 * out to physical devices in order to honor physical device queue
	 * depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	int nphysical_disks;
	int supports_aborts;
	struct hpsa_sas_port *sas_port;
	int external;			/* 1: from external array, 0: not */
};
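
/*
 * Sketch of how phys_disk[] supports per-physical-drive queue depth
 * accounting (hpsa_phys_disk_busy() is a hypothetical helper, shown for
 * illustration only):
 *
 *	static bool hpsa_phys_disk_busy(struct hpsa_scsi_dev_t *ld,
 *					int map_index)
 *	{
 *		struct hpsa_scsi_dev_t *pd = ld->phys_disk[map_index];
 *
 *		return pd &&
 *		       atomic_read(&pd->ioaccel_cmds_out) >= pd->queue_depth;
 *	}
 */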

struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;		/* producer phase bit; toggles each pass */
	u32 current_entry;	/* next slot to consume */
	dma_addr_t busaddr;	/* bus address handed to the controller */
};
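
/*
 * Reply ring convention (see SA5_performant_completed() below): the
 * producer toggles the low bit of each posted entry on every pass through
 * the ring, so an entry is fresh exactly when its low bit matches
 * rq->wraparound.  Illustrative drain loop (a sketch only;
 * complete_command_by_tag() is a hypothetical helper):
 *
 *	unsigned long tag;
 *
 *	while ((tag = h->access.command_completed(h, q)) != FIFO_EMPTY)
 *		complete_command_by_tag(h, tag);
 */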

/*
 * #pragma pack(1) keeps this structure free of compiler padding so that
 * it matches, byte for byte, the layout the firmware expects.
 */
#pragma pack(1)
struct bmic_controller_parameters {
	u8 led_flags;
	u8 enable_command_list_verification;
	u8 backed_out_write_drives;
	u16 stripes_for_parity;
	u8 parity_distribution_mode_flags;
	u16 max_driver_requests;
	u16 elevator_trend_count;
	u8 disable_elevator;
	u8 force_scan_complete;
	u8 scsi_transfer_mode;
	u8 force_narrow;
	u8 rebuild_priority;
	u8 expand_priority;
	u8 host_sdb_asic_fix;
	u8 pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8 bridge_revision;
	u8 snapshot_priority;
	u32 os_specific;
	u8 post_prompt_timeout;
	u8 automatic_drive_slamming;
	u8 reserved1;
	u8 nvram_flags;
	u8 cache_nvram_flags;
	u8 drive_config_flags;
	u16 reserved2;
	u8 temp_warning_level;
	u8 temp_shutdown_level;
	u8 temp_condition_reset;
	u8 max_coalesce_commands;
	u32 max_coalesce_delay;
	u8 orca_password[4];
	u8 access_id[16];
	u8 reserved[356];
};
#pragma pack()
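
/*
 * With pack(1) the structure above is exactly 512 bytes
 * (18 + 64 + 32 + 42 + 356).  A compile-time layout check along these
 * lines (a sketch; not present in this header) would catch accidental
 * padding or field-size changes:
 *
 *	BUILD_BUG_ON(sizeof(struct bmic_controller_parameters) != 512);
 */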

struct ctlr_info {
	unsigned int *reply_map;
	int	ctlr;
	char	devname[8];
	char	*product_name;
	struct pci_dev *pdev;
	u32	board_id;
	u64	sas_address;
	void __iomem *vaddr;
	unsigned long paddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	max_commands;
	int	last_collision_tag; /* tags are global */
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int msix_vectors;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;

	/* queue and queue Info */
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	int			scan_finished;
	u8			scan_waiting : 1;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock;	/* protects dev[] below */
	int ndevices;		/* number of used elements in dev[] array */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];

	/* Performant mode tables. */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	atomic_t passthru_cmds_avail;

	/* Performant mode completion buffers */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	struct delayed_work event_monitor_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to irq handler to identify queue x */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16];
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported */
#define HPSATMF_BITS_SUPPORTED	(1 << 0)
#define HPSATMF_PHYS_LUN_RESET	(1 << 1)
#define HPSATMF_PHYS_NEX_RESET	(1 << 2)
#define HPSATMF_PHYS_TASK_ABORT	(1 << 3)
#define HPSATMF_PHYS_TSET_ABORT	(1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA	(1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET	(1 << 6)
#define HPSATMF_PHYS_QRY_TASK	(1 << 7)
#define HPSATMF_PHYS_QRY_TSET	(1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC	(1 << 9)
#define HPSATMF_IOACCEL_ENABLED	(1 << 15)
#define HPSATMF_MASK_SUPPORTED	(1 << 16)
#define HPSATMF_LOG_LUN_RESET	(1 << 17)
#define HPSATMF_LOG_NEX_RESET	(1 << 18)
#define HPSATMF_LOG_TASK_ABORT	(1 << 19)
#define HPSATMF_LOG_TSET_ABORT	(1 << 20)
#define HPSATMF_LOG_CLEAR_ACA	(1 << 21)
#define HPSATMF_LOG_CLEAR_TSET	(1 << 22)
#define HPSATMF_LOG_QRY_TASK	(1 << 23)
#define HPSATMF_LOG_QRY_TSET	(1 << 24)
#define HPSATMF_LOG_QRY_ASYNC	(1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	drv_req_rescan;
	int	raid_offload_debug;
	int	discovery_polling;
	int	legacy_board;
	struct	ReportLUNdata *lastlogicals;
	int	needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	struct workqueue_struct *monitor_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
	u8 reset_in_progress;
	struct hpsa_sas_node *sas_host;
	spinlock_t reset_lock;
};
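
/*
 * lockup_detected above is per-cpu; a reader pins itself to a CPU and
 * reads its own copy.  Sketch of the usual accessor (illustrative only;
 * this helper lives in the .c file, not in this header):
 *
 *	static u32 lockup_detected(struct ctlr_info *h)
 *	{
 *		int cpu;
 *		u32 rc, *lockup;
 *
 *		cpu = get_cpu();
 *		lockup = per_cpu_ptr(h->lockup_detected, cpu);
 *		rc = *lockup;
 *		put_cpu();
 *		return rc;
 *	}
 */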

struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)
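
/*
 * Sketch of the retry pattern the two limits above bound (illustrative
 * only; send_reset_msg() is a hypothetical helper):
 *
 *	int i;
 *
 *	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
 *		if (send_reset_msg(h) == IO_OK)
 *			break;
 *		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
 *	}
 */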

/*
 * Maximum time in seconds the driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/*
 * During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines how many times
 * to retry TEST UNIT READY on a device while waiting for it to become
 * ready before giving up.  HPSA_MAX_WAIT_INTERVAL_SECS is the max wait
 * interval between sending TURs while waiting for a device to become
 * ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/*
 * HPSA_BOARD_READY_WAIT_SECS is how long to wait, in seconds, for a
 * board to become ready before giving up on it.
 * HPSA_BOARD_NOT_READY_WAIT_SECS is how long to wait, in seconds, for a
 * board to become not ready before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait between polls
 * of the board, in milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and the
 * two *_ITERATIONS counts are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
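
/*
 * Sketch of the board-ready polling loop the constants above are sized
 * for (illustrative only; the scratchpad register reports
 * HPSA_FIRMWARE_READY once the firmware is up):
 *
 *	int i;
 *
 *	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
 *		if (readl(h->vaddr + SA5_SCRATCHPAD_OFFSET) ==
 *		    HPSA_FIRMWARE_READY)
 *			return 0;
 *		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
 *	}
 *	return -ENODEV;
 */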

/*
 * Memory-mapped FIFO interface (SMART 53xx cards): register offsets
 * relative to h->vaddr.
 */
#define SA5_DOORBELL			0x20
#define SA5_REQUEST_PORT_OFFSET		0x40
#define SA5_REQUEST_PORT64_LO_OFFSET	0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET	0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS			0x30
#define SA5_SCRATCHPAD_OFFSET		0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING	0x04
#define SA5_PERF_INTR_OFF	0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR		0xA0
#define SA5_OUTDB_STATUS	0x9C

#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

#define HPSA_PHYSICAL_DEVICE_BUS	0
#define HPSA_RAID_VOLUME_BUS		1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS	2
#define HPSA_HBA_BUS			0
#define HPSA_LEGACY_HBA_BUS		3

/*
 * Send the command to the hardware.
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	/* Read back to flush the posted PCI write before returning. */
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

/*
 * As above, but without the flushing read (cheaper; used where the
 * posted write need not be flushed immediately).
 */
static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

/*
 * This card is the opposite of the other cards:
 * 0 turns interrupts on, SA5_INTR_OFF (0x08) turns them off.
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

/*
 * As above for the "B" boards: 0 turns interrupts on,
 * SA5B_INTR_OFF (0x04) turns them off.
 */
static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5B_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	/* An entry is fresh when its low bit matches the phase bit. */
	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 * Returns the value read from hardware,
 * or FIFO_EMPTY if there is nothing to read.
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}

/*
 * Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT	0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

/*
 * Returns true if an interrupt is pending.
 */
static bool SA5B_intr_pending(struct ctlr_info *h)
{
	return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX	0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX	0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX	0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED	0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		/* Mark the slot consumed and advance the consumer index. */
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * Make the consumed slot visible before publishing the new
		 * consumer index to the board.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_intr_mask,
	.intr_pending =		SA5_intr_pending,
	.command_completed =	SA5_completed,
};

/* Duplicate of SA5_access, used to mark an unknown (legacy) board. */
static struct access_method SA5A_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_intr_mask,
	.intr_pending =		SA5_intr_pending,
	.command_completed =	SA5_completed,
};

static struct access_method SA5B_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5B_intr_mask,
	.intr_pending =		SA5B_intr_pending,
	.command_completed =	SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_ioaccel_mode1_intr_pending,
	.command_completed =	SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	.submit_command =	SA5_submit_command_ioaccel2,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

static struct access_method SA5_performant_access_no_read = {
	.submit_command =	SA5_submit_command_no_read,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};
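
/*
 * board_type entries map a PCI board ID to a product name and the
 * matching access_method table.  Illustrative entry (the ID and name
 * shown are assumptions, not taken from this header):
 *
 *	static struct board_type products[] = {
 *		{0x3241103C, "Smart Array P212", &SA5_access},
 *	};
 */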

#endif /* HPSA_H */