0001
0002 #ifndef __CARD_BASE_H__
0003 #define __CARD_BASE_H__
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021 #include <linux/kernel.h>
0022 #include <linux/types.h>
0023 #include <linux/cdev.h>
0024 #include <linux/stringify.h>
0025 #include <linux/pci.h>
0026 #include <linux/semaphore.h>
0027 #include <linux/uaccess.h>
0028 #include <linux/io.h>
0029 #include <linux/debugfs.h>
0030 #include <linux/slab.h>
0031
0032 #include <linux/genwqe/genwqe_card.h>
0033 #include "genwqe_driver.h"
0034
0035 #define GENWQE_MSI_IRQS 4
0036
0037 #define GENWQE_MAX_VFS 15
0038 #define GENWQE_MAX_FUNCS 16
0039 #define GENWQE_CARD_NO_MAX (16 * GENWQE_MAX_FUNCS)
0040
0041
0042 #define GENWQE_DDCB_MAX 32
0043 #define GENWQE_POLLING_ENABLED 0
0044 #define GENWQE_DDCB_SOFTWARE_TIMEOUT 10
0045 #define GENWQE_KILL_TIMEOUT 8
0046 #define GENWQE_VF_JOBTIMEOUT_MSEC 250
0047 #define GENWQE_PF_JOBTIMEOUT_MSEC 8000
0048 #define GENWQE_HEALTH_CHECK_INTERVAL 4
0049
0050
0051 extern const struct attribute_group *genwqe_attribute_groups[];
0052
0053
0054
0055
0056
0057
0058
0059
0060 #define PCI_DEVICE_GENWQE 0x044b
0061
0062 #define PCI_SUBSYSTEM_ID_GENWQE5 0x035f
0063 #define PCI_SUBSYSTEM_ID_GENWQE5_NEW 0x044b
0064 #define PCI_CLASSCODE_GENWQE5 0x1200
0065
0066 #define PCI_SUBVENDOR_ID_IBM_SRIOV 0x0000
0067 #define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV 0x0000
0068 #define PCI_CLASSCODE_GENWQE5_SRIOV 0x1200
0069
0070 #define GENWQE_SLU_ARCH_REQ 2
0071
0072
0073
0074
/**
 * struct genwqe_reg - One entry of a register/debug-data dump
 * @addr: register address/offset the value was read from
 * @idx:  index, e.g. position within a register array or dump
 *        (exact meaning depends on the producing FFDC routine)
 * @val:  64-bit value captured for @addr
 *
 * Arrays of these are filled by the FFDC helpers below
 * (genwqe_read_ffdc_regs(), genwqe_ffdc_buff_read(), ...).
 */
struct genwqe_reg {
	u32 addr;
	u32 idx;
	u64 val;
};
0080
0081
0082
0083
/**
 * enum genwqe_dbg_type - Debug-data (FFDC) sources of the card
 *
 * UNIT0..UNIT7 select per-unit debug data; GENWQE_DBG_REGS and
 * GENWQE_DBG_DMA select register and DMA dumps respectively.
 * GENWQE_DBG_UNITS is the total number of sources and sizes the
 * ffdc[] array in struct genwqe_dev.
 */
enum genwqe_dbg_type {
	GENWQE_DBG_UNIT0 = 0,
	GENWQE_DBG_UNIT1 = 1,
	GENWQE_DBG_UNIT2 = 2,
	GENWQE_DBG_UNIT3 = 3,
	GENWQE_DBG_UNIT4 = 4,
	GENWQE_DBG_UNIT5 = 5,
	GENWQE_DBG_UNIT6 = 6,
	GENWQE_DBG_UNIT7 = 7,
	GENWQE_DBG_REGS = 8,
	GENWQE_DBG_DMA = 9,
	GENWQE_DBG_UNITS = 10,		/* number of possible debug sources */
};
0097
0098
0099 #define GENWQE_INJECT_HARDWARE_FAILURE 0x00000001
0100 #define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002
0101 #define GENWQE_INJECT_GFIR_FATAL 0x00000004
0102 #define GENWQE_INJECT_GFIR_INFO 0x00000008
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
/**
 * enum dma_mapping_type - How a DMA buffer mapping was established
 * @GENWQE_MAPPING_RAW:        presumably a plain/consistent buffer
 *                             (single dma_addr) — confirm against the
 *                             allocation paths in the .c files
 * @GENWQE_MAPPING_SGL_TEMP:   scatter/gather list kept only temporarily,
 *                             presumably for the duration of one request
 * @GENWQE_MAPPING_SGL_PINNED: scatter/gather list whose user pages stay
 *                             pinned beyond a single request
 */
enum dma_mapping_type {
	GENWQE_MAPPING_RAW = 0,
	GENWQE_MAPPING_SGL_TEMP,
	GENWQE_MAPPING_SGL_PINNED,
};
0159
0160
0161
0162
/**
 * struct dma_mapping - Bookkeeping for one mapped DMA buffer
 * @type:      how this mapping was created (enum dma_mapping_type)
 * @u_vaddr:   user-space virtual address of the buffer, if any
 * @k_vaddr:   kernel virtual address of the buffer, if any
 * @dma_addr:  DMA address for a single contiguous mapping
 * @page_list: pinned pages backing a user buffer (see genwqe_user_vmap())
 * @dma_list:  per-page DMA addresses matching @page_list
 * @nr_pages:  number of entries in @page_list/@dma_list
 * @size:      size of the mapping in bytes; 0 marks the slot as unused
 *             (see dma_mapping_used())
 * @card_list: list node — presumably linked into genwqe_file::map_list;
 *             verify against the open-file handling code
 * @pin_list:  list node — presumably linked into genwqe_file::pin_list
 * @write:     non-zero if the device may write into the buffer; defaults
 *             to 1 after genwqe_mapping_init()
 */
struct dma_mapping {
	enum dma_mapping_type type;

	void *u_vaddr;
	void *k_vaddr;
	dma_addr_t dma_addr;

	struct page **page_list;
	dma_addr_t *dma_list;
	unsigned int nr_pages;
	unsigned int size;

	struct list_head card_list;
	struct list_head pin_list;
	int write;
};
0179
0180 static inline void genwqe_mapping_init(struct dma_mapping *m,
0181 enum dma_mapping_type type)
0182 {
0183 memset(m, 0, sizeof(*m));
0184 m->type = type;
0185 m->write = 1;
0186 }
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
/**
 * struct ddcb_queue - DDCB queue state of one card
 * @ddcb_max:            number of DDCBs in the queue
 * @ddcb_next:           next free DDCB slot
 * @ddcb_act:            next DDCB expected to complete
 * @ddcb_seq:            sequence number of the last DDCB
 * @ddcbs_in_flight:     statistics: number of DDCBs currently in flight
 * @ddcbs_completed:     statistics: total completed DDCBs
 * @ddcbs_max_in_flight: statistics: high-water mark of @ddcbs_in_flight
 * @return_on_busy:      statistics: NONBLOCK returns due to a busy queue
 * @wait_on_busy:        statistics: waits due to a busy queue
 * @ddcb_daddr:          DMA address of the DDCB array
 * @ddcb_vaddr:          kernel virtual address of the DDCB array
 * @ddcb_req:            per-slot back pointers to the owning ddcb_requ
 * @ddcb_waitqs:         per-slot wait queues for request completion
 * @ddcb_lock:           protects the queue data
 * @busy_waitq:          waitq for requests waiting on a busy queue
 *
 * The IO_QUEUE_* members hold the register offsets used to drive this
 * queue; they mirror hardware register names — NOTE(review): exact
 * register semantics must be taken from the hardware documentation.
 */
struct ddcb_queue {
	int ddcb_max;
	int ddcb_next;
	int ddcb_act;
	u16 ddcb_seq;
	unsigned int ddcbs_in_flight;
	unsigned int ddcbs_completed;
	unsigned int ddcbs_max_in_flight;
	unsigned int return_on_busy;
	unsigned int wait_on_busy;

	dma_addr_t ddcb_daddr;
	struct ddcb *ddcb_vaddr;
	struct ddcb_requ **ddcb_req;
	wait_queue_head_t *ddcb_waitqs;

	spinlock_t ddcb_lock;
	wait_queue_head_t busy_waitq;

	/* registers for the respective queue */
	u32 IO_QUEUE_CONFIG;
	u32 IO_QUEUE_STATUS;
	u32 IO_QUEUE_SEGMENT;
	u32 IO_QUEUE_INITSQN;
	u32 IO_QUEUE_WRAP;
	u32 IO_QUEUE_OFFSET;
	u32 IO_QUEUE_WTIME;
	u32 IO_QUEUE_ERRCNTS;
	u32 IO_QUEUE_LRW;
};
0236
0237
0238
0239
0240
0241 #define GENWQE_FFDC_REGS (3 + (8 * (2 + 2 * 64)))
0242
/**
 * struct genwqe_ffdc - First-failure data capture buffer for one source
 * @entries: number of valid entries in @regs
 * @regs:    captured register dump (filled by the FFDC helpers below)
 */
struct genwqe_ffdc {
	unsigned int entries;
	struct genwqe_reg *regs;
};
0247
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
/**
 * struct genwqe_dev - Per-card device state
 *
 * Central data structure of the driver; one instance per GenWQE card.
 */
struct genwqe_dev {
	enum genwqe_card_state card_state;
	spinlock_t print_lock;

	int card_idx;			/* card index, 0..GENWQE_CARD_NO_MAX-1 */
	u64 flags;			/* general driver flags */

	/* first-failure data capture, one buffer per debug source */
	struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];

	/* DDCB queue handling */
	struct task_struct *card_thread;	/* queue worker thread */
	wait_queue_head_t queue_waitq;
	struct ddcb_queue queue;		/* genwqe DDCB queue */
	unsigned int irqs_processed;

	/* health monitoring */
	struct task_struct *health_thread;
	wait_queue_head_t health_waitq;

	int use_platform_recovery;	/* use platform recovery mechanisms */

	/* char device */
	dev_t devnum_genwqe;		/* major/minor number of the card */
	struct class *class_genwqe;	/* class for device creation */
	struct device *dev;		/* created device node */
	struct cdev cdev_genwqe;	/* char device of the card */

	struct dentry *debugfs_root;	/* debugfs card directory */
	struct dentry *debugfs_genwqe;	/* debugfs driver directory */

	/* PCI resources */
	struct pci_dev *pci_dev;	/* PCI device */
	void __iomem *mmio;		/* mapped register space */
	unsigned long mmio_len;
	int num_vfs;
	u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
	int is_privileged;		/* non-zero: full register access;
					   presumably set for the PF — verify
					   in the probe code */

	/* mirrored config registers used in many places */
	u64 slu_unitcfg;
	u64 app_unitcfg;
	u64 softreset;
	u64 err_inject;			/* GENWQE_INJECT_* bits */
	u64 last_gfir;
	char app_name[5];		/* see genwqe_read_app_id() */

	spinlock_t file_lock;		/* protects @file_list */
	struct list_head file_list;	/* list of open files (genwqe_file) */

	/* debugfs-tunable parameters */
	int ddcb_software_timeout;	/* DDCB timeout in seconds */
	int skip_recovery;		/* circumvention if recovery fails */
	int kill_timeout;		/* wait after sending SIGKILL */
};
0323
0324
0325
0326
/**
 * enum genwqe_requ_state - Life-cycle states of a DDCB request
 * @GENWQE_REQU_NEW:       request created, not yet enqueued
 * @GENWQE_REQU_ENQUEUED:  request placed in the DDCB queue
 * @GENWQE_REQU_TAPPED:    request handed to the hardware — NOTE(review):
 *                         confirm exact meaning in the queue code
 * @GENWQE_REQU_FINISHED:  request completed
 * @GENWQE_REQU_STATE_MAX: number of states, not a valid state itself
 */
enum genwqe_requ_state {
	GENWQE_REQU_NEW = 0,
	GENWQE_REQU_ENQUEUED = 1,
	GENWQE_REQU_TAPPED = 2,
	GENWQE_REQU_FINISHED = 3,
	GENWQE_REQU_STATE_MAX,
};
0334
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
0345
/**
 * struct genwqe_sgl - Scatter/gather list for a user buffer
 * @sgl_dma_addr:   DMA address of the sg_entry array
 * @sgl:            kernel address of the sg_entry array
 * @sgl_size:       size of the sg_entry array in bytes
 * @user_addr:      user-space address the list describes
 * @user_size:      size of the user buffer in bytes
 * @write:          non-zero if the device may write into the buffer
 * @nr_pages:       number of pages spanned by the user buffer
 * @fpage_offs:     offset of the data start within the first page
 * @fpage_size:     bytes of data in the (possibly partial) first page
 * @lpage_size:     bytes of data in the (possibly partial) last page
 * @fpage:          bounce buffer for the first page, if needed
 * @fpage_dma_addr: DMA address of @fpage
 * @lpage:          bounce buffer for the last page, if needed
 * @lpage_dma_addr: DMA address of @lpage
 *
 * Built by genwqe_alloc_sync_sgl()/genwqe_setup_sgl() and released with
 * genwqe_free_sync_sgl(). NOTE(review): the fpage/lpage fields look like
 * copy buffers for unaligned head/tail pages — confirm in the sgl code.
 */
struct genwqe_sgl {
	dma_addr_t sgl_dma_addr;
	struct sg_entry *sgl;
	size_t sgl_size;

	void __user *user_addr;
	size_t user_size;

	int write;

	unsigned long nr_pages;
	unsigned long fpage_offs;
	size_t fpage_size;
	size_t lpage_size;

	void *fpage;
	dma_addr_t fpage_dma_addr;

	void *lpage;
	dma_addr_t lpage_dma_addr;
};
0367
0368 int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
0369 void __user *user_addr, size_t user_size, int write);
0370
0371 int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
0372 dma_addr_t *dma_list);
0373
0374 int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl);
0375
0376
0377
0378
0379
/**
 * struct ddcb_requ - Kernel-internal state of one DDCB request
 * @req_state:    current life-cycle state (enum genwqe_requ_state)
 * @num:          slot number — presumably the DDCB index in @queue;
 *                confirm in the enqueue code
 * @queue:        queue this request was placed in
 * @dma_mappings: DMA mappings established for this request's buffers
 * @sgls:         scatter/gather lists built for this request
 * @cmd:          the command descriptor as seen by user space
 * @debug_data:   collected debug data for this request
 *
 * DDCB_FIXUPS (defined outside this header) bounds the number of
 * per-request buffer fixups.
 */
struct ddcb_requ {

	enum genwqe_requ_state req_state;
	int num;
	struct ddcb_queue *queue;

	struct dma_mapping dma_mappings[DDCB_FIXUPS];
	struct genwqe_sgl sgls[DDCB_FIXUPS];

	/* kept last; see genwqe_ddcb_cmd/genwqe_debug_data definitions */
	struct genwqe_ddcb_cmd cmd;
	struct genwqe_debug_data debug_data;
};
0393
0394
0395
0396
/**
 * struct genwqe_file - Per-open-file state of the char device
 * @cd:          card this file belongs to
 * @client:      driver context of the opener
 * @filp:        the VFS file pointer
 * @async_queue: fasync notification queue
 * @opener:      pid reference of the opening process
 * @list:        entry in genwqe_dev::file_list
 * @map_lock:    protects @map_list
 * @map_list:    DMA mappings owned by this file (dma_mapping::card_list)
 * @pin_lock:    protects @pin_list
 * @pin_list:    pinned user memory owned by this file
 *               (dma_mapping::pin_list)
 */
struct genwqe_file {
	struct genwqe_dev *cd;
	struct genwqe_driver *client;
	struct file *filp;

	struct fasync_struct *async_queue;
	struct pid *opener;
	struct list_head list;

	spinlock_t map_lock;
	struct list_head map_list;

	spinlock_t pin_lock;
	struct list_head pin_list;
};
0412
0413 int genwqe_setup_service_layer(struct genwqe_dev *cd);
0414 int genwqe_finish_queue(struct genwqe_dev *cd);
0415 int genwqe_release_service_layer(struct genwqe_dev *cd);
0416
0417
0418
0419
0420
0421
0422
0423
0424 static inline int genwqe_get_slu_id(struct genwqe_dev *cd)
0425 {
0426 return (int)((cd->slu_unitcfg >> 32) & 0xff);
0427 }
0428
0429 int genwqe_ddcbs_in_flight(struct genwqe_dev *cd);
0430
0431 u8 genwqe_card_type(struct genwqe_dev *cd);
0432 int genwqe_card_reset(struct genwqe_dev *cd);
0433 int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count);
0434 void genwqe_reset_interrupt_capability(struct genwqe_dev *cd);
0435
0436 int genwqe_device_create(struct genwqe_dev *cd);
0437 int genwqe_device_remove(struct genwqe_dev *cd);
0438
0439
/* debugfs */
void genwqe_init_debugfs(struct genwqe_dev *cd);
/* NOTE(review): "genqwe" is a misspelling of "genwqe"; the definition
 * presumably carries the same name, so a rename must change declaration
 * and definition (and all callers) together — do not fix it here alone. */
void genqwe_exit_debugfs(struct genwqe_dev *cd);
0442
0443 int genwqe_read_softreset(struct genwqe_dev *cd);
0444
0445
0446 int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd);
0447 int genwqe_flash_readback_fails(struct genwqe_dev *cd);
0448
0449
0450
0451
0452
0453
0454
0455
0456 int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func);
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466 u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func);
0467
0468
0469 int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id);
0470 int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id,
0471 struct genwqe_reg *regs, unsigned int max_regs);
0472 int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
0473 unsigned int max_regs, int all);
0474 int genwqe_ffdc_dump_dma(struct genwqe_dev *cd,
0475 struct genwqe_reg *regs, unsigned int max_regs);
0476
0477 int genwqe_init_debug_data(struct genwqe_dev *cd,
0478 struct genwqe_debug_data *d);
0479
0480 void genwqe_init_crc32(void);
0481 int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);
0482
0483
0484 int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
0485 void *uaddr, unsigned long size);
0486
0487 int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m);
0488
0489 static inline bool dma_mapping_used(struct dma_mapping *m)
0490 {
0491 if (!m)
0492 return false;
0493 return m->size != 0;
0494 }
0495
0496
0497
0498
0499
0500
0501
0502
0503
0504
0505 int __genwqe_execute_ddcb(struct genwqe_dev *cd,
0506 struct genwqe_ddcb_cmd *cmd, unsigned int f_flags);
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516
0517 int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
0518 struct genwqe_ddcb_cmd *cmd,
0519 unsigned int f_flags);
0520 int __genwqe_enqueue_ddcb(struct genwqe_dev *cd,
0521 struct ddcb_requ *req,
0522 unsigned int f_flags);
0523
0524 int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
0525 int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
0526
0527
0528 int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val);
0529 u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs);
0530 int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val);
0531 u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs);
0532
0533 void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
0534 dma_addr_t *dma_handle);
0535 void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
0536 void *vaddr, dma_addr_t dma_handle);
0537
0538
0539 int genwqe_base_clock_frequency(struct genwqe_dev *cd);
0540
0541
0542 void genwqe_stop_traps(struct genwqe_dev *cd);
0543 void genwqe_start_traps(struct genwqe_dev *cd);
0544
0545
0546 bool genwqe_need_err_masking(struct genwqe_dev *cd);
0547
0548
0549
0550
0551
0552
0553
0554
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567
0568
0569
0570
0571
/**
 * genwqe_is_privileged() - Tell whether this card has privileged access
 * @cd: card descriptor
 *
 * Return: non-zero if cd->is_privileged was set (presumably for the
 * physical function — verify where the field is assigned).
 */
static inline int genwqe_is_privileged(struct genwqe_dev *cd)
{
	return cd->is_privileged;
}
0576
0577 #endif