/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */
0010 
0011 #ifndef _CXLFLASH_COMMON_H
0012 #define _CXLFLASH_COMMON_H
0013 
0014 #include <linux/async.h>
0015 #include <linux/cdev.h>
0016 #include <linux/irq_poll.h>
0017 #include <linux/list.h>
0018 #include <linux/rwsem.h>
0019 #include <linux/types.h>
0020 #include <scsi/scsi.h>
0021 #include <scsi/scsi_cmnd.h>
0022 #include <scsi/scsi_device.h>
0023 
0024 #include "backend.h"
0025 
/* File operations for the cxlflash character device (implementation elsewhere) */
extern const struct file_operations cxlflash_cxl_fops;

#define MAX_CONTEXT	CXLFLASH_MAX_CONTEXT	/* num contexts per afu */
#define MAX_FC_PORTS	CXLFLASH_MAX_FC_PORTS	/* max ports per AFU */
#define LEGACY_FC_PORTS	2			/* legacy ports per AFU */

/*
 * Channel <-> port conversions. Ports are organized in banks of
 * CXLFLASH_NUM_FC_PORTS_PER_BANK ports each; check_sizes() enforces
 * that the per-bank count is a power of two so shift/mask are valid.
 */
#define CHAN2PORTBANK(_x)	((_x) >> ilog2(CXLFLASH_NUM_FC_PORTS_PER_BANK))
#define CHAN2BANKPORT(_x)	((_x) & (CXLFLASH_NUM_FC_PORTS_PER_BANK - 1))

#define CHAN2PORTMASK(_x)	(1 << (_x))	/* channel to port mask */
#define PORTMASK2CHAN(_x)	(ilog2((_x)))	/* port mask to channel */
#define PORTNUM2CHAN(_x)	((_x) - 1)	/* port number to channel */

#define CXLFLASH_BLOCK_SIZE	4096		/* 4K blocks */
#define CXLFLASH_MAX_XFER_SIZE	16777216	/* 16MB transfer */
#define CXLFLASH_MAX_SECTORS	(CXLFLASH_MAX_XFER_SIZE/512)	/* SCSI wants
								 * max_sectors
								 * in units of
								 * 512 byte
								 * sectors
								 */

/* Resource handle table entries that fit in one page */
#define MAX_RHT_PER_CONTEXT	(PAGE_SIZE / sizeof(struct sisl_rht_entry))

/* AFU command retry limit */
#define MC_RETRY_CNT	5	/* Sufficient for SCSI and certain AFU errors */

/* Command management definitions */
#define CXLFLASH_MAX_CMDS		256
#define CXLFLASH_MAX_CMDS_PER_LUN	CXLFLASH_MAX_CMDS

/* RRQ for master issued cmds */
#define NUM_RRQ_ENTRY			CXLFLASH_MAX_CMDS

/* SQ for master issued cmds */
#define NUM_SQ_ENTRY			CXLFLASH_MAX_CMDS

/* Hardware queue definitions */
#define CXLFLASH_DEF_HWQS		1
#define CXLFLASH_MAX_HWQS		8
#define PRIMARY_HWQ			0
0068 
0069 static inline void check_sizes(void)
0070 {
0071     BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_FC_PORTS_PER_BANK);
0072     BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_MAX_CMDS);
0073 }
0074 
0075 /* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
0076 #define CMD_BUFSIZE     SIZE_4K
0077 
/* FC link reset progression (order is significant; values start at 0) */
enum cxlflash_lr_state {
	LINK_RESET_INVALID,	/* no link reset context */
	LINK_RESET_REQUIRED,	/* a link reset has been requested */
	LINK_RESET_COMPLETE	/* link reset has been carried out */
};
0083 
/* Stages of driver initialization (each stage implies the previous ones) */
enum cxlflash_init_state {
	INIT_STATE_NONE,	/* nothing initialized yet */
	INIT_STATE_PCI,		/* PCI resources configured */
	INIT_STATE_AFU,		/* AFU configured */
	INIT_STATE_SCSI,	/* SCSI host registered */
	INIT_STATE_CDEV		/* character device created */
};
0091 
/* Overall adapter state */
enum cxlflash_state {
	STATE_PROBING,	/* Initial state during probe */
	STATE_PROBED,	/* Temporary state, probe completed but EEH occurred */
	STATE_NORMAL,	/* Normal running state, everything good */
	STATE_RESET,	/* Reset state, trying to reset/recover */
	STATE_FAILTERM	/* Failed/terminating state, error out users/threads */
};
0099 
/* Steering policy used to pick a hardware queue for a command */
enum cxlflash_hwq_mode {
	HWQ_MODE_RR,	/* Roundrobin (default) */
	HWQ_MODE_TAG,	/* Distribute based on block MQ tag */
	HWQ_MODE_CPU,	/* CPU affinity */
	MAX_HWQ_MODE	/* Number of valid modes (sentinel, not a mode) */
};
0106 
0107 /*
0108  * Each context has its own set of resource handles that is visible
0109  * only from that context.
0110  */
0111 
0112 struct cxlflash_cfg {
0113     struct afu *afu;
0114 
0115     const struct cxlflash_backend_ops *ops;
0116     struct pci_dev *dev;
0117     struct pci_device_id *dev_id;
0118     struct Scsi_Host *host;
0119     int num_fc_ports;
0120     struct cdev cdev;
0121     struct device *chardev;
0122 
0123     ulong cxlflash_regs_pci;
0124 
0125     struct work_struct work_q;
0126     enum cxlflash_init_state init_state;
0127     enum cxlflash_lr_state lr_state;
0128     int lr_port;
0129     atomic_t scan_host_needed;
0130 
0131     void *afu_cookie;
0132 
0133     atomic_t recovery_threads;
0134     struct mutex ctx_recovery_mutex;
0135     struct mutex ctx_tbl_list_mutex;
0136     struct rw_semaphore ioctl_rwsem;
0137     struct ctx_info *ctx_tbl[MAX_CONTEXT];
0138     struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
0139     struct file_operations cxl_fops;
0140 
0141     /* Parameters that are LUN table related */
0142     int last_lun_index[MAX_FC_PORTS];
0143     int promote_lun_index;
0144     struct list_head lluns; /* list of llun_info structs */
0145 
0146     wait_queue_head_t tmf_waitq;
0147     spinlock_t tmf_slock;
0148     bool tmf_active;
0149     bool ws_unmap;      /* Write-same unmap supported */
0150     wait_queue_head_t reset_waitq;
0151     enum cxlflash_state state;
0152     async_cookie_t async_reset_cookie;
0153 };
0154 
0155 struct afu_cmd {
0156     struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */
0157     struct sisl_ioasa sa;   /* IOASA must follow IOARCB */
0158     struct afu *parent;
0159     struct scsi_cmnd *scp;
0160     struct completion cevent;
0161     struct list_head queue;
0162     u32 hwq_index;
0163 
0164     u8 cmd_tmf:1,
0165        cmd_aborted:1;
0166 
0167     struct list_head list;  /* Pending commands link */
0168 
0169     /* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
0170      * However for performance reasons the IOARCB/IOASA should be
0171      * cache line aligned.
0172      */
0173 } __aligned(cache_line_size());
0174 
0175 static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
0176 {
0177     return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
0178 }
0179 
0180 static inline struct afu_cmd *sc_to_afuci(struct scsi_cmnd *sc)
0181 {
0182     struct afu_cmd *afuc = sc_to_afuc(sc);
0183 
0184     INIT_LIST_HEAD(&afuc->queue);
0185     return afuc;
0186 }
0187 
0188 static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
0189 {
0190     struct afu_cmd *afuc = sc_to_afuc(sc);
0191 
0192     memset(afuc, 0, sizeof(*afuc));
0193     return sc_to_afuci(sc);
0194 }
0195 
0196 struct hwq {
0197     /* Stuff requiring alignment go first. */
0198     struct sisl_ioarcb sq[NUM_SQ_ENTRY];        /* 16K SQ */
0199     u64 rrq_entry[NUM_RRQ_ENTRY];           /* 2K RRQ */
0200 
0201     /* Beware of alignment till here. Preferably introduce new
0202      * fields after this point
0203      */
0204     struct afu *afu;
0205     void *ctx_cookie;
0206     struct sisl_host_map __iomem *host_map;     /* MC host map */
0207     struct sisl_ctrl_map __iomem *ctrl_map;     /* MC control map */
0208     ctx_hndl_t ctx_hndl;    /* master's context handle */
0209     u32 index;      /* Index of this hwq */
0210     int num_irqs;       /* Number of interrupts requested for context */
0211     struct list_head pending_cmds;  /* Commands pending completion */
0212 
0213     atomic_t hsq_credits;
0214     spinlock_t hsq_slock;   /* Hardware send queue lock */
0215     struct sisl_ioarcb *hsq_start;
0216     struct sisl_ioarcb *hsq_end;
0217     struct sisl_ioarcb *hsq_curr;
0218     spinlock_t hrrq_slock;
0219     u64 *hrrq_start;
0220     u64 *hrrq_end;
0221     u64 *hrrq_curr;
0222     bool toggle;
0223     bool hrrq_online;
0224 
0225     s64 room;
0226 
0227     struct irq_poll irqpoll;
0228 } __aligned(cache_line_size());
0229 
0230 struct afu {
0231     struct hwq hwqs[CXLFLASH_MAX_HWQS];
0232     int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd);
0233     int (*context_reset)(struct hwq *hwq);
0234 
0235     /* AFU HW */
0236     struct cxlflash_afu_map __iomem *afu_map;   /* entire MMIO map */
0237 
0238     atomic_t cmds_active;   /* Number of currently active AFU commands */
0239     struct mutex sync_active;   /* Mutex to serialize AFU commands */
0240     u64 hb;
0241     u32 internal_lun;   /* User-desired LUN mode for this AFU */
0242 
0243     u32 num_hwqs;       /* Number of hardware queues */
0244     u32 desired_hwqs;   /* Desired h/w queues, effective on AFU reset */
0245     enum cxlflash_hwq_mode hwq_mode; /* Steering mode for h/w queues */
0246     u32 hwq_rr_count;   /* Count to distribute traffic for roundrobin */
0247 
0248     char version[16];
0249     u64 interface_version;
0250 
0251     u32 irqpoll_weight;
0252     struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
0253 };
0254 
0255 static inline struct hwq *get_hwq(struct afu *afu, u32 index)
0256 {
0257     WARN_ON(index >= CXLFLASH_MAX_HWQS);
0258 
0259     return &afu->hwqs[index];
0260 }
0261 
0262 static inline bool afu_is_irqpoll_enabled(struct afu *afu)
0263 {
0264     return !!afu->irqpoll_weight;
0265 }
0266 
0267 static inline bool afu_has_cap(struct afu *afu, u64 cap)
0268 {
0269     u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;
0270 
0271     return afu_cap & cap;
0272 }
0273 
0274 static inline bool afu_is_ocxl_lisn(struct afu *afu)
0275 {
0276     return afu_has_cap(afu, SISL_INTVER_CAP_OCXL_LISN);
0277 }
0278 
0279 static inline bool afu_is_afu_debug(struct afu *afu)
0280 {
0281     return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);
0282 }
0283 
0284 static inline bool afu_is_lun_provision(struct afu *afu)
0285 {
0286     return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION);
0287 }
0288 
0289 static inline bool afu_is_sq_cmd_mode(struct afu *afu)
0290 {
0291     return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
0292 }
0293 
0294 static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
0295 {
0296     return afu_has_cap(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
0297 }
0298 
0299 static inline u64 lun_to_lunid(u64 lun)
0300 {
0301     __be64 lun_id;
0302 
0303     int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
0304     return be64_to_cpu(lun_id);
0305 }
0306 
0307 static inline struct fc_port_bank __iomem *get_fc_port_bank(
0308                         struct cxlflash_cfg *cfg, int i)
0309 {
0310     struct afu *afu = cfg->afu;
0311 
0312     return &afu->afu_map->global.bank[CHAN2PORTBANK(i)];
0313 }
0314 
0315 static inline __be64 __iomem *get_fc_port_regs(struct cxlflash_cfg *cfg, int i)
0316 {
0317     struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
0318 
0319     return &fcpb->fc_port_regs[CHAN2BANKPORT(i)][0];
0320 }
0321 
0322 static inline __be64 __iomem *get_fc_port_luns(struct cxlflash_cfg *cfg, int i)
0323 {
0324     struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
0325 
0326     return &fcpb->fc_port_luns[CHAN2BANKPORT(i)][0];
0327 }
0328 
0329 int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode);
0330 void cxlflash_list_init(void);
0331 void cxlflash_term_global_luns(void);
0332 void cxlflash_free_errpage(void);
0333 int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd,
0334            void __user *arg);
0335 void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg);
0336 int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg);
0337 void cxlflash_term_local_luns(struct cxlflash_cfg *cfg);
0338 void cxlflash_restore_luntable(struct cxlflash_cfg *cfg);
0339 
0340 #endif /* ifndef _CXLFLASH_COMMON_H */