/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include <linux/prefetch.h>
#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT         12
#define EHEA_PAGESIZE          (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE          (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
#define EHEA_HUGEPAGESHIFT     34
#define EHEA_HUGEPAGE_SIZE     (1UL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)

#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif
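
/*
 * Worked out, the constants above give (a sketch of the arithmetic, not
 * part of the original header):
 *
 *   EHEA_PAGESIZE          = 1UL << 12 = 4 KiB per hardware queue page
 *   EHEA_SECTSIZE          = 1UL << 24 = 16 MiB per memory section
 *   EHEA_PAGES_PER_SECTION = 16 MiB / 4 KiB = 4096
 *   EHEA_HUGEPAGE_SIZE     = 1UL << 34 = 16 GiB
 *
 * The #error check enforces that a kernel memory section
 * (1UL << SECTION_SIZE_BITS) is at least as large as an eHEA section,
 * so a single kernel section never covers only part of an eHEA section.
 */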

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE    0x1
#define EHEA_SWQE3_TYPE    0x2
#define EHEA_RWQE2_TYPE    0x3
#define EHEA_RWQE3_TYPE    0x4
#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
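
/*
 * A 64-bit wr_id is composed from these fields with the EHEA_BMASK_SET
 * helper from ehea.h and decoded again with EHEA_BMASK_GET, roughly as
 * follows (illustrative sketch; sig_count and index are placeholder
 * variables, not names from the driver):
 *
 *   swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 *               | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, sig_count)
 *               | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 *
 *   type = EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id);
 *
 * Bit positions follow IBM numbering: bit 0 is the most significant
 * bit of the 64-bit word.
 */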

struct ehea_vsgentry {
    u64 vaddr;
    u32 l_key;
    u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES     252
#define SWQE2_MAX_IMM               (0xD0 - 0x30)
#define SWQE3_MAX_IMM               224

/* tx control flags for swqe */
#define EHEA_SWQE_CRC                   0x8000
#define EHEA_SWQE_IP_CHECKSUM           0x4000
#define EHEA_SWQE_TCP_CHECKSUM          0x2000
#define EHEA_SWQE_TSO                   0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
#define EHEA_SWQE_VLAN_INSERT           0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
#define EHEA_SWQE_WRAP_CTL_REC          0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
#define EHEA_SWQE_BIND                  0x0020
#define EHEA_SWQE_PURGE                 0x0010

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE        32

struct ehea_swqe {
    u64 wr_id;
    u16 tx_control;
    u16 vlan_tag;
    u8 reserved1;
    u8 ip_start;
    u8 ip_end;
    u8 immediate_data_length;
    u8 tcp_offset;
    u8 reserved2;
    u16 reserved2b;
    u8 wrap_tag;
    u8 descriptors;     /* number of valid descriptors in WQE */
    u16 reserved3;
    u16 reserved4;
    u16 mss;
    u32 reserved5;
    union {
        /*  Send WQE Format 1 */
        struct {
            struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
        } no_immediate_data;

        /*  Send WQE Format 2 */
        struct {
            struct ehea_vsgentry sg_entry;
            /* 0x30 */
            u8 immediate_data[SWQE2_MAX_IMM];
            /* 0xd0 */
            struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
        } immdata_desc __packed;

        /*  Send WQE Format 3 */
        struct {
            u8 immediate_data[SWQE3_MAX_IMM];
        } immdata_nodesc;
    } u;
};
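
/*
 * Size check, spelled out (not in the original header): the fields
 * before the union add up to 8 + 2 + 2 + 6*1 + 2 + 2*1 + 2 + 2 + 2 + 4
 * = 32 bytes, exactly SWQE_HEADER_SIZE, and SWQE2_MAX_IMM
 * (0xD0 - 0x30 = 160) is the immediate-data room between sg_entry at
 * offset 0x30 and the remaining sg_list at offset 0xD0. Format 2
 * (immdata_desc) carries both immediate data and descriptors, format 3
 * (immdata_nodesc) immediate data only, format 1 (no_immediate_data)
 * descriptors only; the flag names suggest the sender advertises its
 * choice in tx_control via EHEA_SWQE_IMM_DATA_PRESENT and
 * EHEA_SWQE_DESCRIPTORS_PRESENT.
 */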

struct ehea_rwqe {
    u64 wr_id;      /* work request ID */
    u8 reserved1[5];
    u8 data_segments;
    u16 reserved2;
    u64 reserved3;
    u64 reserved4;
    struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT   0x0400

#define EHEA_CQE_TYPE_RQ           0x60
#define EHEA_CQE_STAT_ERR_MASK     0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
#define EHEA_CQE_BLIND_CKSUM       0x8000
#define EHEA_CQE_STAT_ERR_TCP      0x4000
#define EHEA_CQE_STAT_ERR_IP       0x2000
#define EHEA_CQE_STAT_ERR_CRC      0x1000
/* Defines which bad send CQE statuses lead to a port reset */
#define EHEA_CQE_STAT_RESET_MASK   0x0002

struct ehea_cqe {
    u64 wr_id;      /* work request ID from WQE */
    u8 type;
    u8 valid;
    u16 status;
    u16 reserved1;
    u16 num_bytes_transfered;
    u16 vlan_tag;
    u16 inet_checksum_value;
    u8 reserved2;
    u8 header_length;
    u16 reserved3;
    u16 page_offset;
    u16 wqe_count;
    u32 qp_token;
    u32 timestamp;
    u32 reserved4;
    u64 reserved5[3];
};

#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)
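
/*
 * An EQE is a single u64 whose low fields overlap depending on the
 * event type (note QP_TOKEN, CQ_TOKEN and KEY all occupy bits 32-63);
 * fields are extracted with EHEA_BMASK_GET from ehea.h, along these
 * lines (illustrative sketch; token is a placeholder variable):
 *
 *   struct ehea_eqe *eqe = ehea_poll_eq(eq);
 *
 *   if (eqe && EHEA_BMASK_GET(EHEA_EQE_VALID, eqe->entry))
 *       token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
 */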

#define EHEA_AER_RESTYPE_QP  0x8
#define EHEA_AER_RESTYPE_CQ  0x4
#define EHEA_AER_RESTYPE_EQ  0x3

/* Defines which affiliated errors lead to a port reset */
#define EHEA_AER_RESET_MASK   0xFFFFFFFFFEFFFFFFULL
#define EHEA_AERR_RESET_MASK  0xFFFFFFFFFFFFFFFFULL

struct ehea_eqe {
    u64 entry;
};

#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0, 7)

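/*
 * hw_qeit_calc() below maps a byte offset within the (virtually
 * contiguous) queue onto its discontiguous backing pages: the offset is
 * first wrapped once past queue_length, then q_offset >> EHEA_PAGESHIFT
 * selects the 4 KiB page and q_offset & (EHEA_PAGESIZE - 1) the byte
 * within it. For example (worked through here, not from the original),
 * q_offset = 0x2040 resolves to byte 0x40 of queue_pages[2].
 */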
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
    struct ehea_page *current_page;

    if (q_offset >= queue->queue_length)
        q_offset -= queue->queue_length;
    current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
    return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}

static inline void *hw_qeit_get(struct hw_queue *queue)
{
    return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
    queue->current_q_offset += queue->qe_size;
    if (queue->current_q_offset >= queue->queue_length) {
        queue->current_q_offset = 0;
        /* toggle the valid flag */
        queue->toggle_state = (~queue->toggle_state) & 1;
    }
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
    void *retvalue = hw_qeit_get(queue);
    hw_qeit_inc(queue);
    return retvalue;
}

static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
    struct ehea_cqe *retvalue = hw_qeit_get(queue);
    u8 valid = retvalue->valid;
    void *pref;

    if ((valid >> 7) == (queue->toggle_state & 1)) {
        /* entry is valid for the current toggle state */
        hw_qeit_inc(queue);
        pref = hw_qeit_calc(queue, queue->current_q_offset);
        prefetch(pref);
        prefetch(pref + 128);
    } else
        retvalue = NULL;
    return retvalue;
}

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
    struct ehea_cqe *retvalue = hw_qeit_get(queue);
    void *pref;
    u8 valid;

    pref = hw_qeit_calc(queue, queue->current_q_offset);
    prefetch(pref);
    prefetch(pref + 128);
    prefetch(pref + 256);
    valid = retvalue->valid;
    if (!((valid >> 7) == (queue->toggle_state & 1)))
        retvalue = NULL;
    return retvalue;
}
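
/*
 * The valid-bit protocol, summarized (a reading of the code above, not
 * original documentation): hardware stamps each entry's high ->valid
 * bit with the current generation, and the consumer tracks its own
 * generation in toggle_state, flipped by hw_qeit_inc() on every wrap
 * of the ring. An entry whose valid bit matches toggle_state is new; a
 * mismatch means the consumer has caught up with hardware, so the two
 * helpers above return NULL. The prefetch() calls merely warm the
 * cache for the next entry.
 */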

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
    queue->current_q_offset = 0;
    return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
    u64 last_entry_in_q = queue->queue_length - queue->qe_size;
    void *retvalue;

    retvalue = hw_qeit_get(queue);
    queue->current_q_offset += queue->qe_size;
    if (queue->current_q_offset > last_entry_in_q) {
        queue->current_q_offset = 0;
        queue->toggle_state = (~queue->toggle_state) & 1;
    }
    return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
    void *retvalue = hw_qeit_get(queue);
    u32 qe = *(u8 *)retvalue;
    if ((qe >> 7) == (queue->toggle_state & 1))
        hw_qeit_eq_get_inc(queue);
    else
        retvalue = NULL;
    return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
                           int rq_nr)
{
    struct hw_queue *queue;

    if (rq_nr == 1)
        queue = &qp->hw_rqueue1;
    else if (rq_nr == 2)
        queue = &qp->hw_rqueue2;
    else
        queue = &qp->hw_rqueue3;

    return hw_qeit_get_inc(queue);
}

static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
                          int *wqe_index)
{
    struct hw_queue *queue = &my_qp->hw_squeue;
    struct ehea_swqe *wqe_p;

    *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
    wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

    return wqe_p;
}

static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
    iosync();
    ehea_update_sqa(my_qp, 1);
}

static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
    struct hw_queue *queue = &qp->hw_rqueue1;

    *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
    return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
    hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
    hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
    return hw_qeit_get_valid(&my_cq->hw_queue);
}
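
/*
 * Typical consumption pattern, as a sketch (placeholder loop, not taken
 * from the driver; process_cqe is a hypothetical handler): peek with
 * ehea_poll_cq(), handle the entry, then advance with ehea_inc_cq() so
 * the next poll sees the next slot.
 *
 *   struct ehea_cqe *cqe;
 *
 *   while ((cqe = ehea_poll_cq(my_cq)) != NULL) {
 *       process_cqe(cqe);
 *       ehea_inc_cq(my_cq);
 *   }
 */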

#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
    EHEA_EQ = 0,        /* event queue              */
    EHEA_NEQ        /* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                   enum ehea_eq_type type,
                   const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
                   u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
                   struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
         struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
            u64 *aer, u64 *aerr);

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);

#endif  /* __EHEA_QMR_H__ */