/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
    GDMA_VERIFY_VF_DRIVER_VERSION   = 1,
    GDMA_QUERY_MAX_RESOURCES    = 2,
    GDMA_LIST_DEVICES       = 3,
    GDMA_REGISTER_DEVICE        = 4,
    GDMA_DEREGISTER_DEVICE      = 5,
    GDMA_GENERATE_TEST_EQE      = 10,
    GDMA_CREATE_QUEUE       = 12,
    GDMA_DISABLE_QUEUE      = 13,
    GDMA_CREATE_DMA_REGION      = 25,
    GDMA_DMA_REGION_ADD_PAGES   = 26,
    GDMA_DESTROY_DMA_REGION     = 27,
};

enum gdma_queue_type {
    GDMA_INVALID_QUEUE,
    GDMA_SQ,
    GDMA_RQ,
    GDMA_CQ,
    GDMA_EQ,
};

enum gdma_work_request_flags {
    GDMA_WR_NONE            = 0,
    GDMA_WR_OOB_IN_SGL      = BIT(0),
    GDMA_WR_PAD_BY_SGE0     = BIT(1),
};

enum gdma_eqe_type {
    GDMA_EQE_COMPLETION     = 3,
    GDMA_EQE_TEST_EVENT     = 64,
    GDMA_EQE_HWC_INIT_EQ_ID_DB  = 129,
    GDMA_EQE_HWC_INIT_DATA      = 130,
    GDMA_EQE_HWC_INIT_DONE      = 131,
};

enum {
    GDMA_DEVICE_NONE    = 0,
    GDMA_DEVICE_HWC     = 1,
    GDMA_DEVICE_MANA    = 2,
};

struct gdma_resource {
    /* Protect the bitmap */
    spinlock_t lock;

    /* The bitmap size in bits. */
    u32 size;

    /* The bitmap tracks the resources. */
    unsigned long *map;
};

union gdma_doorbell_entry {
    u64 as_uint64;

    struct {
        u64 id      : 24;
        u64 reserved    : 8;
        u64 tail_ptr    : 31;
        u64 arm     : 1;
    } cq;

    struct {
        u64 id      : 24;
        u64 wqe_cnt : 8;
        u64 tail_ptr    : 32;
    } rq;

    struct {
        u64 id      : 24;
        u64 reserved    : 8;
        u64 tail_ptr    : 32;
    } sq;

    struct {
        u64 id      : 16;
        u64 reserved    : 16;
        u64 tail_ptr    : 31;
        u64 arm     : 1;
    } eq;
}; /* HW DATA */

struct gdma_msg_hdr {
    u32 hdr_type;
    u32 msg_type;
    u16 msg_version;
    u16 hwc_msg_id;
    u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
    union {
        struct {
            u16 type;
            u16 instance;
        };

        u32 as_uint32;
    };
}; /* HW DATA */

struct gdma_req_hdr {
    struct gdma_msg_hdr req;
    struct gdma_msg_hdr resp; /* The expected response */
    struct gdma_dev_id dev_id;
    u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
    struct gdma_msg_hdr response;
    struct gdma_dev_id dev_id;
    u32 activity_id;
    u32 status;
    u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
    struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1

struct gdma_general_resp {
    struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
                    u32 req_size, u32 resp_size)
{
    hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
    hdr->req.msg_type = code;
    hdr->req.msg_version = GDMA_MESSAGE_V1;
    hdr->req.msg_size = req_size;

    hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
    hdr->resp.msg_type = code;
    hdr->resp.msg_version = GDMA_MESSAGE_V1;
    hdr->resp.msg_size = resp_size;
}
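
/* Illustrative usage sketch (not part of this header; 'gd', 'gc', 'eq' and
 * 'err' are assumed driver locals): a request embeds struct gdma_req_hdr
 * first, is initialized with mana_gd_init_req_hdr(), and is then sent over
 * the HWC, e.g. with mana_gd_send_request() declared near the end of this
 * file:
 *
 *      struct gdma_generate_test_event_req req = {};
 *      struct gdma_general_resp resp = {};
 *      int err;
 *
 *      mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
 *                           sizeof(req), sizeof(resp));
 *      req.hdr.dev_id = gd->dev_id;
 *      req.queue_index = eq->id;
 *
 *      err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *      if (err || resp.hdr.status)
 *              // handle the failure
 */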

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
    u64 address;
    u32 mem_key;
    u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
    struct gdma_sge *sgl;
    u32 num_sge;

    u32 inline_oob_size;
    const void *inline_oob_data;

    u32 flags;
    u32 client_data_unit;
};

enum gdma_page_type {
    GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
    struct device *dev;

    dma_addr_t dma_handle;
    void *virt_addr;
    u64 length;

    /* Allocated by the PF driver */
    u64 gdma_region;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
    struct gdma_context *gdma_context;

    struct gdma_dev_id dev_id;

    u32 pdid;
    u32 doorbell;
    u32 gpa_mkey;

    /* GDMA driver specific pointer */
    void *driver_data;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32
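
/* For example, a WQE whose OOB plus SGL adds up to 48 bytes is padded to
 * 64 bytes, i.e. ALIGN(48, GDMA_WQE_BU_SIZE) / GDMA_WQE_BU_SIZE = 2 Basic
 * Units; the SQ/RQ head and tail indices below are maintained in these
 * units rather than in bytes.
 */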

#define INVALID_PDID        UINT_MAX
#define INVALID_DOORBELL    UINT_MAX
#define INVALID_MEM_KEY     UINT_MAX
#define INVALID_QUEUE_ID    UINT_MAX
#define INVALID_PCI_MSIX_INDEX  UINT_MAX

struct gdma_comp {
    u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
    u32 wq_num;
    bool is_sq;
};

struct gdma_event {
    u32 details[GDMA_EVENT_DATA_SIZE / 4];
    u8  type;
};

struct gdma_queue;

struct mana_eq {
    struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
                  struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
    struct gdma_dev *gdma_dev;

    enum gdma_queue_type type;
    u32 id;

    struct gdma_mem_info mem_info;

    void *queue_mem_ptr;
    u32 queue_size;

    bool monitor_avl_buf;

    u32 head;
    u32 tail;

    /* Extra fields specific to EQ/CQ. */
    union {
        struct {
            bool disable_needed;

            gdma_eq_callback *callback;
            void *context;

            unsigned int msix_index;

            u32 log2_throttle_limit;
        } eq;

        struct {
            gdma_cq_callback *callback;
            void *context;

            struct gdma_queue *parent; /* For CQ/EQ relationship */
        } cq;
    };
};
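
/* Illustrative sketch of the 'head'/'tail' rules above for an SQ (assumed
 * local names; the real bookkeeping lives in the GDMA/MANA .c files):
 *
 *      // post: the producer index advances by the WQE size in Basic Units
 *      sq->head += wqe_size_in_bu;
 *      // ... ring the doorbell so the HW sees the new head ...
 *
 *      // later, when the CQE for that WQE has been processed:
 *      sq->tail += wqe_size_in_bu;   // the WQE slots may be reused
 *
 * For an EQ/CQ the driver only does 'q->head++' per processed entry and
 * relies on the owner-bits mechanism (see GDMA_CQE_OWNER_MASK below)
 * instead of a tail index.
 */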

struct gdma_queue_spec {
    enum gdma_queue_type type;
    bool monitor_avl_buf;
    unsigned int queue_size;

    /* Extra fields specific to EQ/CQ. */
    union {
        struct {
            gdma_eq_callback *callback;
            void *context;

            unsigned long log2_throttle_limit;
        } eq;

        struct {
            gdma_cq_callback *callback;
            void *context;

            struct gdma_queue *parent_eq;
        } cq;
    };
};
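
/* Illustrative sketch (assumed local names and constants): creating a queue
 * amounts to filling in a spec and calling one of the creation helpers
 * declared later in this header, e.g. mana_gd_create_mana_eq():
 *
 *      struct gdma_queue_spec spec = {};
 *      struct gdma_queue *eq;
 *      int err;
 *
 *      spec.type = GDMA_EQ;
 *      spec.monitor_avl_buf = false;
 *      spec.queue_size = EQ_SIZE;                   // driver-chosen size
 *      spec.eq.callback = my_eq_callback;           // a gdma_eq_callback
 *      spec.eq.context = my_context;
 *      spec.eq.log2_throttle_limit = LOG2_THROTTLE; // driver-chosen limit
 *
 *      err = mana_gd_create_mana_eq(gd, &spec, &eq);
 */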

struct gdma_irq_context {
    void (*handler)(void *arg);
    void *arg;
};

struct gdma_context {
    struct device       *dev;

    /* Per-vPort max number of queues */
    unsigned int        max_num_queues;
    unsigned int        max_num_msix;
    unsigned int        num_msix_usable;
    struct gdma_resource    msix_resource;
    struct gdma_irq_context *irq_contexts;

    /* This maps a CQ index to the queue structure. */
    unsigned int        max_num_cqs;
    struct gdma_queue   **cq_table;

    /* Protect eq_test_event and test_event_eq_id */
    struct mutex        eq_test_event_mutex;
    struct completion   eq_test_event;
    u32         test_event_eq_id;

    bool            is_pf;
    void __iomem        *bar0_va;
    void __iomem        *shm_base;
    void __iomem        *db_page_base;
    u32 db_page_size;

    /* Shared memory channel (used to bootstrap the HWC) */
    struct shm_channel  shm_channel;

    /* Hardware communication channel (HWC) */
    struct gdma_dev     hwc;

    /* Azure network adapter */
    struct gdma_dev     mana;
};

#define MAX_NUM_GDMA_DEVICES    4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
    return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
    return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
                 const struct gdma_queue_spec *spec,
                 struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
               const struct gdma_queue_spec *spec,
               struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
                  const struct gdma_queue_spec *spec,
                  struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
    u32 reserved    :24;
    u32 last_vbytes :8;

    union {
        u32 flags;

        struct {
            u32 num_sge     :8;
            u32 inline_oob_size_div4:3;
            u32 client_oob_in_sgl   :1;
            u32 reserved1       :4;
            u32 client_data_unit    :14;
            u32 reserved2       :2;
        };
    };
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

struct gdma_cqe {
    u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

    union {
        u32 as_uint32;

        struct {
            u32 wq_num  : 24;
            u32 is_sq   : 1;
            u32 reserved    : 4;
            u32 owner_bits  : 3;
        };
    } cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
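
/* Illustrative sketch (assumed names; the exact checks are in the GDMA .c
 * code) of the owner-bits mechanism referenced above: the 3 owner bits in a
 * CQE effectively record which "lap" around the ring the entry was written
 * on, so the driver can tell a freshly written CQE from a stale one without
 * keeping a tail index:
 *
 *      new_bits = (cq->head / num_cqes_in_queue) & GDMA_CQE_OWNER_MASK;
 *      if (cqe->cqe_info.owner_bits == new_bits)
 *              // a new CQE is present: consume it and do cq->head++
 *      else
 *              // no new entry (or the queue has overflowed)
 */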

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
    u32 as_uint32;

    struct {
        u32 type    : 8;
        u32 reserved1   : 8;
        u32 client_id   : 2;
        u32 reserved2   : 11;
        u32 owner_bits  : 3;
    };
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
    u32 details[GDMA_EVENT_DATA_SIZE / 4];
    u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET 8
#define GDMA_REG_DB_PAGE_SIZE   0x10
#define GDMA_REG_SHM_OFFSET 0x18

#define GDMA_PF_REG_DB_PAGE_SIZE    0xD0
#define GDMA_PF_REG_DB_PAGE_OFF     0xC8
#define GDMA_PF_REG_SHM_OFF     0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
    u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
    struct gdma_req_hdr hdr;
    u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
    GDMA_PROTOCOL_V1    = 1,
    GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
    GDMA_PROTOCOL_LAST  = GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
    struct gdma_req_hdr hdr;

    /* Mandatory fields required for protocol establishment */
    u64 protocol_ver_min;
    u64 protocol_ver_max;

    /* GDMA driver capability flags */
    u64 gd_drv_cap_flags1;
    u64 gd_drv_cap_flags2;
    u64 gd_drv_cap_flags3;
    u64 gd_drv_cap_flags4;

    /* Advisory fields */
    u64 drv_ver;
    u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
    u32 reserved;
    u32 os_ver_major;
    u32 os_ver_minor;
    u32 os_ver_build;
    u32 os_ver_platform;
    u64 reserved_2;
    u8 os_ver_str1[128];
    u8 os_ver_str2[128];
    u8 os_ver_str3[128];
    u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
    struct gdma_resp_hdr hdr;
    u64 gdma_protocol_ver;
    u64 pf_cap_flags1;
    u64 pf_cap_flags2;
    u64 pf_cap_flags3;
    u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
    struct gdma_resp_hdr hdr;
    u32 status;
    u32 max_sq;
    u32 max_rq;
    u32 max_cq;
    u32 max_eq;
    u32 max_db;
    u32 max_mst;
    u32 max_cq_mod_ctx;
    u32 max_mod_cq;
    u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
    struct gdma_resp_hdr hdr;
    u32 num_of_devs;
    u32 reserved;
    struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
    struct gdma_resp_hdr hdr;
    u32 pdid;
    u32 gpa_mkey;
    u32 db_id;
}; /* HW DATA */

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
    struct gdma_req_hdr hdr;
    u32 type;
    u32 reserved1;
    u32 pdid;
    u32 doolbell_id;
    u64 gdma_region;
    u32 reserved2;
    u32 queue_size;
    u32 log2_throttle_limit;
    u32 eq_pci_msix_index;
    u32 cq_mod_ctx_id;
    u32 cq_parent_eq_id;
    u8  rq_drop_on_overrun;
    u8  rq_err_on_wqe_overflow;
    u8  rq_chain_rec_wqes;
    u8  sq_hw_db;
    u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
    struct gdma_resp_hdr hdr;
    u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
    struct gdma_req_hdr hdr;
    u32 type;
    u32 queue_index;
    u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
    struct gdma_req_hdr hdr;

    /* The total size of the DMA region */
    u64 length;

    /* The offset in the first page */
    u32 offset_in_page;

    /* enum gdma_page_type */
    u32 gdma_page_type;

    /* The total number of pages */
    u32 page_count;

    /* If page_addr_list_len is smaller than page_count,
     * the remaining page addresses will be added via the
     * message GDMA_DMA_REGION_ADD_PAGES.
     */
    u32 page_addr_list_len;
    u64 page_addr_list[];
}; /* HW DATA */
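
/* Illustrative example of the split described above (numbers made up): for a
 * region spanning 100 4K pages where only the first 60 page addresses fit in
 * the create request (HWC messages have a bounded size), the request carries
 * page_count = 100 and page_addr_list_len = 60, and the remaining 40
 * addresses follow in one or more GDMA_DMA_REGION_ADD_PAGES requests that
 * reference the gdma_region returned below.
 */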

struct gdma_create_dma_region_resp {
    struct gdma_resp_hdr hdr;
    u64 gdma_region;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
    struct gdma_req_hdr hdr;

    u64 gdma_region;

    u32 page_addr_list_len;
    u32 reserved3;

    u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
    struct gdma_req_hdr hdr;

    u64 gdma_region;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
                  const struct gdma_wqe_request *wqe_req,
                  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
              const struct gdma_wqe_request *wqe,
              struct gdma_posted_wqe_info *wqe_info);
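
/* Illustrative sketch (assumed local names): posting a work request means
 * filling a struct gdma_wqe_request with an SGL and optional inline OOB and
 * letting the helpers above build the WQE and ring the doorbell:
 *
 *      struct gdma_wqe_request wqe_req = {};
 *      struct gdma_posted_wqe_info wqe_info = {};
 *      struct gdma_sge sge = {};
 *      int err;
 *
 *      sge.address = dma_addr;          // DMA-mapped buffer address
 *      sge.mem_key = gd->gpa_mkey;
 *      sge.size = len;
 *
 *      wqe_req.sgl = &sge;
 *      wqe_req.num_sge = 1;
 *      wqe_req.inline_oob_size = INLINE_OOB_SMALL_SIZE;
 *      wqe_req.inline_oob_data = &oob;  // driver-defined OOB structure
 *
 *      err = mana_gd_post_and_ring(wq, &wqe_req, &wqe_info);
 */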

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
                  struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
             struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
             u32 resp_len, void *resp);
#endif /* _GDMA_H */