/*
 * Definitions for the SMB Direct (SMB over RDMA) transport used by the
 * CIFS client.
 */
#ifndef _SMBDIRECT_H
#define _SMBDIRECT_H

#ifdef CONFIG_CIFS_SMB_DIRECT
#define cifs_rdma_enabled(server) ((server)->rdma)

#include "cifsglob.h"
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/mempool.h>

/* Transport tunables, defined in smbdirect.c */
extern int rdma_readwrite_threshold;
extern int smbd_max_frmr_depth;
extern int smbd_keep_alive_interval;
extern int smbd_max_receive_size;
extern int smbd_max_fragmented_recv_size;
extern int smbd_max_send_size;
extern int smbd_send_credit_target;
extern int smbd_receive_credit_max;

/* State of the periodic keep-alive exchange with the peer */
enum keep_alive_status {
	KEEP_ALIVE_NONE,
	KEEP_ALIVE_PENDING,
	KEEP_ALIVE_SENT,
};

/* Lifecycle states of an SMB Direct connection */
enum smbd_connection_status {
	SMBD_CREATED,
	SMBD_CONNECTING,
	SMBD_CONNECTED,
	SMBD_NEGOTIATE_FAILED,
	SMBD_DISCONNECTING,
	SMBD_DISCONNECTED,
	SMBD_DESTROYED
};

/*
 * The SMB Direct connection context.
 * Everything related to the RDMA transport is kept here: the RDMA
 * resources, the negotiated connection parameters, memory registration
 * state, the receive and reassembly queues, and the mempools used to
 * allocate packets.
 */
struct smbd_connection {
	enum smbd_connection_status transport_status;

	/* RDMA related resources */
	struct rdma_cm_id *id;
	struct ib_qp_init_attr qp_attr;
	struct ib_pd *pd;
	struct ib_cq *send_cq, *recv_cq;
	struct ib_device_attr dev_attr;
	int ri_rc;
	struct completion ri_done;
	wait_queue_head_t conn_wait;
	wait_queue_head_t disconn_wait;

	struct completion negotiate_completion;
	bool negotiate_done;

	struct work_struct disconnect_work;
	struct work_struct post_send_credits_work;

	spinlock_t lock_new_credits_offered;
	int new_credits_offered;

	/* Connection parameters negotiated with the peer */
	int receive_credit_max;
	int send_credit_target;
	int max_send_size;
	int max_fragmented_recv_size;
	int max_fragmented_send_size;
	int max_receive_size;
	int keep_alive_interval;
	int max_readwrite_size;
	enum keep_alive_status keep_alive_requested;
	int protocol;
	atomic_t send_credits;
	atomic_t receive_credits;
	int receive_credit_target;
	int fragment_reassembly_remaining;

	/* Memory registration state */
	/* Maximum number of outstanding RDMA read/write operations */
	int responder_resources;
	/* Maximum number of pages in a single RDMA write/read */
	int max_frmr_depth;
	/*
	 * I/O payloads up to this size are carried in SMB Direct
	 * send/receive packets; larger payloads use RDMA read/write
	 * through memory registration.
	 */
	int rdma_readwrite_threshold;
	enum ib_mr_type mr_type;
	struct list_head mr_list;
	spinlock_t mr_list_lock;
	/* Number of MRs ready for use, and number currently in use */
	atomic_t mr_ready_count;
	atomic_t mr_used_count;
	wait_queue_head_t wait_mr;
	struct work_struct mr_recovery_work;
	/* Used to wait until all MRs are returned on shutdown */
	wait_queue_head_t wait_for_mr_cleanup;

	/* Outstanding send activity accounting */
	atomic_t send_pending;
	wait_queue_head_t wait_send_pending;
	wait_queue_head_t wait_post_send;

	/* Receive buffers posted to the RDMA receive queue */
	struct list_head receive_queue;
	int count_receive_queue;
	spinlock_t receive_queue_lock;

	struct list_head empty_packet_queue;
	int count_empty_packet_queue;
	spinlock_t empty_packet_queue_lock;

	wait_queue_head_t wait_receive_queues;

	/* Reassembly queue of received packets awaiting the upper layer */
	struct list_head reassembly_queue;
	spinlock_t reassembly_queue_lock;
	wait_queue_head_t wait_reassembly_queue;

	/* Total payload length currently held in the reassembly queue */
	int reassembly_data_length;
	int reassembly_queue_length;
	/* Offset into the first buffer in the reassembly queue */
	int first_entry_offset;

	bool send_immediate;

	wait_queue_head_t wait_send_queue;

	/*
	 * Set when a complete payload (non-fragmented, or the final
	 * fragment) has been received, so the next data packet starts a
	 * new payload for the upper layer.
	 */
	bool full_packet_received;

	struct workqueue_struct *workqueue;
	struct delayed_work idle_timer_work;

	/* Mempools for allocating send requests and receive responses */
	struct kmem_cache *request_cache;
	mempool_t *request_mempool;

	struct kmem_cache *response_cache;
	mempool_t *response_mempool;

	/* Counters kept for debugging */
	unsigned int count_get_receive_buffer;
	unsigned int count_put_receive_buffer;
	unsigned int count_reassembly_queue;
	unsigned int count_enqueue_reassembly_queue;
	unsigned int count_dequeue_reassembly_queue;
	unsigned int count_send_empty;
};
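
/*
 * Illustrative sketch (not part of the original header): how a sender
 * might consume a send credit before posting a packet, blocking on
 * wait_send_queue until the peer grants more credits or the connection
 * drops. The helper name is an assumption; the real credit accounting
 * lives in smbdirect.c.
 */
#if 0	/* example only, not compiled */
static int example_take_send_credit(struct smbd_connection *info)
{
	int rc;

	/* wait until at least one send credit is available */
	rc = wait_event_interruptible(info->wait_send_queue,
			atomic_read(&info->send_credits) > 0 ||
			info->transport_status != SMBD_CONNECTED);
	if (rc)
		return rc;
	if (info->transport_status != SMBD_CONNECTED)
		return -EAGAIN;

	if (atomic_dec_return(&info->send_credits) < 0) {
		/* lost the race; give the credit back so the caller retries */
		atomic_inc(&info->send_credits);
		return -EAGAIN;
	}
	return 0;
}
#endif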

/* The type of an SMB Direct packet, inferred from the connection state */
enum smbd_message_type {
	SMBD_NEGOTIATE_RESP,
	SMBD_TRANSFER_DATA,
};

/* Flag in smbd_data_transfer: the sender requests a prompt response packet */
#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001

/* SMB Direct negotiate request packet [MS-SMBD] 2.2.1 */
struct smbd_negotiate_req {
	__le16 min_version;
	__le16 max_version;
	__le16 reserved;
	__le16 credits_requested;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;
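
/*
 * Illustrative sketch (not part of the original header): filling a
 * negotiate request from the connection parameters. All multi-byte
 * fields are little-endian on the wire, hence the cpu_to_le*()
 * conversions. The helper name and the literal 0x0100 version constant
 * (SMB Direct 1.0) are spelled out here for the example only.
 */
#if 0	/* example only, not compiled */
static void example_fill_negotiate_req(struct smbd_negotiate_req *req,
				       struct smbd_connection *info)
{
	req->min_version = cpu_to_le16(0x0100);
	req->max_version = cpu_to_le16(0x0100);
	req->reserved = 0;
	req->credits_requested = cpu_to_le16(info->send_credit_target);
	req->preferred_send_size = cpu_to_le32(info->max_send_size);
	req->max_receive_size = cpu_to_le32(info->max_receive_size);
	req->max_fragmented_size = cpu_to_le32(info->max_fragmented_recv_size);
}
#endif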

/* SMB Direct negotiate response packet [MS-SMBD] 2.2.2 */
struct smbd_negotiate_resp {
	__le16 min_version;
	__le16 max_version;
	__le16 negotiated_version;
	__le16 reserved;
	__le16 credits_requested;
	__le16 credits_granted;
	__le32 status;
	__le32 max_readwrite_size;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;

/* SMB Direct data transfer packet with payload [MS-SMBD] 2.2.3 */
struct smbd_data_transfer {
	__le16 credits_requested;
	__le16 credits_granted;
	__le16 flags;
	__le16 reserved;
	__le32 remaining_data_length;
	__le32 data_offset;
	__le32 data_length;
	__le32 padding;
	__u8 buffer[];
} __packed;
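
/*
 * Illustrative sketch (not part of the original header): reading a
 * received data transfer packet. data_offset gives where the SMB
 * payload starts, measured from the beginning of the SMB Direct header,
 * and remaining_data_length stays non-zero while further fragments of
 * the same payload are still to come. The helper name is an assumption.
 */
#if 0	/* example only, not compiled */
static void example_parse_data_transfer(struct smbd_data_transfer *pkt)
{
	u32 data_offset = le32_to_cpu(pkt->data_offset);
	u32 data_length = le32_to_cpu(pkt->data_length);
	u32 remaining = le32_to_cpu(pkt->remaining_data_length);
	u8 *payload = (u8 *)pkt + data_offset;

	pr_debug("smbd: %u payload bytes at offset %u, %u still to come\n",
		 data_length, data_offset, remaining);
	(void)payload;
}
#endif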

/* Describes a registered RDMA buffer to the peer (Buffer Descriptor V1) */
struct smbd_buffer_descriptor_v1 {
	__le64 offset;
	__le32 token;
	__le32 length;
} __packed;
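
/*
 * Illustrative sketch (not part of the original header): advertising a
 * registered memory region to the peer for RDMA read/write. The
 * descriptor carries the region's I/O virtual address, remote key and
 * length in little-endian byte order. The helper name is an assumption.
 */
#if 0	/* example only, not compiled */
static void example_fill_buffer_descriptor(struct smbd_buffer_descriptor_v1 *v1,
					   struct ib_mr *mr)
{
	v1->offset = cpu_to_le64(mr->iova);
	v1->token = cpu_to_le32(mr->rkey);
	v1->length = cpu_to_le32(mr->length);
}
#endif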

/* Maximum number of SGEs in a single SMB Direct send request */
#define SMBDIRECT_MAX_SGE 16

/* The context of a send request posted to the RDMA send queue */
struct smbd_request {
	struct smbd_connection *info;
	struct ib_cqe cqe;

	/* The SGEs describing the packet to send */
	struct ib_sge sge[SMBDIRECT_MAX_SGE];
	int num_sge;

	/* The SMB Direct packet (header plus any inlined payload) follows */
	u8 packet[];
};

/* The context of a receive buffer posted to the RDMA receive queue */
struct smbd_response {
	struct smbd_connection *info;
	struct ib_cqe cqe;
	struct ib_sge sge;

	enum smbd_message_type type;

	/* Link to the receive, empty-packet or reassembly queue */
	struct list_head list;

	/* Set if this packet is the first of a reassembled payload */
	bool first_segment;

	/* The SMB Direct packet received in this buffer follows */
	u8 packet[];
};

/* Create a SMB Direct session */
struct smbd_connection *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr);

/* Reconnect the SMB Direct session */
int smbd_reconnect(struct TCP_Server_Info *server);
/* Destroy the SMB Direct session and release its resources */
void smbd_destroy(struct TCP_Server_Info *server);

/* Interface for the upper layer to receive and send SMB payloads */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
int smbd_send(struct TCP_Server_Info *server,
	int num_rqst, struct smb_rqst *rqst);
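
/*
 * Illustrative sketch (not part of the original header): how a caller
 * that already holds a fully built SMB request might hand it to the
 * SMB Direct transport when RDMA is in use. The helper name and the
 * single-kvec request shape are assumptions for the example only.
 */
#if 0	/* example only, not compiled */
static int example_send_smb_packet(struct TCP_Server_Info *server,
				   void *buf, size_t len)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

	if (!cifs_rdma_enabled(server))
		return -EOPNOTSUPP;	/* caller falls back to the socket path */

	/* transmit one request on the SMB Direct connection */
	return smbd_send(server, 1, &rqst);
}
#endif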

/* State of a memory registration used for RDMA read/write */
enum mr_state {
	MR_READY,
	MR_REGISTERED,
	MR_INVALIDATED,
	MR_ERROR
};

/* A memory registration (MR) and the work requests used to manage it */
struct smbd_mr {
	struct smbd_connection *conn;
	struct list_head list;
	enum mr_state state;
	struct ib_mr *mr;
	struct scatterlist *sgl;
	int sgl_count;
	enum dma_data_direction dir;
	union {
		struct ib_reg_wr wr;
		struct ib_send_wr inv_wr;
	};
	struct ib_cqe cqe;
	bool need_invalidate;
	struct completion invalidate_done;
};

/* Interfaces to register and deregister MRs for RDMA read/write to the peer */
struct smbd_mr *smbd_register_mr(
	struct smbd_connection *info, struct page *pages[], int num_pages,
	int offset, int tailsz, bool writing, bool need_invalidate);
int smbd_deregister_mr(struct smbd_mr *mr);
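
/*
 * Illustrative sketch (not part of the original header): registering a
 * page array so the peer can RDMA-write a read response directly into
 * it, advertising the region, then deregistering when the I/O is done.
 * need_invalidate asks the peer to invalidate the registration remotely.
 * The helper name is an assumption; see smb2pdu.c for the real callers.
 */
#if 0	/* example only, not compiled */
static int example_rdma_read_into_pages(struct smbd_connection *info,
					struct page **pages, int num_pages,
					int offset, int tailsz,
					struct smbd_buffer_descriptor_v1 *v1)
{
	struct smbd_mr *mr;

	/* writing=true: the remote peer will write into these pages */
	mr = smbd_register_mr(info, pages, num_pages, offset, tailsz,
			      true, true /* need_invalidate */);
	if (!mr)
		return -EAGAIN;

	/* advertise the registered region to the peer */
	v1->offset = cpu_to_le64(mr->mr->iova);
	v1->token = cpu_to_le32(mr->mr->rkey);
	v1->length = cpu_to_le32(mr->mr->length);

	/* ... send the request and wait for the response ... */

	/* release (and if needed locally invalidate) the registration */
	return smbd_deregister_mr(mr);
}
#endif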

#else
/* Stubs so callers compile when CONFIG_CIFS_SMB_DIRECT is not enabled */
#define cifs_rdma_enabled(server) 0
struct smbd_connection {};
static inline void *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
static inline void smbd_destroy(struct TCP_Server_Info *server) {}
static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
#endif

#endif /* _SMBDIRECT_H */