0001
0002
0003
0004
0005
0006
0007 #include <linux/mutex.h>
0008 #include <linux/freezer.h>
0009 #include <linux/module.h>
0010
0011 #include "server.h"
0012 #include "smb_common.h"
0013 #include "mgmt/ksmbd_ida.h"
0014 #include "connection.h"
0015 #include "transport_tcp.h"
0016 #include "transport_rdma.h"
0017
/* Serializes transport bring-up/tear-down (transport_init/destroy). */
static DEFINE_MUTEX(init_lock);

/* process_fn/terminate_fn installed by the server layer at startup. */
static struct ksmbd_conn_ops default_conn_ops;

/* All live connections; additions/removals under conn_list_lock (write),
 * traversal under the read side.
 */
LIST_HEAD(conn_list);
DEFINE_RWLOCK(conn_list_lock);
0024
0025
0026
0027
0028
0029
0030
0031
0032
/**
 * ksmbd_conn_free() - release a connection object
 * @conn: connection to free
 *
 * Unlinks @conn from the global conn_list under the writer side of
 * conn_list_lock, then releases per-connection resources: the session
 * xarray, the request buffer (kvmalloc'ed in the handler loop, hence
 * kvfree), the preauth info, and finally the connection itself.
 *
 * Caller must guarantee no other thread still references @conn.
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	write_lock(&conn_list_lock);
	list_del(&conn->conns_list);
	write_unlock(&conn_list_lock);

	xa_destroy(&conn->sessions);
	kvfree(conn->request_buf);
	kfree(conn->preauth_info);
	kfree(conn);
}
0044
0045
0046
0047
0048
0049
/**
 * ksmbd_conn_alloc() - allocate and initialize a new connection
 *
 * Allocates a zeroed ksmbd_conn, initializes its negotiation state,
 * codepage, counters, queues and locks, and links it onto the global
 * conn_list. The connection is published on the list only after it is
 * fully initialized.
 *
 * Return: the new connection, or NULL on allocation failure.
 */
struct ksmbd_conn *ksmbd_conn_alloc(void)
{
	struct ksmbd_conn *conn;

	conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	/* A fresh connection must negotiate a dialect first. */
	conn->need_neg = true;
	conn->status = KSMBD_SESS_NEW;
	/* Prefer UTF-8; fall back to the default NLS table if unavailable. */
	conn->local_nls = load_nls("utf8");
	if (!conn->local_nls)
		conn->local_nls = load_nls_default();
	atomic_set(&conn->req_running, 0);
	atomic_set(&conn->r_count, 0);
	/* Initial credit count before any credit grants. */
	conn->total_credits = 1;
	conn->outstanding_credits = 0;

	init_waitqueue_head(&conn->req_running_q);
	init_waitqueue_head(&conn->r_count_q);
	INIT_LIST_HEAD(&conn->conns_list);
	INIT_LIST_HEAD(&conn->requests);
	INIT_LIST_HEAD(&conn->async_requests);
	spin_lock_init(&conn->request_lock);
	spin_lock_init(&conn->credits_lock);
	ida_init(&conn->async_ida);
	xa_init(&conn->sessions);

	spin_lock_init(&conn->llist_lock);
	INIT_LIST_HEAD(&conn->lock_list);

	/* Publish last, once fully initialized. */
	write_lock(&conn_list_lock);
	list_add(&conn->conns_list, &conn_list);
	write_unlock(&conn_list_lock);
	return conn;
}
0086
0087 bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
0088 {
0089 struct ksmbd_conn *t;
0090 bool ret = false;
0091
0092 read_lock(&conn_list_lock);
0093 list_for_each_entry(t, &conn_list, conns_list) {
0094 if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
0095 continue;
0096
0097 ret = true;
0098 break;
0099 }
0100 read_unlock(&conn_list_lock);
0101 return ret;
0102 }
0103
0104 void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
0105 {
0106 struct ksmbd_conn *conn = work->conn;
0107 struct list_head *requests_queue = NULL;
0108
0109 if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {
0110 requests_queue = &conn->requests;
0111 work->syncronous = true;
0112 }
0113
0114 if (requests_queue) {
0115 atomic_inc(&conn->req_running);
0116 spin_lock(&conn->request_lock);
0117 list_add_tail(&work->request_entry, requests_queue);
0118 spin_unlock(&conn->request_lock);
0119 }
0120 }
0121
/**
 * ksmbd_conn_try_dequeue_request() - remove a finished work item from
 *				      its connection's queues
 * @work: request to dequeue
 *
 * Counterpart of ksmbd_conn_enqueue_request(). A work that still has
 * responses pending (multiRsp) is left queued. Always wakes waiters on
 * req_running_q so they re-check the running count.
 *
 * NOTE(review): req_running is decremented outside request_lock while the
 * list manipulation happens inside it — presumably safe because only the
 * owning worker dequeues a given work, but worth confirming against the
 * enqueue path.
 *
 * Return: 0 if the work was dequeued (or was never queued), 1 if it was
 * left queued because a multi-part response is still in progress.
 */
int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	int ret = 1;

	/* Never queued (e.g. SMB2 CANCEL): nothing to do. */
	if (list_empty(&work->request_entry) &&
	    list_empty(&work->async_request_entry))
		return 0;

	if (!work->multiRsp)
		atomic_dec(&conn->req_running);
	spin_lock(&conn->request_lock);
	if (!work->multiRsp) {
		list_del_init(&work->request_entry);
		/* asynchronous works also sit on the async list */
		if (work->syncronous == false)
			list_del_init(&work->async_request_entry);
		ret = 0;
	}
	spin_unlock(&conn->request_lock);

	wake_up_all(&conn->req_running_q);
	return ret;
}
0145
/* Serialize response writers on this connection (see ksmbd_conn_write()). */
static void ksmbd_conn_lock(struct ksmbd_conn *conn)
{
	mutex_lock(&conn->srv_mutex);
}
0150
/* Release the per-connection writer mutex taken by ksmbd_conn_lock(). */
static void ksmbd_conn_unlock(struct ksmbd_conn *conn)
{
	mutex_unlock(&conn->srv_mutex);
}
0155
/**
 * ksmbd_conn_wait_idle() - wait until at most one request is in flight
 * @conn: connection to wait on
 *
 * Sleeps on req_running_q until req_running drops below 2 — presumably
 * the caller's own request is the single one allowed to remain running.
 */
void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
{
	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
}
0160
0161 int ksmbd_conn_write(struct ksmbd_work *work)
0162 {
0163 struct ksmbd_conn *conn = work->conn;
0164 size_t len = 0;
0165 int sent;
0166 struct kvec iov[3];
0167 int iov_idx = 0;
0168
0169 if (!work->response_buf) {
0170 pr_err("NULL response header\n");
0171 return -EINVAL;
0172 }
0173
0174 if (work->tr_buf) {
0175 iov[iov_idx] = (struct kvec) { work->tr_buf,
0176 sizeof(struct smb2_transform_hdr) + 4 };
0177 len += iov[iov_idx++].iov_len;
0178 }
0179
0180 if (work->aux_payload_sz) {
0181 iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz };
0182 len += iov[iov_idx++].iov_len;
0183 iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
0184 len += iov[iov_idx++].iov_len;
0185 } else {
0186 if (work->tr_buf)
0187 iov[iov_idx].iov_len = work->resp_hdr_sz;
0188 else
0189 iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;
0190 iov[iov_idx].iov_base = work->response_buf;
0191 len += iov[iov_idx++].iov_len;
0192 }
0193
0194 ksmbd_conn_lock(conn);
0195 sent = conn->transport->ops->writev(conn->transport, &iov[0],
0196 iov_idx, len,
0197 work->need_invalidate_rkey,
0198 work->remote_key);
0199 ksmbd_conn_unlock(conn);
0200
0201 if (sent < 0) {
0202 pr_err("Failed to send message: %d\n", sent);
0203 return sent;
0204 }
0205
0206 return 0;
0207 }
0208
0209 int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
0210 void *buf, unsigned int buflen,
0211 struct smb2_buffer_desc_v1 *desc,
0212 unsigned int desc_len)
0213 {
0214 int ret = -EINVAL;
0215
0216 if (conn->transport->ops->rdma_read)
0217 ret = conn->transport->ops->rdma_read(conn->transport,
0218 buf, buflen,
0219 desc, desc_len);
0220 return ret;
0221 }
0222
0223 int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
0224 void *buf, unsigned int buflen,
0225 struct smb2_buffer_desc_v1 *desc,
0226 unsigned int desc_len)
0227 {
0228 int ret = -EINVAL;
0229
0230 if (conn->transport->ops->rdma_write)
0231 ret = conn->transport->ops->rdma_write(conn->transport,
0232 buf, buflen,
0233 desc, desc_len);
0234 return ret;
0235 }
0236
/**
 * ksmbd_conn_alive() - decide whether the receive loop should keep running
 * @conn: connection to check
 *
 * The connection is dead when the server is shutting down, the connection
 * is marked exiting, or this kthread was asked to stop. A connection with
 * open files is always kept alive; otherwise it is reaped once
 * server_conf.deadtime (in jiffies, added to last_active) has elapsed
 * without activity.
 *
 * Return: true if the connection should continue to be serviced.
 */
bool ksmbd_conn_alive(struct ksmbd_conn *conn)
{
	if (!ksmbd_server_running())
		return false;

	if (conn->status == KSMBD_SESS_EXITING)
		return false;

	if (kthread_should_stop())
		return false;

	/* Open handles pin the connection regardless of idle time. */
	if (atomic_read(&conn->stats.open_files_count) > 0)
		return true;

	/*
	 * Idle timeout: the debug message reports the value divided by
	 * SMB_ECHO_INTERVAL as "minutes" — TODO confirm the units line up.
	 */
	if (server_conf.deadtime > 0 &&
	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
			    server_conf.deadtime / SMB_ECHO_INTERVAL);
		return false;
	}
	return true;
}
0264
0265
0266
0267
0268
0269
0270
0271
0272
0273 int ksmbd_conn_handler_loop(void *p)
0274 {
0275 struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
0276 struct ksmbd_transport *t = conn->transport;
0277 unsigned int pdu_size;
0278 char hdr_buf[4] = {0,};
0279 int size;
0280
0281 mutex_init(&conn->srv_mutex);
0282 __module_get(THIS_MODULE);
0283
0284 if (t->ops->prepare && t->ops->prepare(t))
0285 goto out;
0286
0287 conn->last_active = jiffies;
0288 while (ksmbd_conn_alive(conn)) {
0289 if (try_to_freeze())
0290 continue;
0291
0292 kvfree(conn->request_buf);
0293 conn->request_buf = NULL;
0294
0295 size = t->ops->read(t, hdr_buf, sizeof(hdr_buf));
0296 if (size != sizeof(hdr_buf))
0297 break;
0298
0299 pdu_size = get_rfc1002_len(hdr_buf);
0300 ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
0301
0302
0303
0304
0305
0306 if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
0307 pdu_size > MAX_STREAM_PROT_LEN) {
0308 continue;
0309 }
0310
0311
0312 size = pdu_size + 4;
0313 conn->request_buf = kvmalloc(size, GFP_KERNEL);
0314 if (!conn->request_buf)
0315 continue;
0316
0317 memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
0318 if (!ksmbd_smb_request(conn))
0319 break;
0320
0321
0322
0323
0324
0325 size = t->ops->read(t, conn->request_buf + 4, pdu_size);
0326 if (size < 0) {
0327 pr_err("sock_read failed: %d\n", size);
0328 break;
0329 }
0330
0331 if (size != pdu_size) {
0332 pr_err("PDU error. Read: %d, Expected: %d\n",
0333 size, pdu_size);
0334 continue;
0335 }
0336
0337 if (!default_conn_ops.process_fn) {
0338 pr_err("No connection request callback\n");
0339 break;
0340 }
0341
0342 if (default_conn_ops.process_fn(conn)) {
0343 pr_err("Cannot handle request\n");
0344 break;
0345 }
0346 }
0347
0348 out:
0349
0350 wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
0351
0352
0353 unload_nls(conn->local_nls);
0354 if (default_conn_ops.terminate_fn)
0355 default_conn_ops.terminate_fn(conn);
0356 t->ops->disconnect(t);
0357 module_put(THIS_MODULE);
0358 return 0;
0359 }
0360
/**
 * ksmbd_conn_init_server_callbacks() - install server-layer callbacks
 * @ops: source of the callbacks; only process_fn and terminate_fn are
 *	 copied into the file-local default_conn_ops.
 */
void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
{
	default_conn_ops.process_fn = ops->process_fn;
	default_conn_ops.terminate_fn = ops->terminate_fn;
}
0366
0367 int ksmbd_conn_transport_init(void)
0368 {
0369 int ret;
0370
0371 mutex_lock(&init_lock);
0372 ret = ksmbd_tcp_init();
0373 if (ret) {
0374 pr_err("Failed to init TCP subsystem: %d\n", ret);
0375 goto out;
0376 }
0377
0378 ret = ksmbd_rdma_init();
0379 if (ret) {
0380 pr_err("Failed to init RDMA subsystem: %d\n", ret);
0381 goto out;
0382 }
0383 out:
0384 mutex_unlock(&init_lock);
0385 return ret;
0386 }
0387
/**
 * stop_sessions() - force all active connections to terminate
 *
 * Marks every connection KSMBD_SESS_EXITING (so ksmbd_conn_alive()
 * returns false) and shuts its transport down, then polls until
 * conn_list drains — handler threads remove themselves from the list
 * via ksmbd_conn_free() as they exit.
 *
 * NOTE(review): the read_lock is dropped around ops->shutdown() while
 * still iterating conn_list; presumably safe because entries are only
 * removed by their own handler threads, but a concurrent list_del would
 * invalidate the cursor — worth confirming.
 */
static void stop_sessions(void)
{
	struct ksmbd_conn *conn;
	struct ksmbd_transport *t;

again:
	read_lock(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		struct task_struct *task;

		t = conn->transport;
		task = t->handler;
		if (task)
			ksmbd_debug(CONN, "Stop session handler %s/%d\n",
				    task->comm, task_pid_nr(task));
		conn->status = KSMBD_SESS_EXITING;
		if (t->ops->shutdown) {
			read_unlock(&conn_list_lock);
			t->ops->shutdown(t);
			read_lock(&conn_list_lock);
		}
	}
	read_unlock(&conn_list_lock);

	/* Re-scan until every handler has freed its connection. */
	if (!list_empty(&conn_list)) {
		schedule_timeout_interruptible(HZ / 10);
		goto again;
	}
}
0417
/**
 * ksmbd_conn_transport_destroy() - tear down transports and sessions
 *
 * Stops the TCP and RDMA listeners first so no new connections arrive,
 * then forces existing sessions to terminate. Serialized against
 * ksmbd_conn_transport_init() by init_lock.
 */
void ksmbd_conn_transport_destroy(void)
{
	mutex_lock(&init_lock);
	ksmbd_tcp_destroy();
	ksmbd_rdma_destroy();
	stop_sessions();
	mutex_unlock(&init_lock);
}