// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

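/* Notification stub installed on calls being discarded from the backlog so
 * that no further rx notifications are passed upwards (see
 * rxrpc_discard_prealloc() below).
 */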
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;
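	/* max is now the number of calls the backlog will hold after this
	 * preallocation; the peer and conn rings below are topped up to that
	 * level so they never hold fewer entries than the call ring.
	 */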

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 refcount_read(&conn->ref), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, with a user ID preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
			 refcount_read(&call->ref),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);

	/* Check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	call->user_call_ID = user_call_ID;
	call->notify_rx = notify_rx;
	if (user_attach_call) {
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
	}

	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock_bh(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock_bh(&rxnet->call_lock);

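	/* Publish the call to the consumer side.  The release store pairs
	 * with the smp_load_acquire() of the ring head in
	 * rxrpc_alloc_incoming_call(), so a consumer that observes the
	 * advanced head index also sees the call pointer stored in the slot.
	 * The "& (size - 1)" wrap requires RXRPC_BACKLOG_MAX to be a power of
	 * two.
	 */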
	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
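	/* The empty lock/unlock pair above acts purely as a barrier: anyone
	 * who saw the old rx->backlog pointer has now dropped
	 * rx->incoming_lock, so nothing below races with
	 * rxrpc_new_incoming_call().
	 */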

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		rxrpc_put_local(peer->local);
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			if (call->notify_rx)
				call->notify_rx = rxrpc_dummy_notify;
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

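	/* Only propose a PING ACK if we have fewer than three RTT samples for
	 * this peer or the last RTT probe was sent more than a second ago.
	 */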
	if (call->peer->rtt_count < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a
 * connection and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* The backlog must hold at least as many conns as calls and at least
	 * as many peers as conns, as asserted below.
	 */
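	/* The acquire loads of the ring heads pair with the
	 * smp_store_release() calls in rxrpc_service_prealloc_one(): once an
	 * advanced head index is seen, the preallocated object stored in that
	 * slot is visible too.
	 */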
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, sec, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->security = conn->security;
	call->security_ix = conn->security_ix;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call = NULL;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		goto no_call;
	}

	/* The peer, connection and call may all have sprung into existence
	 * due to a duplicate packet being handled on another CPU in parallel,
	 * so we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	if (!conn) {
		sec = rxrpc_get_incoming_security(rx, skb);
		if (!sec)
			goto no_call;
	}

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);

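	/* Advance the call's state according to how far the connection has
	 * got: an unsecured service connection must issue a security
	 * challenge first, a secured one lets the call start receiving the
	 * request immediately, and an aborted one completes the call straight
	 * away.
	 */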
	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);
	spin_unlock(&rx->incoming_lock);

	rxrpc_send_ping(call, skb);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
	return call;

no_call:
	spin_unlock(&rx->incoming_lock);
	_leave(" = NULL [%u]", skb->mark);
	return NULL;
}

/*
 * Charge up socket with preallocated calls, attaching user call IDs.
 */
int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (rx->sk.sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
					  GFP_KERNEL,
					  atomic_inc_return(&rxrpc_debug_id));
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);