// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made
 * a call so as to handle retransmitted DATA packets in case the server
 * didn't receive the final ACK or terminating ABORT that we sent it.
 *
 * There are two idle client connection expiry durations: if the total number
 * of connections is below the reap threshold, the normal duration is used;
 * above it, the fast duration is used.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
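/*
 * We use machine-unique IDs for our client connection IDs.
 */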
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
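/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.
 */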
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
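/*
 * Release a connection ID for a client connection from the global pool.
 */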
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}
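/*
 * Destroy the client connection ID tree, complaining about any connections
 * that have leaked.
 */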
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, refcount_read(&conn->ref));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}
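/*
 * Allocate a connection bundle.
 */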
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
					       gfp_t gfp)
{
	struct rxrpc_bundle *bundle;

	bundle = kzalloc(sizeof(*bundle), gfp);
	if (bundle) {
		bundle->params = *cp;
		rxrpc_get_peer(bundle->params.peer);
		refcount_set(&bundle->ref, 1);
		spin_lock_init(&bundle->channel_lock);
		INIT_LIST_HEAD(&bundle->waiting_calls);
	}
	return bundle;
}

struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
{
	refcount_inc(&bundle->ref);
	return bundle;
}

static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
	rxrpc_put_peer(bundle->params.peer);
	kfree(bundle);
}

void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
{
	unsigned int d = bundle->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&bundle->ref, &r);

	/* r holds the count prior to the decrement, so log the post-decrement
	 * value for consistency with rxrpc_put_client_conn().
	 */
	_debug("PUT B=%x %d", d, r - 1);
	if (dead)
		rxrpc_free_bundle(bundle);
}
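/*
 * Allocate a client connection.
 */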
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	refcount_set(&conn->ref, 1);
	conn->bundle = bundle;
	conn->params = bundle->params;
	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
	conn->state = RXRPC_CONN_CLIENT;
	conn->service_id = conn->params.service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	rxrpc_get_bundle(bundle);
	rxrpc_get_peer(conn->params.peer);
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
			 refcount_read(&conn->ref),
			 __builtin_return_address(0));

	atomic_inc(&rxnet->nr_client_conns);
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
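/*
 * Determine if a connection may be reused.
 */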
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	int id_cursor, id, distance, limit;

	if (!conn)
		goto dont_reuse;

	rxnet = conn->params.local->rxnet;
	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->state != RXRPC_CONN_CLIENT ||
	    conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs
	 * are widely scattered throughout the number space, so we want to
	 * kill off connections whose ID has drifted too far from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}
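/*
 * Look up the conn bundle that matches the connection parameters, adding it
 * if it doesn't yet exist.
 */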
static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *cp,
						 gfp_t gfp)
{
	static atomic_t rxrpc_bundle_id;
	struct rxrpc_bundle *bundle, *candidate;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;

	_enter("{%px,%x,%u,%u}",
	       cp->peer, key_serial(cp->key), cp->security_level, cp->upgrade);

	if (cp->exclusive)
		return rxrpc_alloc_bundle(cp, gfp);

	/* First, see if the bundle is already there. */
	_debug("search 1");
	spin_lock(&local->client_bundles_lock);
	p = local->client_bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_bundle, local_node);

#define cmp(X) ((long)bundle->params.X - (long)cp->X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0)
			p = p->rb_left;
		else if (diff > 0)
			p = p->rb_right;
		else
			goto found_bundle;
	}
	spin_unlock(&local->client_bundles_lock);
	_debug("not found");

	/* It wasn't.  We need to add one. */
	candidate = rxrpc_alloc_bundle(cp, gfp);
	if (!candidate)
		return NULL;

	_debug("search 2");
	spin_lock(&local->client_bundles_lock);
	pp = &local->client_bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X) ((long)bundle->params.X - (long)cp->X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_bundle_free;
	}

	_debug("new bundle");
	candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
	rb_link_node(&candidate->local_node, parent, pp);
	rb_insert_color(&candidate->local_node, &local->client_bundles);
	rxrpc_get_bundle(candidate);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = %u [new]", candidate->debug_id);
	return candidate;

found_bundle_free:
	rxrpc_free_bundle(candidate);
found_bundle:
	rxrpc_get_bundle(bundle);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = %u [found]", bundle->debug_id);
	return bundle;
}
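/*
 * Create or find a client bundle to use for a call.
 *
 * If we return with a bundle, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */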
static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    struct rxrpc_conn_parameters *cp,
					    struct sockaddr_rxrpc *srx,
					    gfp_t gfp)
{
	struct rxrpc_bundle *bundle;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;
	if (cp->upgrade)
		__set_bit(RXRPC_CALL_UPGRADE, &call->flags);

	/* Find the client connection bundle. */
	bundle = rxrpc_look_up_bundle(cp, gfp);
	if (!bundle)
		goto error;

	/* Get this call queued, so we can start looking for channels. */
	spin_lock(&bundle->channel_lock);
	list_add_tail(&call->chan_wait_link, &bundle->waiting_calls);
	spin_unlock(&bundle->channel_lock);

	_leave(" = [B=%x]", bundle->debug_id);
	return bundle;

error:
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}
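/*
 * Allocate a new connection and add it into a bundle.
 */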
static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
	__releases(bundle->channel_lock)
{
	struct rxrpc_connection *candidate = NULL, *old = NULL;
	bool conflict;
	int i;

	_enter("");

	conflict = bundle->alloc_conn;
	if (!conflict)
		bundle->alloc_conn = true;
	spin_unlock(&bundle->channel_lock);
	if (conflict) {
		_leave(" [conf]");
		return;
	}

	candidate = rxrpc_alloc_client_connection(bundle, gfp);

	spin_lock(&bundle->channel_lock);
	bundle->alloc_conn = false;

	if (IS_ERR(candidate)) {
		bundle->alloc_error = PTR_ERR(candidate);
		spin_unlock(&bundle->channel_lock);
		_leave(" [err %ld]", PTR_ERR(candidate));
		return;
	}

	bundle->alloc_error = 0;

	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
		unsigned int shift = i * RXRPC_MAXCALLS;
		int j;

		old = bundle->conns[i];
		if (!rxrpc_may_reuse_conn(old)) {
			if (old)
				trace_rxrpc_client(old, -1, rxrpc_client_replace);
			candidate->bundle_shift = shift;
			bundle->conns[i] = candidate;
			for (j = 0; j < RXRPC_MAXCALLS; j++)
				set_bit(shift + j, &bundle->avail_chans);
			candidate = NULL;
			break;
		}

		old = NULL;
	}

	spin_unlock(&bundle->channel_lock);

	if (candidate) {
		_debug("discard C=%x", candidate->debug_id);
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
	}

	rxrpc_put_connection(old);
	_leave("");
}
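/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections to be discarded.
 */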
static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp)
{
	struct rxrpc_call *call;
	int i, usable;

	_enter("");

	spin_lock(&bundle->channel_lock);

	/* See if there are any usable connections. */
	usable = 0;
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
		if (rxrpc_may_reuse_conn(bundle->conns[i]))
			usable++;

	if (!usable && !list_empty(&bundle->waiting_calls)) {
		call = list_first_entry(&bundle->waiting_calls,
					struct rxrpc_call, chan_wait_link);
		if (test_bit(RXRPC_CALL_UPGRADE, &call->flags))
			bundle->try_upgrade = true;
	}

	if (!usable)
		goto alloc_conn;

	if (!bundle->avail_chans &&
	    !bundle->try_upgrade &&
	    !list_empty(&bundle->waiting_calls) &&
	    usable < ARRAY_SIZE(bundle->conns))
		goto alloc_conn;

	spin_unlock(&bundle->channel_lock);
	_leave("");
	return;

alloc_conn:
	return rxrpc_add_conn_to_bundle(bundle, gfp);
}
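/*
 * Assign a channel to the call at the front of the queue and wake the call
 * up.  We don't increment the call counter until this number has been exposed
 * to the world.
 */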
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	_enter("C=%x,%u", conn->debug_id, channel);

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent
	 * yet.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->conn = rxrpc_get_connection(conn);
	call->cid = conn->proto.cid | channel;
	call->call_id = call_id;
	call->security = conn->security;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	write_unlock_bh(&call->state_lock);

	/* Paired with the read barrier in rxrpc_connect_call().  This orders
	 * cid and epoch in the connection wrt call_id without the need to
	 * take the channel_lock.
	 */
	smp_wmb();

	chan->call_id = call_id;
	chan->call_debug_id = call->debug_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}
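/*
 * Remove a connection from the idle list if it's on it.
 */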
static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	bool drop_ref;

	if (!list_empty(&conn->cache_link)) {
		drop_ref = false;
		spin_lock(&rxnet->client_conn_cache_lock);
		if (!list_empty(&conn->cache_link)) {
			list_del_init(&conn->cache_link);
			drop_ref = true;
		}
		spin_unlock(&rxnet->client_conn_cache_lock);
		if (drop_ref)
			rxrpc_put_connection(conn);
	}
}
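/*
 * Assign channels and call numbers to waiting calls with channel_lock held by
 * the caller.
 */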
static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	unsigned long avail, mask;
	unsigned int channel, slot;

	if (bundle->try_upgrade)
		mask = 1;
	else
		mask = ULONG_MAX;

	while (!list_empty(&bundle->waiting_calls)) {
		avail = bundle->avail_chans & mask;
		if (!avail)
			break;
		channel = __ffs(avail);
		clear_bit(channel, &bundle->avail_chans);

		slot = channel / RXRPC_MAXCALLS;
		conn = bundle->conns[slot];
		if (!conn)
			break;

		if (bundle->try_upgrade)
			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
		rxrpc_unidle_conn(bundle, conn);

		channel &= (RXRPC_MAXCALLS - 1);
		conn->act_chans |= 1 << channel;
		rxrpc_activate_one_channel(conn, channel);
	}
}
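/*
 * Assign channels and call numbers to waiting calls.
 */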
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
	_enter("B=%x", bundle->debug_id);

	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

	if (!bundle->avail_chans)
		return;

	spin_lock(&bundle->channel_lock);
	rxrpc_activate_channels_locked(bundle);
	spin_unlock(&bundle->channel_lock);
	_leave("");
}
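/*
 * Wait for a call number and a channel to be granted to a call.
 */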
static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle,
				  struct rxrpc_call *call, gfp_t gfp)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!gfpflags_allow_blocking(gfp)) {
		rxrpc_maybe_add_conn(bundle, gfp);
		rxrpc_activate_channels(bundle);
		ret = bundle->alloc_error ?: -EAGAIN;
		goto out;
	}

	add_wait_queue_exclusive(&call->waitq, &myself);
	for (;;) {
		rxrpc_maybe_add_conn(bundle, gfp);
		rxrpc_activate_channels(bundle);
		ret = bundle->alloc_error;
		if (ret < 0)
			break;

		switch (call->interruptibility) {
		case RXRPC_INTERRUPTIBLE:
		case RXRPC_PREINTERRUPTIBLE:
			set_current_state(TASK_INTERRUPTIBLE);
			break;
		case RXRPC_UNINTERRUPTIBLE:
		default:
			set_current_state(TASK_UNINTERRUPTIBLE);
			break;
		}
		if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_AWAIT_CONN)
			break;
		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
		    signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

out:
	_leave(" = %d", ret);
	return ret;
}
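/*
 * Find a connection for a call, queueing the call for a channel on it and
 * waiting for one to become available if necessary.
 */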
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_bundle *bundle;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret = 0;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);

	bundle = rxrpc_prep_call(rx, call, cp, srx, gfp);
	if (IS_ERR(bundle)) {
		ret = PTR_ERR(bundle);
		goto out;
	}

	if (call->state == RXRPC_CALL_CLIENT_AWAIT_CONN) {
		ret = rxrpc_wait_for_channel(bundle, call, gfp);
		if (ret < 0)
			goto wait_failed;
	}

granted_channel:
	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out_put_bundle:
	rxrpc_put_bundle(bundle);
out:
	_leave(" = %d", ret);
	return ret;

wait_failed:
	spin_lock(&bundle->channel_lock);
	list_del_init(&call->chan_wait_link);
	spin_unlock(&bundle->channel_lock);

	if (call->state != RXRPC_CALL_CLIENT_AWAIT_CONN) {
		ret = 0;
		goto granted_channel;
	}

	trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
	rxrpc_disconnect_client_call(bundle, call);
	goto out_put_bundle;
}
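/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */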
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the call counter gets
		 * near INT_MAX, we stop reusing the connection once its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
	}
}
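/*
 * Set the reap timer.
 */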
static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
{
	if (!rxnet->kill_all_client_conns) {
		unsigned long now = jiffies;
		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

		if (rxnet->live)
			timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
	}
}
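/*
 * Disconnect a client call.
 */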
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	unsigned int channel;
	bool may_reuse;
	u32 cid;

	_enter("c=%x", call->debug_id);

	spin_lock(&bundle->channel_lock);
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);

	/* Calls that have never actually been assigned a channel can simply
	 * be discarded.
	 */
	conn = call->conn;
	if (!conn) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);
		goto out;
	}

	cid = call->cid;
	channel = cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	if (rcu_access_pointer(chan->call) != call) {
		spin_unlock(&bundle->channel_lock);
		BUG();
	}

	may_reuse = rxrpc_may_reuse_conn(conn);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);

		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			bundle->try_upgrade = false;
			if (may_reuse)
				rxrpc_activate_channels_locked(bundle);
		}
	}

	/* See if we can pass the channel directly to another call. */
	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;
	}

	/* Schedule the final ACK to be transmitted in a short while so that
	 * it can be skipped if we find a follow-on call.  The first DATA
	 * packet of the follow-on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Deactivate the channel. */
	rcu_assign_pointer(chan->call, NULL);
	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
	conn->act_chans &= ~(1 << channel);

	/* If no channels remain active, then put the connection on the idle
	 * list for a short while.  Give it a ref to stop it going away if it
	 * becomes unbundled.
	 */
	if (!conn->act_chans) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;

		rxrpc_get_connection(conn);
		spin_lock(&rxnet->client_conn_cache_lock);
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		spin_unlock(&rxnet->client_conn_cache_lock);

		rxrpc_set_client_reap_timer(rxnet);
	}

out:
	spin_unlock(&bundle->channel_lock);
	_leave("");
	return;
}
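/*
 * Remove a connection from a bundle.
 */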
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_local *local = bundle->params.local;
	unsigned int bindex;
	bool need_drop = false, need_put = false;
	int i;

	_enter("C=%x", conn->debug_id);

	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, true);

	spin_lock(&bundle->channel_lock);
	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
	if (bundle->conns[bindex] == conn) {
		_debug("clear slot %u", bindex);
		bundle->conns[bindex] = NULL;
		for (i = 0; i < RXRPC_MAXCALLS; i++)
			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
		need_drop = true;
	}
	spin_unlock(&bundle->channel_lock);

	/* If there are no more connections, remove the bundle. */
	if (!bundle->avail_chans) {
		_debug("maybe unbundle");
		spin_lock(&local->client_bundles_lock);

		for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
			if (bundle->conns[i])
				break;
		if (i == ARRAY_SIZE(bundle->conns) && !bundle->params.exclusive) {
			_debug("erase bundle");
			rb_erase(&bundle->local_node, &local->client_bundles);
			need_put = true;
		}

		spin_unlock(&local->client_bundles_lock);
		if (need_put)
			rxrpc_put_bundle(bundle);
	}

	if (need_drop)
		rxrpc_put_connection(conn);
	_leave("");
}
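/*
 * Clean up a dead client connection.
 */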
static void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("C=%x", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
	atomic_dec(&rxnet->nr_client_conns);

	rxrpc_put_client_connection_id(conn);
	rxrpc_kill_connection(conn);
}
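/*
 * Release a reference on a client connection, cleaning it up when the last
 * reference is dropped.
 */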
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = conn->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&conn->ref, &r);
	trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, r - 1, here);
	if (dead)
		rxrpc_kill_client_conn(conn);
}
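/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been given a ref when it was placed there.
 *
 * This may be called from connection setup or from a work item, so it cannot
 * be assumed to be non-reentrant.
 */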
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding. */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = atomic_read(&rxnet->nr_client_conns);

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to
		 * do final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->params.local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);

	rxrpc_unbundle_conn(conn);
	rxrpc_put_connection(conn); /* Drop the ->cache_link ref */

	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point in the future.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}
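/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out.
 */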
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	del_timer_sync(&rxnet->client_conn_reap_timer);

	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
		_debug("destroy: queue failed");

	_leave("");
}
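/*
 * Clean up the client connections on a local endpoint.
 */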
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn, *tmp;
	struct rxrpc_net *rxnet = local->rxnet;
	LIST_HEAD(graveyard);

	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);

	list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
				 cache_link) {
		if (conn->params.local == local) {
			trace_rxrpc_client(conn, -1, rxrpc_client_discard);
			list_move(&conn->cache_link, &graveyard);
		}
	}

	spin_unlock(&rxnet->client_conn_cache_lock);

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next,
				  struct rxrpc_connection, cache_link);
		list_del_init(&conn->cache_link);
		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn);
	}

	_leave(" [culled]");
}