0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #include <linux/device.h>
0018 #include "xpc.h"
0019
0020
0021
0022
0023
0024
0025
/*
 * Drive the channel toward the connected state once both sides have
 * requested a connection (local OPENREQUEST and remote ROPENREQUEST seen).
 *
 * Must be called with ch->lock held; the lock is dropped and re-acquired
 * around the arch setup_msg_structures() call but is held again on return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	lockdep_assert_held(&ch->lock);

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		/* setup may block/allocate, so release the channel lock */
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_arch_ops.setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
		else
			ch->flags |= XPC_C_SETUP;

		/* state may have changed while the lock was dropped */
		if (ch->flags & XPC_C_DISCONNECTING)
			return;
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_arch_ops.send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	if (!(ch->flags & XPC_C_OPENCOMPLETE)) {
		ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED);
		xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENCOMPLETE))
		return;

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	/* connected: clear all flags except CONNECTED and SETUP */
	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);
}
0075
0076
0077
0078
/*
 * Complete the teardown of a disconnecting channel once all activity on it
 * has quiesced, then mark it fully disconnected.
 *
 * Must be called with ch->lock held; the lock is dropped and re-acquired
 * around the xpc_disconnect_callout() call but is held again on return.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	lockdep_assert_held(&ch->lock);

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_arch_ops.partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_arch_ops.send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notification of their sent messages */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* NOTE(review): called with ch->lock held — presumably this
		 * arch hook must not block; confirm against the arch code. */
		xpc_arch_ops.notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		/* the registerer's callout may block, so drop the lock */
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	/* it's now safe to free the channel's message queues */
	xpc_arch_ops.teardown_msg_structures(ch);

	ch->func = NULL;
	ch->key = NULL;
	ch->entry_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because the message structures were just torn down),
	 * but preserve XPC_C_WDISCONNECT if it was set so the waiter below
	 * (and xpc_disconnect_wait()) can see it.
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* someone is blocked in xpc_initiate_disconnect(); wake them */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_AS_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}
0178
0179
0180
0181
/*
 * Process the open/close chctl flags received from the remote partition for
 * the given channel, advancing the channel's connect/disconnect state
 * machine accordingly.  The channel lock is taken here and released before
 * any kthread creation at the end.
 */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
				  u8 chctl_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;
	enum xp_retval ret;
	int create_kthread = 0;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing chctl flags until the thread waiting on
		 * the disconnect has had a chance to see that the channel is
		 * disconnected; they get replayed via delayed_chctl_flags.
		 */
		ch->delayed_chctl_flags |= chctl_flags;
		goto out;
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is already set we were waiting for a
		 * RCLOSEREPLY, which must be packed with this CLOSEREQUEST
		 * in chctl_flags (the remote has started a new cycle).
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
			chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			/* reprocess remaining flags against the new state */
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
				if (part->chctl.flags[ch_number] &
				    XPC_CHCTL_OPENREQUEST) {
					/*
					 * An OPENREQUEST arrived separately;
					 * requeue this CLOSEREQUEST so it is
					 * processed after it.
					 */
					DBUG_ON(ch->delayed_chctl_flags != 0);
					spin_lock(&part->chctl_lock);
					part->chctl.flags[ch_number] |=
					    XPC_CHCTL_CLOSEREQUEST;
					spin_unlock(&part->chctl_lock);
				}
				goto out;
			}

			/* the CLOSEREQUEST targets a brand-new connect cycle */
			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		/* the close supersedes any packed open-related flags */
		chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY |
		    XPC_CHCTL_OPENCOMPLETE);

		/*
		 * The meaningful CLOSEREQUEST connection state field is:
		 *	reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			/* sanitize the remote-supplied reason code */
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
			goto out;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
			"%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			if (part->chctl.flags[ch_number] &
			    XPC_CHCTL_CLOSEREQUEST) {
				/*
				 * The matching CLOSEREQUEST hasn't been
				 * processed yet; requeue this CLOSEREPLY to
				 * follow it.
				 */
				DBUG_ON(ch->delayed_chctl_flags != 0);
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch_number] |=
				    XPC_CHCTL_CLOSEREPLY;
				spin_unlock(&part->chctl_lock);
			}
			goto out;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->entry_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_AS_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			goto out;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			/* replay once the disconnect has fully completed */
			ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
			goto out;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *	entry_size = size of channel's messages in bytes
		 *	local_nentries = remote partition's local_nentries
		 */
		if (args->entry_size == 0 || args->local_nentries == 0) {
			/* args are invalid; ignore this OPENREQUEST */
			goto out;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->entry_size != ch->entry_size) {
				/* both sides must agree on message size */
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				goto out;
			}
		} else {
			ch->entry_size = args->entry_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
			"0x%lx, local_nentries=%d, remote_nentries=%d) "
			"received from partid=%d, channel=%d\n",
			args->local_msgqueue_pa, args->local_nentries,
			args->remote_nentries, ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
			goto out;

		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			/* an OPENREPLY with no outstanding OPENREQUEST */
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *	local_msgqueue_pa = physical address of remote
		 *			    partition's local_msgqueue
		 *	local_nentries = remote partition's local_nentries
		 *	remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ret = xpc_arch_ops.save_remote_msgqueue_pa(ch,
						      args->local_msgqueue_pa);
		if (ret != xpSuccess) {
			XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
			goto out;
		}
		ch->flags |= XPC_C_ROPENREPLY;

		/* both sides converge on the smaller queue sizes */
		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
			goto out;

		if (!(ch->flags & XPC_C_OPENREQUEST) ||
		    !(ch->flags & XPC_C_OPENREPLY)) {
			/* protocol violation: OPENCOMPLETE out of sequence */
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY));
		DBUG_ON(!(ch->flags & XPC_C_CONNECTED));

		ch->flags |= XPC_C_ROPENCOMPLETE;

		xpc_process_connect(ch, &irq_flags);
		/* defer kthread creation until after the lock is dropped */
		create_kthread = 1;
	}

out:
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (create_kthread)
		xpc_create_kthreads(ch, 1, 0);
}
0460
0461
0462
0463
/*
 * Attempt to establish a channel connection to a remote partition using the
 * parameters the local registerer supplied at registration time.
 *
 * Returns xpRetry if the registration mutex could not be acquired without
 * blocking, xpUnregistered if no registerer exists for this channel, the
 * channel's disconnect reason if it is disconnecting, xpUnequalMsgSizes on
 * a message-size mismatch with the remote side, or xpSuccess.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->entry_size != ch->entry_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Release the registration mutex before calling
			 * XPC_DISCONNECT_CHANNEL(): we're done with the part
			 * that required it, and XPC_DISCONNECT_CHANNEL()
			 * itself drops and re-acquires the channel lock as
			 * needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->entry_size = registration->entry_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}
0544
/*
 * Process the channel-control flags the remote partition has sent us:
 * handle open/close protocol flags, push disconnecting channels forward,
 * initiate connects for registered-but-unconnected channels, and hand
 * message-related flags to the arch layer.
 */
void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	/* atomically fetch-and-clear all pending chctl flags for this part */
	chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags first, then
		 * deal with connecting or disconnecting the channel as
		 * required.
		 */
		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		/* snapshot the flags as of this point */
		ch_flags = ch->flags;

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				/* failure here is retried on a later pass */
				(void)xpc_connect_channel(ch);
			}
			continue;
		}

		/*
		 * Process any message related chctl flags; this may involve
		 * the activation of kthreads to deliver any pending messages
		 * sent from the other partition.
		 */
		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_arch_ops.process_msg_chctl_flags(part, ch_number);
	}
}
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
/*
 * Called when a partition is being deactivated: disconnect every channel
 * associated with it for the given reason, then wake the channel manager
 * so the disconnects get processed.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/* hold a msgqueue ref across the disconnect */
		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}
0649
0650
0651
0652
0653
0654 void
0655 xpc_initiate_connect(int ch_number)
0656 {
0657 short partid;
0658 struct xpc_partition *part;
0659
0660 DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
0661
0662 for (partid = 0; partid < xp_max_npartitions; partid++) {
0663 part = &xpc_partitions[partid];
0664
0665 if (xpc_part_ref(part)) {
0666
0667
0668
0669
0670 xpc_wakeup_channel_mgr(part);
0671 xpc_part_deref(part);
0672 }
0673 }
0674 }
0675
0676 void
0677 xpc_connected_callout(struct xpc_channel *ch)
0678 {
0679
0680
0681 if (ch->func != NULL) {
0682 dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
0683 "partid=%d, channel=%d\n", ch->partid, ch->number);
0684
0685 ch->func(xpConnected, ch->partid, ch->number,
0686 (void *)(u64)ch->local_nentries, ch->key);
0687
0688 dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
0689 "partid=%d, channel=%d\n", ch->partid, ch->number);
0690 }
0691 }
0692
0693
0694
0695
0696
0697
0698
0699
0700
0701
0702
0703
0704
0705
/*
 * Initiate the disconnect of channel ch_number on every active partition,
 * then wait (via xpc_disconnect_wait()) for those disconnects to complete.
 *
 * Arguments:
 *	ch_number - channel # to disconnect.
 */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				/* mark that someone is waiting on this one */
				ch->flags |= XPC_C_WDISCONNECT;

				XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
						       &irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	xpc_disconnect_wait(ch_number);
}
0742
0743
0744
0745
0746
0747
0748
0749
0750
0751
/*
 * Start disconnecting a channel and reflect that to everyone who may be
 * waiting on it: record the reason (and the caller's source line), send a
 * CLOSEREQUEST to the remote side, and wake idle kthreads and blocked
 * message allocators.
 *
 * Must be called with ch->lock held; the lock is dropped and re-acquired
 * internally but is held again on return.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	lockdep_assert_held(&ch->lock);

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these flags may not have been set to begin with */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_arch_ops.send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the disconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}
0799
0800 void
0801 xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
0802 {
0803
0804
0805
0806
0807
0808
0809 if (ch->func != NULL) {
0810 dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
0811 "channel=%d\n", reason, ch->partid, ch->number);
0812
0813 ch->func(reason, ch->partid, ch->number, NULL, ch->key);
0814
0815 dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
0816 "channel=%d\n", reason, ch->partid, ch->number);
0817 }
0818 }
0819
0820
0821
0822
0823
/*
 * Wait briefly (one jiffy) for space to become available on the channel's
 * message queue.  Returns the channel's disconnect reason if it is
 * disconnecting, xpTimeout if the wait timed out, or xpInterrupted if the
 * sleep ended early (e.g. woken by xpc_disconnect_channel()).
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;
	DEFINE_WAIT(wait);

	if (ch->flags & XPC_C_DISCONNECTING) {
		/* NOTE(review): xpInterrupted appears reserved for this
		 * function's own early-wakeup return below — confirm */
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	/* advertise that we're waiting, then sleep for at most 1 jiffy */
	atomic_inc(&ch->n_on_msg_allocate_wq);
	prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
	ret = schedule_timeout(1);
	finish_wait(&ch->msg_allocate_wq, &wait);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		/* schedule_timeout() returned 0: full timeout elapsed */
		ret = xpTimeout;
	} else {
		/* woken before the timeout expired */
		ret = xpInterrupted;
	}

	return ret;
}
0852
0853
0854
0855
0856
0857
0858
0859
0860
0861
0862
0863
0864
0865
0866
0867
0868
0869
0870
0871 enum xp_retval
0872 xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
0873 u16 payload_size)
0874 {
0875 struct xpc_partition *part = &xpc_partitions[partid];
0876 enum xp_retval ret = xpUnknownReason;
0877
0878 dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
0879 partid, ch_number);
0880
0881 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
0882 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
0883 DBUG_ON(payload == NULL);
0884
0885 if (xpc_part_ref(part)) {
0886 ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
0887 flags, payload, payload_size, 0, NULL, NULL);
0888 xpc_part_deref(part);
0889 }
0890
0891 return ret;
0892 }
0893
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
0906
0907
0908
0909
0910
0911
0912
0913
0914
0915
0916
0917
0918
0919
0920
0921
0922 enum xp_retval
0923 xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
0924 u16 payload_size, xpc_notify_func func, void *key)
0925 {
0926 struct xpc_partition *part = &xpc_partitions[partid];
0927 enum xp_retval ret = xpUnknownReason;
0928
0929 dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
0930 partid, ch_number);
0931
0932 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
0933 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
0934 DBUG_ON(payload == NULL);
0935 DBUG_ON(func == NULL);
0936
0937 if (xpc_part_ref(part)) {
0938 ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
0939 flags, payload, payload_size, XPC_N_CALL, func, key);
0940 xpc_part_deref(part);
0941 }
0942 return ret;
0943 }
0944
0945
0946
0947
0948 void
0949 xpc_deliver_payload(struct xpc_channel *ch)
0950 {
0951 void *payload;
0952
0953 payload = xpc_arch_ops.get_deliverable_payload(ch);
0954 if (payload != NULL) {
0955
0956
0957
0958
0959
0960
0961 xpc_msgqueue_ref(ch);
0962
0963 atomic_inc(&ch->kthreads_active);
0964
0965 if (ch->func != NULL) {
0966 dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
0967 "partid=%d channel=%d\n", payload, ch->partid,
0968 ch->number);
0969
0970
0971 ch->func(xpMsgReceived, ch->partid, ch->number, payload,
0972 ch->key);
0973
0974 dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
0975 "partid=%d channel=%d\n", payload, ch->partid,
0976 ch->number);
0977 }
0978
0979 atomic_dec(&ch->kthreads_active);
0980 }
0981 }
0982
0983
0984
0985
0986
0987
0988
0989
0990
0991
0992
0993
0994
0995
0996
0997 void
0998 xpc_initiate_received(short partid, int ch_number, void *payload)
0999 {
1000 struct xpc_partition *part = &xpc_partitions[partid];
1001 struct xpc_channel *ch;
1002
1003 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
1004 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
1005
1006 ch = &part->channels[ch_number];
1007 xpc_arch_ops.received_payload(ch, payload);
1008
1009
1010 xpc_msgqueue_deref(ch);
1011 }