Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * SSH request transport layer.
0004  *
0005  * Copyright (C) 2019-2022 Maximilian Luz <luzmaximilian@gmail.com>
0006  */
0007 
0008 #include <asm/unaligned.h>
0009 #include <linux/atomic.h>
0010 #include <linux/completion.h>
0011 #include <linux/error-injection.h>
0012 #include <linux/ktime.h>
0013 #include <linux/limits.h>
0014 #include <linux/list.h>
0015 #include <linux/slab.h>
0016 #include <linux/spinlock.h>
0017 #include <linux/types.h>
0018 #include <linux/workqueue.h>
0019 
0020 #include <linux/surface_aggregator/serial_hub.h>
0021 #include <linux/surface_aggregator/controller.h>
0022 
0023 #include "ssh_packet_layer.h"
0024 #include "ssh_request_layer.h"
0025 
0026 #include "trace.h"
0027 
0028 /*
0029  * SSH_RTL_REQUEST_TIMEOUT - Request timeout.
0030  *
0031  * Timeout as ktime_t delta for request responses. If we have not received a
0032  * response in this time-frame after finishing the underlying packet
0033  * transmission, the request will be completed with %-ETIMEDOUT as status
0034  * code.
0035  */
0036 #define SSH_RTL_REQUEST_TIMEOUT         ms_to_ktime(3000)
0037 
0038 /*
0039  * SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity.
0040  *
0041  * Time-resolution for timeouts. Should be larger than one jiffy to avoid
0042  * direct re-scheduling of reaper work_struct.
0043  */
0044 #define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION  ms_to_ktime(max(2000 / HZ, 50))
0045 
0046 /*
0047  * SSH_RTL_MAX_PENDING - Maximum number of pending requests.
0048  *
0049  * Maximum number of requests concurrently waiting to be completed (i.e.
0050  * waiting for the corresponding packet transmission to finish if they don't
0051  * have a response or waiting for a response if they have one).
0052  */
0053 #define SSH_RTL_MAX_PENDING     3
0054 
0055 /*
0056  * SSH_RTL_TX_BATCH - Maximum number of requests processed per work execution.
0057  * Used to prevent livelocking of the workqueue. Value chosen via educated
0058  * guess, may be adjusted.
0059  */
0060 #define SSH_RTL_TX_BATCH        10
0061 
#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION

/**
 * ssh_rtl_should_drop_response() - Error injection hook to drop request
 * responses.
 *
 * Useful to cause request transmission timeouts in the driver by dropping the
 * response to a request.
 *
 * Return: Returns %false by default; error injection may overwrite the return
 * value to %true to simulate a dropped response.
 */
static noinline bool ssh_rtl_should_drop_response(void)
{
    return false;
}
ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);

#else

/* Stub used when error injection support is compiled out: never drop. */
static inline bool ssh_rtl_should_drop_response(void)
{
    return false;
}

#endif
0085 
0086 static u16 ssh_request_get_rqid(struct ssh_request *rqst)
0087 {
0088     return get_unaligned_le16(rqst->packet.data.ptr
0089                   + SSH_MSGOFFSET_COMMAND(rqid));
0090 }
0091 
0092 static u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
0093 {
0094     if (!rqst->packet.data.ptr)
0095         return U32_MAX;
0096 
0097     return ssh_request_get_rqid(rqst);
0098 }
0099 
0100 static void ssh_rtl_queue_remove(struct ssh_request *rqst)
0101 {
0102     struct ssh_rtl *rtl = ssh_request_rtl(rqst);
0103 
0104     spin_lock(&rtl->queue.lock);
0105 
0106     if (!test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state)) {
0107         spin_unlock(&rtl->queue.lock);
0108         return;
0109     }
0110 
0111     list_del(&rqst->node);
0112 
0113     spin_unlock(&rtl->queue.lock);
0114     ssh_request_put(rqst);
0115 }
0116 
0117 static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
0118 {
0119     bool empty;
0120 
0121     spin_lock(&rtl->queue.lock);
0122     empty = list_empty(&rtl->queue.head);
0123     spin_unlock(&rtl->queue.lock);
0124 
0125     return empty;
0126 }
0127 
0128 static void ssh_rtl_pending_remove(struct ssh_request *rqst)
0129 {
0130     struct ssh_rtl *rtl = ssh_request_rtl(rqst);
0131 
0132     spin_lock(&rtl->pending.lock);
0133 
0134     if (!test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
0135         spin_unlock(&rtl->pending.lock);
0136         return;
0137     }
0138 
0139     atomic_dec(&rtl->pending.count);
0140     list_del(&rqst->node);
0141 
0142     spin_unlock(&rtl->pending.lock);
0143 
0144     ssh_request_put(rqst);
0145 }
0146 
0147 static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
0148 {
0149     struct ssh_rtl *rtl = ssh_request_rtl(rqst);
0150 
0151     spin_lock(&rtl->pending.lock);
0152 
0153     if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
0154         spin_unlock(&rtl->pending.lock);
0155         return -EINVAL;
0156     }
0157 
0158     if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
0159         spin_unlock(&rtl->pending.lock);
0160         return -EALREADY;
0161     }
0162 
0163     atomic_inc(&rtl->pending.count);
0164     list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);
0165 
0166     spin_unlock(&rtl->pending.lock);
0167     return 0;
0168 }
0169 
/*
 * Complete the given request with the provided status code and no response
 * payload, invoking the request's completion callback directly.
 */
static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
{
    struct ssh_rtl *rtl = ssh_request_rtl(rqst);

    trace_ssam_request_complete(rqst, status);

    /* rtl/ptl may not be set if we're canceling before submitting. */
    rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
             ssh_request_get_rqid_safe(rqst), status);

    rqst->ops->complete(rqst, NULL, NULL, status);
}
0182 
/*
 * Complete the given request successfully (status zero) with the received
 * response command and payload data, invoking its completion callback.
 */
static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
                      const struct ssh_command *cmd,
                      const struct ssam_span *data)
{
    struct ssh_rtl *rtl = ssh_request_rtl(rqst);

    trace_ssam_request_complete(rqst, 0);

    rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
        ssh_request_get_rqid(rqst));

    rqst->ops->complete(rqst, cmd, data, 0);
}
0196 
0197 static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
0198 {
0199     struct ssh_rtl *rtl = ssh_request_rtl(rqst);
0200 
0201     if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
0202         return !atomic_read(&rtl->pending.count);
0203 
0204     return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
0205 }
0206 
/*
 * Remove and return the next transmittable request from the submission
 * queue, marking it as transmitting. The queue's reference to the request is
 * transferred to the caller (see ssh_rtl_tx_try_process_one(), which drops
 * it when done).
 *
 * Return: Returns the next request to transmit, ERR_PTR(-ENOENT) if there is
 * no transmittable request in the queue, or ERR_PTR(-EBUSY) if the next
 * request cannot be processed yet (see ssh_rtl_tx_can_process()).
 */
static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
{
    struct ssh_request *rqst = ERR_PTR(-ENOENT);
    struct ssh_request *p, *n;

    spin_lock(&rtl->queue.lock);

    /* Find first non-locked request and remove it. */
    list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
        if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
            continue;

        if (!ssh_rtl_tx_can_process(p)) {
            rqst = ERR_PTR(-EBUSY);
            break;
        }

        /* Remove from queue and mark as transmitting. */
        set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
        /* Ensure state never gets zero. */
        smp_mb__before_atomic();
        clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);

        list_del(&p->node);

        rqst = p;
        break;
    }

    spin_unlock(&rtl->queue.lock);
    return rqst;
}
0239 
/*
 * Fetch the next transmittable request from the queue, mark it as pending,
 * and submit its underlying packet to the packet layer.
 *
 * Return: Returns zero on success, %-ENOENT if there are no requests to
 * process, %-EBUSY if the next request cannot be processed yet, %-ESHUTDOWN
 * if the packet layer is shutting down, or %-EAGAIN if processing this
 * particular request failed but other requests may still be processed.
 */
static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
{
    struct ssh_request *rqst;
    int status;

    /* Get and prepare next request for transmit. */
    rqst = ssh_rtl_tx_next(rtl);
    if (IS_ERR(rqst))
        return PTR_ERR(rqst);

    /* Add it to/mark it as pending. */
    status = ssh_rtl_tx_pending_push(rqst);
    if (status) {
        ssh_request_put(rqst);
        return -EAGAIN;
    }

    /* Submit packet. */
    status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
    if (status == -ESHUTDOWN) {
        /*
         * Packet has been refused due to the packet layer shutting
         * down. Complete it here.
         */
        set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
        /*
         * Note: A barrier is not required here, as there are only two
         * references in the system at this point: The one that we have,
         * and the other one that belongs to the pending set. Due to the
         * request being marked as "transmitting", our process is the
         * only one allowed to remove the pending node and change the
         * state. Normally, the task would fall to the packet callback,
         * but as this is a path where submission failed, this callback
         * will never be executed.
         */

        ssh_rtl_pending_remove(rqst);
        ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);

        ssh_request_put(rqst);
        return -ESHUTDOWN;

    } else if (status) {
        /*
         * If submitting the packet failed and the packet layer isn't
         * shutting down, the packet has either been submitted/queued
         * before (-EALREADY, which cannot happen as we have
         * guaranteed that requests cannot be re-submitted), or the
         * packet was marked as locked (-EINVAL). To mark the packet
         * locked at this stage, the request, and thus the packets
         * itself, had to have been canceled. Simply drop the
         * reference. Cancellation itself will remove it from the set
         * of pending requests.
         */

        WARN_ON(status != -EINVAL);

        ssh_request_put(rqst);
        return -EAGAIN;
    }

    /* Drop the reference we obtained from ssh_rtl_tx_next(). */
    ssh_request_put(rqst);
    return 0;
}
0304 
0305 static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
0306 {
0307     if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
0308         return false;
0309 
0310     if (ssh_rtl_queue_empty(rtl))
0311         return false;
0312 
0313     return schedule_work(&rtl->tx.work);
0314 }
0315 
0316 static void ssh_rtl_tx_work_fn(struct work_struct *work)
0317 {
0318     struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
0319     unsigned int iterations = SSH_RTL_TX_BATCH;
0320     int status;
0321 
0322     /*
0323      * Try to be nice and not block/live-lock the workqueue: Run a maximum
0324      * of 10 tries, then re-submit if necessary. This should not be
0325      * necessary for normal execution, but guarantee it anyway.
0326      */
0327     do {
0328         status = ssh_rtl_tx_try_process_one(rtl);
0329         if (status == -ENOENT || status == -EBUSY)
0330             return;     /* No more requests to process. */
0331 
0332         if (status == -ESHUTDOWN) {
0333             /*
0334              * Packet system shutting down. No new packets can be
0335              * transmitted. Return silently, the party initiating
0336              * the shutdown should handle the rest.
0337              */
0338             return;
0339         }
0340 
0341         WARN_ON(status != 0 && status != -EAGAIN);
0342     } while (--iterations);
0343 
0344     /* Out of tries, reschedule. */
0345     ssh_rtl_tx_schedule(rtl);
0346 }
0347 
/**
 * ssh_rtl_submit() - Submit a request to the transport layer.
 * @rtl:  The request transport layer.
 * @rqst: The request to submit.
 *
 * Submits a request to the transport layer. A single request may not be
 * submitted multiple times without reinitializing it.
 *
 * On success, the request is placed on the submission queue (which takes its
 * own reference to the request) and the transmitter work is scheduled to
 * pick it up.
 *
 * Return: Returns zero on success, %-EINVAL if the request type is invalid or
 * the request has been canceled prior to submission, %-EALREADY if the
 * request has already been submitted, or %-ESHUTDOWN in case the request
 * transport layer has been shut down.
 */
int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
{
    trace_ssam_request_submit(rqst);

    /*
     * Ensure that requests expecting a response are sequenced. If this
     * invariant ever changes, see the comment in ssh_rtl_complete() on what
     * is required to be changed in the code.
     */
    if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
        if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
            return -EINVAL;

    spin_lock(&rtl->queue.lock);

    /*
     * Try to set ptl and check if this request has already been submitted.
     *
     * Must be inside lock as we might run into a lost update problem
     * otherwise: If this were outside of the lock, cancellation in
     * ssh_rtl_cancel_nonpending() may run after we've set the ptl
     * reference but before we enter the lock. In that case, we'd detect
     * that the request is being added to the queue and would try to remove
     * it from that, but removal might fail because it hasn't actually been
     * added yet. By putting this cmpxchg in the critical section, we
     * ensure that the queuing detection only triggers when we are already
     * in the critical section and the remove process will wait until the
     * push operation has been completed (via lock) due to that. Only then,
     * we can safely try to remove it.
     */
    if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl)) {
        spin_unlock(&rtl->queue.lock);
        return -EALREADY;
    }

    /*
     * Ensure that we set ptl reference before we continue modifying state.
     * This is required for non-pending cancellation. This barrier is paired
     * with the one in ssh_rtl_cancel_nonpending().
     *
     * By setting the ptl reference before we test for "locked", we can
     * check if the "locked" test may have already run. See comments in
     * ssh_rtl_cancel_nonpending() for more detail.
     */
    smp_mb__after_atomic();

    /* Refuse new submissions once the layer is shutting down. */
    if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
        spin_unlock(&rtl->queue.lock);
        return -ESHUTDOWN;
    }

    /* Locked means the request has been canceled before submission. */
    if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
        spin_unlock(&rtl->queue.lock);
        return -EINVAL;
    }

    /* The queue holds its own reference to the request. */
    set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
    list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);

    spin_unlock(&rtl->queue.lock);

    ssh_rtl_tx_schedule(rtl);
    return 0;
}
0425 
/*
 * Re-arm the timeout reaper so that it runs (at latest) at @expires. The
 * reaper is only re-scheduled if @expires undercuts the currently stored
 * expiration by more than the timeout resolution, to avoid constant
 * re-scheduling of the delayed work.
 */
static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
                       ktime_t expires)
{
    unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
    ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);

    spin_lock(&rtl->rtx_timeout.lock);

    /* Re-adjust / schedule reaper only if it is above resolution delta. */
    if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
        rtl->rtx_timeout.expires = expires;
        mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
    }

    spin_unlock(&rtl->rtx_timeout.lock);
}
0442 
/*
 * Start the response timeout for the given request: record its timestamp
 * and arm the timeout reaper for it. No-op if the request has been locked
 * (i.e. is being canceled/completed).
 */
static void ssh_rtl_timeout_start(struct ssh_request *rqst)
{
    struct ssh_rtl *rtl = ssh_request_rtl(rqst);
    ktime_t timestamp = ktime_get_coarse_boottime();
    ktime_t timeout = rtl->rtx_timeout.timeout;

    if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
        return;

    /*
     * Note: The timestamp gets set only once. This happens on the packet
     * callback. All other access to it is read-only.
     */
    WRITE_ONCE(rqst->timestamp, timestamp);
    /*
     * Ensure timestamp is set before starting the reaper. Paired with
     * implicit barrier following check on ssh_request_get_expiration() in
     * ssh_rtl_timeout_reap.
     */
    smp_mb__after_atomic();

    ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
}
0466 
/*
 * Handle a received command message (response): Look up the pending request
 * matching the message's request ID, remove it from the pending set, and
 * complete it with the received command and payload data. Responses without
 * a matching pending request are dropped with a warning; responses arriving
 * before the request's packet was ACKed complete the request with
 * %-EREMOTEIO.
 */
static void ssh_rtl_complete(struct ssh_rtl *rtl,
                 const struct ssh_command *command,
                 const struct ssam_span *command_data)
{
    struct ssh_request *r = NULL;
    struct ssh_request *p, *n;
    u16 rqid = get_unaligned_le16(&command->rqid);

    trace_ssam_rx_response_received(command, command_data->len);

    /*
     * Get request from pending based on request ID and mark it as response
     * received and locked.
     */
    spin_lock(&rtl->pending.lock);
    list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
        /* We generally expect requests to be processed in order. */
        if (unlikely(ssh_request_get_rqid(p) != rqid))
            continue;

        /* Simulate response timeout. */
        if (ssh_rtl_should_drop_response()) {
            spin_unlock(&rtl->pending.lock);

            trace_ssam_ei_rx_drop_response(p);
            rtl_info(rtl, "request error injection: dropping response for request %p\n",
                 &p->packet);
            return;
        }

        /*
         * Mark as "response received" and "locked" as we're going to
         * complete it.
         */
        set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
        set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
        /* Ensure state never gets zero. */
        smp_mb__before_atomic();
        clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);

        atomic_dec(&rtl->pending.count);
        list_del(&p->node);

        r = p;
        break;
    }
    spin_unlock(&rtl->pending.lock);

    if (!r) {
        rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = %#06x)\n",
             rqid);
        return;
    }

    /* If the request hasn't been completed yet, we will do this now. */
    if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
        ssh_request_put(r);
        ssh_rtl_tx_schedule(rtl);
        return;
    }

    /*
     * Make sure the request has been transmitted. In case of a sequenced
     * request, we are guaranteed that the completion callback will run on
     * the receiver thread directly when the ACK for the packet has been
     * received. Similarly, this function is guaranteed to run on the
     * receiver thread. Thus we are guaranteed that if the packet has been
     * successfully transmitted and received an ACK, the transmitted flag
     * has been set and is visible here.
     *
     * We are currently not handling unsequenced packets here, as those
     * should never expect a response as ensured in ssh_rtl_submit. If this
     * ever changes, one would have to test for
     *
     *  (r->state & (transmitting | transmitted))
     *
     * on unsequenced packets to determine if they could have been
     * transmitted. There are no synchronization guarantees as in the
     * sequenced case, since, in this case, the callback function will not
     * run on the same thread. Thus an exact determination is impossible.
     */
    if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
        rtl_err(rtl, "rtl: received response before ACK for request (rqid = %#06x)\n",
            rqid);

        /*
         * NB: Timeout has already been canceled, request already been
         * removed from pending and marked as locked and completed. As
         * we receive a "false" response, the packet might still be
         * queued though.
         */
        ssh_rtl_queue_remove(r);

        ssh_rtl_complete_with_status(r, -EREMOTEIO);
        ssh_request_put(r);

        ssh_rtl_tx_schedule(rtl);
        return;
    }

    /*
     * NB: Timeout has already been canceled, request already been
     * removed from pending and marked as locked and completed. The request
     * can also not be queued any more, as it has been marked as
     * transmitting and later transmitted. Thus no need to remove it from
     * anywhere.
     */

    ssh_rtl_complete_with_rsp(r, command, command_data);
    ssh_request_put(r);

    ssh_rtl_tx_schedule(rtl);
}
0580 
0581 static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
0582 {
0583     struct ssh_rtl *rtl;
0584     unsigned long flags, fixed;
0585     bool remove;
0586 
0587     /*
0588      * Handle unsubmitted request: Try to mark the packet as locked,
0589      * expecting the state to be zero (i.e. unsubmitted). Note that, if
0590      * setting the state worked, we might still be adding the packet to the
0591      * queue in a currently executing submit call. In that case, however,
0592      * ptl reference must have been set previously, as locked is checked
0593      * after setting ptl. Furthermore, when the ptl reference is set, the
0594      * submission process is guaranteed to have entered the critical
0595      * section. Thus only if we successfully locked this request and ptl is
0596      * NULL, we have successfully removed the request, i.e. we are
0597      * guaranteed that, due to the "locked" check in ssh_rtl_submit(), the
0598      * packet will never be added. Otherwise, we need to try and grab it
0599      * from the queue, where we are now guaranteed that the packet is or has
0600      * been due to the critical section.
0601      *
0602      * Note that if the cmpxchg() fails, we are guaranteed that ptl has
0603      * been set and is non-NULL, as states can only be nonzero after this
0604      * has been set. Also note that we need to fetch the static (type)
0605      * flags to ensure that they don't cause the cmpxchg() to fail.
0606      */
0607     fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
0608     flags = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);
0609 
0610     /*
0611      * Force correct ordering with regards to state and ptl reference access
0612      * to safe-guard cancellation to concurrent submission against a
0613      * lost-update problem. First try to exchange state, then also check
0614      * ptl if that worked. This barrier is paired with the
0615      * one in ssh_rtl_submit().
0616      */
0617     smp_mb__after_atomic();
0618 
0619     if (flags == fixed && !READ_ONCE(r->packet.ptl)) {
0620         if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
0621             return true;
0622 
0623         ssh_rtl_complete_with_status(r, -ECANCELED);
0624         return true;
0625     }
0626 
0627     rtl = ssh_request_rtl(r);
0628     spin_lock(&rtl->queue.lock);
0629 
0630     /*
0631      * Note: 1) Requests cannot be re-submitted. 2) If a request is
0632      * queued, it cannot be "transmitting"/"pending" yet. Thus, if we
0633      * successfully remove the request here, we have removed all its
0634      * occurrences in the system.
0635      */
0636 
0637     remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
0638     if (!remove) {
0639         spin_unlock(&rtl->queue.lock);
0640         return false;
0641     }
0642 
0643     set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
0644     list_del(&r->node);
0645 
0646     spin_unlock(&rtl->queue.lock);
0647 
0648     ssh_request_put(r); /* Drop reference obtained from queue. */
0649 
0650     if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
0651         return true;
0652 
0653     ssh_rtl_complete_with_status(r, -ECANCELED);
0654     return true;
0655 }
0656 
/*
 * Cancel a request that may already be pending (i.e. submitted to the packet
 * layer): Lock the request, cancel its underlying packet, and complete the
 * request with %-ECANCELED if nothing else has completed it first.
 *
 * Return: Always returns %true (the request is canceled or already
 * completed on every path).
 */
static bool ssh_rtl_cancel_pending(struct ssh_request *r)
{
    /* If the packet is already locked, it's going to be removed shortly. */
    if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
        return true;

    /*
     * Now that we have locked the packet, we have guaranteed that it can't
     * be added to the system any more. If ptl is NULL, the locked
     * check in ssh_rtl_submit() has not been run and any submission,
     * currently in progress or called later, won't add the packet. Thus we
     * can directly complete it.
     *
     * The implicit memory barrier of test_and_set_bit() should be enough
     * to ensure that the correct order (first lock, then check ptl) is
     * ensured. This is paired with the barrier in ssh_rtl_submit().
     */
    if (!READ_ONCE(r->packet.ptl)) {
        if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
            return true;

        ssh_rtl_complete_with_status(r, -ECANCELED);
        return true;
    }

    /*
     * Try to cancel the packet. If the packet has not been completed yet,
     * this will subsequently (and synchronously) call the completion
     * callback of the packet, which will complete the request.
     */
    ssh_ptl_cancel(&r->packet);

    /*
     * If the packet has been completed with success, i.e. has not been
     * canceled by the above call, the request may not have been completed
     * yet (may be waiting for a response). Check if we need to do this
     * here.
     */
    if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
        return true;

    ssh_rtl_queue_remove(r);
    ssh_rtl_pending_remove(r);
    ssh_rtl_complete_with_status(r, -ECANCELED);

    return true;
}
0704 
0705 /**
0706  * ssh_rtl_cancel() - Cancel request.
0707  * @rqst:    The request to cancel.
0708  * @pending: Whether to also cancel pending requests.
0709  *
0710  * Cancels the given request. If @pending is %false, this will not cancel
0711  * pending requests, i.e. requests that have already been submitted to the
0712  * packet layer but not been completed yet. If @pending is %true, this will
0713  * cancel the given request regardless of the state it is in.
0714  *
0715  * If the request has been canceled by calling this function, both completion
0716  * and release callbacks of the request will be executed in a reasonable
0717  * time-frame. This may happen during execution of this function, however,
0718  * there is no guarantee for this. For example, a request currently
0719  * transmitting will be canceled/completed only after transmission has
0720  * completed, and the respective callbacks will be executed on the transmitter
0721  * thread, which may happen during, but also some time after execution of the
0722  * cancel function.
0723  *
0724  * Return: Returns %true if the given request has been canceled or completed,
0725  * either by this function or prior to calling this function, %false
0726  * otherwise. If @pending is %true, this function will always return %true.
0727  */
0728 bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
0729 {
0730     struct ssh_rtl *rtl;
0731     bool canceled;
0732 
0733     if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
0734         return true;
0735 
0736     trace_ssam_request_cancel(rqst);
0737 
0738     if (pending)
0739         canceled = ssh_rtl_cancel_pending(rqst);
0740     else
0741         canceled = ssh_rtl_cancel_nonpending(rqst);
0742 
0743     /* Note: rtl may be NULL if request has not been submitted yet. */
0744     rtl = ssh_request_rtl(rqst);
0745     if (canceled && rtl)
0746         ssh_rtl_tx_schedule(rtl);
0747 
0748     return canceled;
0749 }
0750 
/*
 * Completion callback for the underlying packet of a request: On failure
 * (@status nonzero), locks and completes the request with that status. On
 * success, marks the request as transmitted and either starts the response
 * timeout (for requests expecting a response) or completes the request with
 * status zero.
 */
static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
{
    struct ssh_request *r = to_ssh_request(p);

    if (unlikely(status)) {
        set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);

        if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
            return;

        /*
         * The packet may get canceled even though it has not been
         * submitted yet. The request may still be queued. Check the
         * queue and remove it if necessary. As the timeout would have
         * been started in this function on success, there's no need
         * to cancel it here.
         */
        ssh_rtl_queue_remove(r);
        ssh_rtl_pending_remove(r);
        ssh_rtl_complete_with_status(r, status);

        ssh_rtl_tx_schedule(ssh_request_rtl(r));
        return;
    }

    /* Update state: Mark as transmitted and clear transmitting. */
    set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
    /* Ensure state never gets zero. */
    smp_mb__before_atomic();
    clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);

    /* If we expect a response, we just need to start the timeout. */
    if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
        /*
         * Note: This is the only place where the timestamp gets set,
         * all other access to it is read-only.
         */
        ssh_rtl_timeout_start(r);
        return;
    }

    /*
     * If we don't expect a response, lock, remove, and complete the
     * request. Note that, at this point, the request is guaranteed to have
     * left the queue and no timeout has been started. Thus we only need to
     * remove it from pending. If the request has already been completed (it
     * may have been canceled) return.
     */

    set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
    if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
        return;

    ssh_rtl_pending_remove(r);
    ssh_rtl_complete_with_status(r, 0);

    ssh_rtl_tx_schedule(ssh_request_rtl(r));
}
0809 
0810 static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeout)
0811 {
0812     ktime_t timestamp = READ_ONCE(r->timestamp);
0813 
0814     if (timestamp != KTIME_MAX)
0815         return ktime_add(timestamp, timeout);
0816     else
0817         return KTIME_MAX;
0818 }
0819 
/*
 * Timeout reaper (delayed work): Claims all pending requests whose timeout
 * has expired, completes them with %-ETIMEDOUT, and re-arms itself for the
 * next earliest expiration, if any.
 */
static void ssh_rtl_timeout_reap(struct work_struct *work)
{
    struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
    struct ssh_request *r, *n;
    LIST_HEAD(claimed);
    ktime_t now = ktime_get_coarse_boottime();
    ktime_t timeout = rtl->rtx_timeout.timeout;
    ktime_t next = KTIME_MAX;

    trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count));

    /*
     * Mark reaper as "not pending". This is done before checking any
     * requests to avoid lost-update type problems.
     */
    spin_lock(&rtl->rtx_timeout.lock);
    rtl->rtx_timeout.expires = KTIME_MAX;
    spin_unlock(&rtl->rtx_timeout.lock);

    /* Phase 1: Claim expired requests off the pending set, under lock. */
    spin_lock(&rtl->pending.lock);
    list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
        ktime_t expires = ssh_request_get_expiration(r, timeout);

        /*
         * Check if the timeout hasn't expired yet. Find out next
         * expiration date to be handled after this run.
         */
        if (ktime_after(expires, now)) {
            next = ktime_before(expires, next) ? expires : next;
            continue;
        }

        /* Avoid further transitions if locked. */
        if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
            continue;

        /*
         * We have now marked the packet as locked. Thus it cannot be
         * added to the pending or queued lists again after we've
         * removed it here. We can therefore re-use the node of this
         * packet temporarily.
         */

        clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);

        atomic_dec(&rtl->pending.count);
        list_move_tail(&r->node, &claimed);
    }
    spin_unlock(&rtl->pending.lock);

    /* Phase 2: Cancel and complete the request, outside the lock. */
    list_for_each_entry_safe(r, n, &claimed, node) {
        trace_ssam_request_timeout(r);

        /*
         * At this point we've removed the packet from pending. This
         * means that we've obtained the last (only) reference of the
         * system to it. Thus we can just complete it.
         */
        if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
            ssh_rtl_complete_with_status(r, -ETIMEDOUT);

        /*
         * Drop the reference we've obtained by removing it from the
         * pending set.
         */
        list_del(&r->node);
        ssh_request_put(r);
    }

    /* Ensure that the reaper doesn't run again immediately. */
    next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
    if (next != KTIME_MAX)
        ssh_rtl_timeout_reaper_mod(rtl, now, next);

    ssh_rtl_tx_schedule(rtl);
}
0897 
0898 static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
0899                  const struct ssam_span *data)
0900 {
0901     trace_ssam_rx_event_received(cmd, data->len);
0902 
0903     rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
0904         get_unaligned_le16(&cmd->rqid));
0905 
0906     rtl->ops.handle_event(rtl, cmd, data);
0907 }
0908 
0909 static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
0910 {
0911     struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
0912     struct device *dev = &p->serdev->dev;
0913     struct ssh_command *command;
0914     struct ssam_span command_data;
0915 
0916     if (sshp_parse_command(dev, data, &command, &command_data))
0917         return;
0918 
0919     if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
0920         ssh_rtl_rx_event(rtl, command, &command_data);
0921     else
0922         ssh_rtl_complete(rtl, command, &command_data);
0923 }
0924 
0925 static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
0926 {
0927     if (!data->len) {
0928         ptl_err(p, "rtl: rx: no data frame payload\n");
0929         return;
0930     }
0931 
0932     switch (data->ptr[0]) {
0933     case SSH_PLD_TYPE_CMD:
0934         ssh_rtl_rx_command(p, data);
0935         break;
0936 
0937     default:
0938         ptl_err(p, "rtl: rx: unknown frame payload type (type: %#04x)\n",
0939             data->ptr[0]);
0940         break;
0941     }
0942 }
0943 
0944 static void ssh_rtl_packet_release(struct ssh_packet *p)
0945 {
0946     struct ssh_request *rqst;
0947 
0948     rqst = to_ssh_request(p);
0949     rqst->ops->release(rqst);
0950 }
0951 
/* Packet-layer callbacks for the packet embedded in each SSH request. */
static const struct ssh_packet_ops ssh_rtl_packet_ops = {
    .complete = ssh_rtl_packet_callback,
    .release = ssh_rtl_packet_release,
};
0956 
0957 /**
0958  * ssh_request_init() - Initialize SSH request.
0959  * @rqst:  The request to initialize.
0960  * @flags: Request flags, determining the type of the request.
0961  * @ops:   Request operations.
0962  *
0963  * Initializes the given SSH request and underlying packet. Sets the message
0964  * buffer pointer to %NULL and the message buffer length to zero. This buffer
0965  * has to be set separately via ssh_request_set_data() before submission and
0966  * must contain a valid SSH request message.
0967  *
0968  * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
0969  */
0970 int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
0971              const struct ssh_request_ops *ops)
0972 {
0973     unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT);
0974 
0975     /* Unsequenced requests cannot have a response. */
0976     if (flags & SSAM_REQUEST_UNSEQUENCED && flags & SSAM_REQUEST_HAS_RESPONSE)
0977         return -EINVAL;
0978 
0979     if (!(flags & SSAM_REQUEST_UNSEQUENCED))
0980         type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);
0981 
0982     ssh_packet_init(&rqst->packet, type, SSH_PACKET_PRIORITY(DATA, 0),
0983             &ssh_rtl_packet_ops);
0984 
0985     INIT_LIST_HEAD(&rqst->node);
0986 
0987     rqst->state = 0;
0988     if (flags & SSAM_REQUEST_HAS_RESPONSE)
0989         rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
0990 
0991     rqst->timestamp = KTIME_MAX;
0992     rqst->ops = ops;
0993 
0994     return 0;
0995 }
0996 
0997 /**
0998  * ssh_rtl_init() - Initialize request transport layer.
0999  * @rtl:    The request transport layer to initialize.
1000  * @serdev: The underlying serial device, i.e. the lower-level transport.
1001  * @ops:    Request transport layer operations.
1002  *
1003  * Initializes the given request transport layer and associated packet
1004  * transport layer. Transmitter and receiver threads must be started
1005  * separately via ssh_rtl_start(), after the request-layer has been
1006  * initialized and the lower-level serial device layer has been set up.
1007  *
1008  * Return: Returns zero on success and a nonzero error code on failure.
1009  */
1010 int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
1011          const struct ssh_rtl_ops *ops)
1012 {
1013     struct ssh_ptl_ops ptl_ops;
1014     int status;
1015 
1016     ptl_ops.data_received = ssh_rtl_rx_data;
1017 
1018     status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
1019     if (status)
1020         return status;
1021 
1022     spin_lock_init(&rtl->queue.lock);
1023     INIT_LIST_HEAD(&rtl->queue.head);
1024 
1025     spin_lock_init(&rtl->pending.lock);
1026     INIT_LIST_HEAD(&rtl->pending.head);
1027     atomic_set_release(&rtl->pending.count, 0);
1028 
1029     INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
1030 
1031     spin_lock_init(&rtl->rtx_timeout.lock);
1032     rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
1033     rtl->rtx_timeout.expires = KTIME_MAX;
1034     INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);
1035 
1036     rtl->ops = *ops;
1037 
1038     return 0;
1039 }
1040 
1041 /**
1042  * ssh_rtl_destroy() - Deinitialize request transport layer.
1043  * @rtl: The request transport layer to deinitialize.
1044  *
1045  * Deinitializes the given request transport layer and frees resources
1046  * associated with it. If receiver and/or transmitter threads have been
1047  * started, the layer must first be shut down via ssh_rtl_shutdown() before
1048  * this function can be called.
1049  */
void ssh_rtl_destroy(struct ssh_rtl *rtl)
{
    /* Only the underlying packet transport layer holds resources to free. */
    ssh_ptl_destroy(&rtl->ptl);
}
1054 
1055 /**
1056  * ssh_rtl_start() - Start request transmitter and receiver.
1057  * @rtl: The request transport layer.
1058  *
1059  * Return: Returns zero on success, a negative error code on failure.
1060  */
1061 int ssh_rtl_start(struct ssh_rtl *rtl)
1062 {
1063     int status;
1064 
1065     status = ssh_ptl_tx_start(&rtl->ptl);
1066     if (status)
1067         return status;
1068 
1069     ssh_rtl_tx_schedule(rtl);
1070 
1071     status = ssh_ptl_rx_start(&rtl->ptl);
1072     if (status) {
1073         ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
1074         ssh_ptl_tx_stop(&rtl->ptl);
1075         return status;
1076     }
1077 
1078     return 0;
1079 }
1080 
/* Special request used by ssh_rtl_flush() to wait for completion in-line. */
struct ssh_flush_request {
    struct ssh_request base;        /* Underlying SSH request. */
    struct completion completion;   /* Signaled from the release() callback. */
    int status;                     /* Completion status of the request. */
};
1086 
1087 static void ssh_rtl_flush_request_complete(struct ssh_request *r,
1088                        const struct ssh_command *cmd,
1089                        const struct ssam_span *data,
1090                        int status)
1091 {
1092     struct ssh_flush_request *rqst;
1093 
1094     rqst = container_of(r, struct ssh_flush_request, base);
1095     rqst->status = status;
1096 }
1097 
1098 static void ssh_rtl_flush_request_release(struct ssh_request *r)
1099 {
1100     struct ssh_flush_request *rqst;
1101 
1102     rqst = container_of(r, struct ssh_flush_request, base);
1103     complete_all(&rqst->completion);
1104 }
1105 
/* Request callbacks for the special flush request used by ssh_rtl_flush(). */
static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
    .complete = ssh_rtl_flush_request_complete,
    .release = ssh_rtl_flush_request_release,
};
1110 
1111 /**
1112  * ssh_rtl_flush() - Flush the request transport layer.
1113  * @rtl:     request transport layer
1114  * @timeout: timeout for the flush operation in jiffies
1115  *
1116  * Queue a special flush request and wait for its completion. This request
1117  * will be completed after all other currently queued and pending requests
1118  * have been completed. Instead of a normal data packet, this request submits
1119  * a special flush packet, meaning that upon completion, also the underlying
1120  * packet transport layer has been flushed.
1121  *
1122  * Flushing the request layer guarantees that all previously submitted
1123  * requests have been fully completed before this call returns. Additionally,
1124  * flushing blocks execution of all later submitted requests until the flush
1125  * has been completed.
1126  *
1127  * If the caller ensures that no new requests are submitted after a call to
1128  * this function, the request transport layer is guaranteed to have no
1129  * remaining requests when this call returns. The same guarantee does not hold
1130  * for the packet layer, on which control packets may still be queued after
1131  * this call.
1132  *
1133  * Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and has
1134  * been canceled as a result of the timeout, or %-ESHUTDOWN if the packet
1135  * and/or request transport layer has been shut down before this call. May
1136  * also return %-EINTR if the underlying packet transmission has been
1137  * interrupted.
1138  */
int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
{
    const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED;
    struct ssh_flush_request rqst;
    int status;

    /* Set up an unsequenced request with special flush packet/request type. */
    ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
    rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
    rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
    rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);

    init_completion(&rqst.completion);

    status = ssh_rtl_submit(rtl, &rqst.base);
    if (status)
        return status;

    /*
     * Drop our local reference; the layer holds its own from submission.
     * The completion is signaled from the release() callback, so waiting
     * on it below is safe even after this put.
     */
    ssh_request_put(&rqst.base);

    if (!wait_for_completion_timeout(&rqst.completion, timeout)) {
        /* Timed out: cancel the request and wait for its release. */
        ssh_rtl_cancel(&rqst.base, true);
        wait_for_completion(&rqst.completion);
    }

    /* Flush requests should only ever complete with one of these statuses. */
    WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED &&
        rqst.status != -ESHUTDOWN && rqst.status != -EINTR);

    /* Cancellation here only happens via our own timeout above. */
    return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
}
1168 
1169 /**
1170  * ssh_rtl_shutdown() - Shut down request transport layer.
1171  * @rtl: The request transport layer.
1172  *
1173  * Shuts down the request transport layer, removing and canceling all queued
1174  * and pending requests. Requests canceled by this operation will be completed
1175  * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
1176  * stopped, the lower-level packet layer will be shutdown.
1177  *
1178  * As a result of this function, the transport layer will be marked as shut
1179  * down. Submission of requests after the transport layer has been shut down
1180  * will fail with %-ESHUTDOWN.
1181  */
void ssh_rtl_shutdown(struct ssh_rtl *rtl)
{
    struct ssh_request *r, *n;
    LIST_HEAD(claimed);
    int pending;

    set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
    /*
     * Ensure that the layer gets marked as shut-down before actually
     * stopping it. In combination with the check in ssh_rtl_submit(),
     * this guarantees that no new requests can be added and all already
     * queued requests are properly canceled.
     */
    smp_mb__after_atomic();

    /* Remove requests from queue. */
    spin_lock(&rtl->queue.lock);
    list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
        set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
        /* Ensure state never gets zero. */
        smp_mb__before_atomic();
        clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);

        /* Keep the queue's reference; it is dropped after completion below. */
        list_move_tail(&r->node, &claimed);
    }
    spin_unlock(&rtl->queue.lock);

    /*
     * We have now guaranteed that the queue is empty and no more new
     * requests can be submitted (i.e. it will stay empty). This means that
     * calling ssh_rtl_tx_schedule() will not schedule tx.work any more. So
     * we can simply call cancel_work_sync() on tx.work here and when that
     * returns, we've locked it down. This also means that after this call,
     * we don't submit any more packets to the underlying packet layer, so
     * we can also shut that down.
     */

    cancel_work_sync(&rtl->tx.work);
    ssh_ptl_shutdown(&rtl->ptl);
    cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);

    /*
     * Shutting down the packet layer should also have canceled all
     * requests. Thus the pending set should be empty. Attempt to handle
     * this gracefully anyways, even though this should be dead code.
     */

    pending = atomic_read(&rtl->pending.count);
    if (WARN_ON(pending)) {
        spin_lock(&rtl->pending.lock);
        list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
            set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
            /* Ensure state never gets zero. */
            smp_mb__before_atomic();
            clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);

            list_move_tail(&r->node, &claimed);
        }
        spin_unlock(&rtl->pending.lock);
    }

    /* Finally, cancel and complete the requests we claimed before. */
    list_for_each_entry_safe(r, n, &claimed, node) {
        /*
         * We need test_and_set() because we still might compete with
         * cancellation.
         */
        if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
            ssh_rtl_complete_with_status(r, -ESHUTDOWN);

        /*
         * Drop the reference we've obtained by removing it from the
         * lists.
         */
        list_del(&r->node);
        ssh_request_put(r);
    }
}
1259 }