0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * PowerNV OPAL asynchronous completion interfaces
0004  *
0005  * Copyright 2013-2017 IBM Corp.
0006  */
0007 
0008 #undef DEBUG
0009 
0010 #include <linux/kernel.h>
0011 #include <linux/init.h>
0012 #include <linux/slab.h>
0013 #include <linux/sched.h>
0014 #include <linux/semaphore.h>
0015 #include <linux/spinlock.h>
0016 #include <linux/wait.h>
0017 #include <linux/gfp.h>
0018 #include <linux/of.h>
0019 #include <asm/machdep.h>
0020 #include <asm/opal.h>
0021 
/*
 * Lifecycle of an async token slot:
 *
 *   UNALLOCATED -> ALLOCATED    __opal_async_get_token() claims a free slot
 *   ALLOCATED   -> DISPATCHED   wait_response_interruptible() marks it in-flight
 *   DISPATCHED  -> ABANDONED    released by the caller before OPAL responded;
 *                               the completion handler frees it later
 *   any         -> COMPLETED    opal_async_comp_event() stores the response
 *   COMPLETED/ALLOCATED -> UNALLOCATED   __opal_async_release_token()
 */
enum opal_async_token_state {
    ASYNC_TOKEN_UNALLOCATED = 0,
    ASYNC_TOKEN_ALLOCATED,
    ASYNC_TOKEN_DISPATCHED,
    ASYNC_TOKEN_ABANDONED,
    ASYNC_TOKEN_COMPLETED
};
0029 
/*
 * One slot per outstanding async OPAL call.  @state is protected by
 * opal_async_comp_lock; @response holds the copied OPAL completion
 * message once @state reaches ASYNC_TOKEN_COMPLETED.
 */
struct opal_async_token {
    enum opal_async_token_state state;
    struct opal_msg response;
};
0034 
/* Waiters blocked in opal_async_wait_response*() sleep here */
static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
/* Protects the ->state field of every token */
static DEFINE_SPINLOCK(opal_async_comp_lock);
/* Counts free tokens; initialised to opal_max_async_tokens in init */
static struct semaphore opal_async_sem;
/* Number of token slots, from the "opal-msg-async-num" DT property */
static unsigned int opal_max_async_tokens;
/* Token table, kcalloc'd in opal_async_comp_init() */
static struct opal_async_token *opal_async_tokens;
0040 
0041 static int __opal_async_get_token(void)
0042 {
0043     unsigned long flags;
0044     int i, token = -EBUSY;
0045 
0046     spin_lock_irqsave(&opal_async_comp_lock, flags);
0047 
0048     for (i = 0; i < opal_max_async_tokens; i++) {
0049         if (opal_async_tokens[i].state == ASYNC_TOKEN_UNALLOCATED) {
0050             opal_async_tokens[i].state = ASYNC_TOKEN_ALLOCATED;
0051             token = i;
0052             break;
0053         }
0054     }
0055 
0056     spin_unlock_irqrestore(&opal_async_comp_lock, flags);
0057     return token;
0058 }
0059 
0060 /*
0061  * Note: If the returned token is used in an opal call and opal returns
0062  * OPAL_ASYNC_COMPLETION you MUST call one of opal_async_wait_response() or
0063  * opal_async_wait_response_interruptible() at least once before calling another
0064  * opal_async_* function
0065  */
0066 int opal_async_get_token_interruptible(void)
0067 {
0068     int token;
0069 
0070     /* Wait until a token is available */
0071     if (down_interruptible(&opal_async_sem))
0072         return -ERESTARTSYS;
0073 
0074     token = __opal_async_get_token();
0075     if (token < 0)
0076         up(&opal_async_sem);
0077 
0078     return token;
0079 }
0080 EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);
0081 
0082 static int __opal_async_release_token(int token)
0083 {
0084     unsigned long flags;
0085     int rc;
0086 
0087     if (token < 0 || token >= opal_max_async_tokens) {
0088         pr_err("%s: Passed token is out of range, token %d\n",
0089                 __func__, token);
0090         return -EINVAL;
0091     }
0092 
0093     spin_lock_irqsave(&opal_async_comp_lock, flags);
0094     switch (opal_async_tokens[token].state) {
0095     case ASYNC_TOKEN_COMPLETED:
0096     case ASYNC_TOKEN_ALLOCATED:
0097         opal_async_tokens[token].state = ASYNC_TOKEN_UNALLOCATED;
0098         rc = 0;
0099         break;
0100     /*
0101      * DISPATCHED and ABANDONED tokens must wait for OPAL to respond.
0102      * Mark a DISPATCHED token as ABANDONED so that the response handling
0103      * code knows no one cares and that it can free it then.
0104      */
0105     case ASYNC_TOKEN_DISPATCHED:
0106         opal_async_tokens[token].state = ASYNC_TOKEN_ABANDONED;
0107         fallthrough;
0108     default:
0109         rc = 1;
0110     }
0111     spin_unlock_irqrestore(&opal_async_comp_lock, flags);
0112 
0113     return rc;
0114 }
0115 
0116 int opal_async_release_token(int token)
0117 {
0118     int ret;
0119 
0120     ret = __opal_async_release_token(token);
0121     if (!ret)
0122         up(&opal_async_sem);
0123 
0124     return ret;
0125 }
0126 EXPORT_SYMBOL_GPL(opal_async_release_token);
0127 
0128 int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
0129 {
0130     if (token >= opal_max_async_tokens) {
0131         pr_err("%s: Invalid token passed\n", __func__);
0132         return -EINVAL;
0133     }
0134 
0135     if (!msg) {
0136         pr_err("%s: Invalid message pointer passed\n", __func__);
0137         return -EINVAL;
0138     }
0139 
0140     /*
0141      * There is no need to mark the token as dispatched, wait_event()
0142      * will block until the token completes.
0143      *
0144      * Wakeup the poller before we wait for events to speed things
0145      * up on platforms or simulators where the interrupts aren't
0146      * functional.
0147      */
0148     opal_wake_poller();
0149     wait_event(opal_async_wait, opal_async_tokens[token].state
0150             == ASYNC_TOKEN_COMPLETED);
0151     memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));
0152 
0153     return 0;
0154 }
0155 EXPORT_SYMBOL_GPL(opal_async_wait_response);
0156 
/*
 * Interruptible variant of opal_async_wait_response().  Marks the token
 * DISPATCHED before sleeping so that a caller who gives up (signal) and
 * releases the token does not let the slot be reused while OPAL still
 * owes a response.
 *
 * Returns 0 with *msg filled in on completion, -EINVAL for a bad token
 * or NULL msg, or the negative value from wait_event_interruptible()
 * when interrupted (in which case *msg is untouched).
 */
int opal_async_wait_response_interruptible(uint64_t token, struct opal_msg *msg)
{
    unsigned long flags;
    int ret;

    if (token >= opal_max_async_tokens) {
        pr_err("%s: Invalid token passed\n", __func__);
        return -EINVAL;
    }

    if (!msg) {
        pr_err("%s: Invalid message pointer passed\n", __func__);
        return -EINVAL;
    }

    /*
     * The first time this gets called we mark the token as DISPATCHED
     * so that if wait_event_interruptible() returns not zero and the
     * caller frees the token, we know not to actually free the token
     * until the response comes.
     *
     * Only change if the token is ALLOCATED - it may have been
     * completed even before the caller gets around to calling this
     * the first time.
     *
     * There is also a dirty great comment at the token allocation
     * function that if the opal call returns OPAL_ASYNC_COMPLETION to
     * the caller then the caller *must* call this or the not
     * interruptible version before doing anything else with the
     * token.
     */
    /* Unlocked pre-check; re-checked under the lock before writing */
    if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED) {
        spin_lock_irqsave(&opal_async_comp_lock, flags);
        if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED)
            opal_async_tokens[token].state = ASYNC_TOKEN_DISPATCHED;
        spin_unlock_irqrestore(&opal_async_comp_lock, flags);
    }

    /*
     * Wakeup the poller before we wait for events to speed things
     * up on platforms or simulators where the interrupts aren't
     * functional.
     */
    opal_wake_poller();
    ret = wait_event_interruptible(opal_async_wait,
            opal_async_tokens[token].state ==
            ASYNC_TOKEN_COMPLETED);
    if (!ret)
        memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

    return ret;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response_interruptible);
0210 
0211 /* Called from interrupt context */
0212 static int opal_async_comp_event(struct notifier_block *nb,
0213         unsigned long msg_type, void *msg)
0214 {
0215     struct opal_msg *comp_msg = msg;
0216     enum opal_async_token_state state;
0217     unsigned long flags;
0218     uint64_t token;
0219 
0220     if (msg_type != OPAL_MSG_ASYNC_COMP)
0221         return 0;
0222 
0223     token = be64_to_cpu(comp_msg->params[0]);
0224     spin_lock_irqsave(&opal_async_comp_lock, flags);
0225     state = opal_async_tokens[token].state;
0226     opal_async_tokens[token].state = ASYNC_TOKEN_COMPLETED;
0227     spin_unlock_irqrestore(&opal_async_comp_lock, flags);
0228 
0229     if (state == ASYNC_TOKEN_ABANDONED) {
0230         /* Free the token, no one else will */
0231         opal_async_release_token(token);
0232         return 0;
0233     }
0234     memcpy(&opal_async_tokens[token].response, comp_msg, sizeof(*comp_msg));
0235     wake_up(&opal_async_wait);
0236 
0237     return 0;
0238 }
0239 
/* Registered for OPAL_MSG_ASYNC_COMP messages in opal_async_comp_init() */
static struct notifier_block opal_async_comp_nb = {
        .notifier_call  = opal_async_comp_event,
        .next       = NULL,
        .priority   = 0,
};
0245 
0246 int __init opal_async_comp_init(void)
0247 {
0248     struct device_node *opal_node;
0249     const __be32 *async;
0250     int err;
0251 
0252     opal_node = of_find_node_by_path("/ibm,opal");
0253     if (!opal_node) {
0254         pr_err("%s: Opal node not found\n", __func__);
0255         err = -ENOENT;
0256         goto out;
0257     }
0258 
0259     async = of_get_property(opal_node, "opal-msg-async-num", NULL);
0260     if (!async) {
0261         pr_err("%s: %pOF has no opal-msg-async-num\n",
0262                 __func__, opal_node);
0263         err = -ENOENT;
0264         goto out_opal_node;
0265     }
0266 
0267     opal_max_async_tokens = be32_to_cpup(async);
0268     opal_async_tokens = kcalloc(opal_max_async_tokens,
0269             sizeof(*opal_async_tokens), GFP_KERNEL);
0270     if (!opal_async_tokens) {
0271         err = -ENOMEM;
0272         goto out_opal_node;
0273     }
0274 
0275     err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
0276             &opal_async_comp_nb);
0277     if (err) {
0278         pr_err("%s: Can't register OPAL event notifier (%d)\n",
0279                 __func__, err);
0280         kfree(opal_async_tokens);
0281         goto out_opal_node;
0282     }
0283 
0284     sema_init(&opal_async_sem, opal_max_async_tokens);
0285 
0286 out_opal_node:
0287     of_node_put(opal_node);
0288 out:
0289     return err;
0290 }