Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * System Control and Management Interface (SCMI) Notification support
0004  *
0005  * Copyright (C) 2020-2021 ARM Ltd.
0006  */
0007 /**
0008  * DOC: Theory of operation
0009  *
0010  * SCMI Protocol specification allows the platform to signal events to
0011  * interested agents via notification messages: this is an implementation
0012  * of the dispatch and delivery of such notifications to the interested users
0013  * inside the Linux kernel.
0014  *
0015  * An SCMI Notification core instance is initialized for each active platform
0016  * instance identified by the means of the usual &struct scmi_handle.
0017  *
0018  * Each SCMI Protocol implementation, during its initialization, registers with
0019  * this core its set of supported events using scmi_register_protocol_events():
0020  * all the needed descriptors are stored in the &struct registered_protocols and
0021  * &struct registered_events arrays.
0022  *
0023  * Kernel users interested in some specific event can register their callbacks
0024  * providing the usual notifier_block descriptor, since this core implements
0025  * events' delivery using the standard Kernel notification chains machinery.
0026  *
0027  * Given the number of possible events defined by SCMI and the extensibility
0028  * of the SCMI Protocol itself, the underlying notification chains are created
0029  * and destroyed dynamically on demand depending on the number of users
0030  * effectively registered for an event, so that no support structures or chains
0031  * are allocated until at least one user has registered a notifier_block for
0032  * such event. Similarly, events' generation itself is enabled at the platform
0033  * level only after at least one user has registered, and it is shutdown after
0034  * the last user for that event has gone.
0035  *
0036  * All users provided callbacks and allocated notification-chains are stored in
0037  * the @registered_events_handlers hashtable. Callbacks' registration requests
0038  * for still to be registered events are instead kept in the dedicated common
0039  * hashtable @pending_events_handlers.
0040  *
0041  * An event is identified univocally by the tuple (proto_id, evt_id, src_id)
0042  * and is served by its own dedicated notification chain; information contained
0043  * in such tuples is used, in a few different ways, to generate the needed
0044  * hash-keys.
0045  *
0046  * Here proto_id and evt_id are simply the protocol_id and message_id numbers
0047  * as described in the SCMI Protocol specification, while src_id represents an
0048  * optional, protocol dependent, source identifier (like domain_id, perf_id
0049  * or sensor_id and so forth).
0050  *
0051  * Upon reception of a notification message from the platform the SCMI RX ISR
0052  * passes the received message payload and some ancillary information (including
0053  * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
0054  * pushes the event-data itself on a protocol-dedicated kfifo queue for further
0055  * deferred processing as specified in @scmi_events_dispatcher().
0056  *
0057  * Each protocol has it own dedicated work_struct and worker which, once kicked
0058  * by the ISR, takes care to empty its own dedicated queue, deliverying the
0059  * queued items into the proper notification-chain: notifications processing can
0060  * proceed concurrently on distinct workers only between events belonging to
0061  * different protocols while delivery of events within the same protocol is
0062  * still strictly sequentially ordered by time of arrival.
0063  *
0064  * Events' information is then extracted from the SCMI Notification messages and
0065  * conveyed, converted into a custom per-event report struct, as the void *data
0066  * param to the user callback provided by the registered notifier_block, so that
0067  * from the user perspective his callback will look invoked like:
0068  *
0069  * int user_cb(struct notifier_block *nb, unsigned long event_id, void *report)
0070  *
0071  */
0072 
0073 #define dev_fmt(fmt) "SCMI Notifications - " fmt
0074 #define pr_fmt(fmt) "SCMI Notifications - " fmt
0075 
0076 #include <linux/bitfield.h>
0077 #include <linux/bug.h>
0078 #include <linux/compiler.h>
0079 #include <linux/device.h>
0080 #include <linux/err.h>
0081 #include <linux/hashtable.h>
0082 #include <linux/kernel.h>
0083 #include <linux/ktime.h>
0084 #include <linux/kfifo.h>
0085 #include <linux/list.h>
0086 #include <linux/mutex.h>
0087 #include <linux/notifier.h>
0088 #include <linux/refcount.h>
0089 #include <linux/scmi_protocol.h>
0090 #include <linux/slab.h>
0091 #include <linux/types.h>
0092 #include <linux/workqueue.h>
0093 
0094 #include "common.h"
0095 #include "notify.h"
0096 
/* Upper bound on SCMI protocol IDs (protocol_id is an 8-bit quantity) */
#define SCMI_MAX_PROTO		256

/* Layout of the 32-bit hash key: | proto_id[8] | evt_id[8] | src_id[16] | */
#define PROTO_ID_MASK		GENMASK(31, 24)
#define EVT_ID_MASK		GENMASK(23, 16)
#define SRC_ID_MASK		GENMASK(15, 0)

/*
 * Builds an unsigned 32bit key from the given input tuple to be used
 * as a key in hashtables.
 */
#define MAKE_HASH_KEY(p, e, s)			\
	(FIELD_PREP(PROTO_ID_MASK, (p)) |	\
	 FIELD_PREP(EVT_ID_MASK, (e)) |		\
	 FIELD_PREP(SRC_ID_MASK, (s)))

/*
 * Key matching ANY source: the all-ones SRC_ID_MASK value is reserved as the
 * wildcard src_id, so it can never clash with a real 16-bit source.
 */
#define MAKE_ALL_SRCS_KEY(p, e)		MAKE_HASH_KEY((p), (e), SRC_ID_MASK)

/*
 * Assumes that the stored obj includes its own hash-key in a field named 'key':
 * with this simplification this macro can be equally used for all the objects'
 * types hashed by this implementation.
 *
 * @__ht: The hashtable name
 * @__obj: A pointer to the object type to be retrieved from the hashtable;
 *	   it will be used as a cursor while scanning the hastable and it will
 *	   be possibly left as NULL when @__k is not found
 * @__k: The key to search for
 */
#define KEY_FIND(__ht, __obj, __k)				\
({								\
	typeof(__k) k_ = __k;					\
	typeof(__obj) obj_;					\
								\
	hash_for_each_possible((__ht), obj_, hash, k_)		\
		if (obj_->key == k_)				\
			break;					\
	__obj = obj_;						\
})

/* Helpers to extract the individual tuple members back out of a hash key */
#define KEY_XTRACT_PROTO_ID(key)	FIELD_GET(PROTO_ID_MASK, (key))
#define KEY_XTRACT_EVT_ID(key)		FIELD_GET(EVT_ID_MASK, (key))
#define KEY_XTRACT_SRC_ID(key)		FIELD_GET(SRC_ID_MASK, (key))

/*
 * A set of macros used to access safely @registered_protocols and
 * @registered_events arrays; these are fixed in size and each entry is possibly
 * populated at protocols' registration time and then only read but NEVER
 * modified or removed.
 */
#define SCMI_GET_PROTO(__ni, __pid)					\
({									\
	typeof(__ni) ni_ = __ni;					\
	struct scmi_registered_events_desc *__pd = NULL;		\
									\
	if (ni_)							\
		__pd = READ_ONCE(ni_->registered_protocols[(__pid)]);	\
	__pd;								\
})

#define SCMI_GET_REVT_FROM_PD(__pd, __eid)				\
({									\
	typeof(__pd) pd_ = __pd;					\
	typeof(__eid) eid_ = __eid;					\
	struct scmi_registered_event *__revt = NULL;			\
									\
	if (pd_ && eid_ < pd_->num_events)				\
		__revt = READ_ONCE(pd_->registered_events[eid_]);	\
	__revt;								\
})

#define SCMI_GET_REVT(__ni, __pid, __eid)				\
({									\
	struct scmi_registered_event *__revt;				\
	struct scmi_registered_events_desc *__pd;			\
									\
	__pd = SCMI_GET_PROTO((__ni), (__pid));				\
	__revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid));			\
	__revt;								\
})

/* A couple of utility macros to limit cruft when calling protocols' helpers */
#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state)		\
({								\
	typeof(revt) r = revt;					\
	r->proto->ops->set_notify_enabled(r->proto->ph,		\
					(eid), (sid), (state));	\
})

#define REVT_NOTIFY_ENABLE(revt, eid, sid)			\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)

#define REVT_NOTIFY_DISABLE(revt, eid, sid)			\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)

#define REVT_FILL_REPORT(revt, ...)				\
({								\
	typeof(revt) r = revt;					\
	r->proto->ops->fill_custom_report(r->proto->ph,		\
					  __VA_ARGS__);		\
})

/* Hashtable sizes expressed as bit-order: 2^4 and 2^6 buckets respectively */
#define SCMI_PENDING_HASH_SZ		4
#define SCMI_REGISTERED_HASH_SZ		6
0200 
struct scmi_registered_events_desc;

/**
 * struct scmi_notify_instance  - Represents an instance of the notification
 * core
 * @gid: GroupID used for devres
 * @handle: A reference to the platform instance
 * @init_work: A work item to perform final initializations of pending handlers
 * @notify_wq: A reference to the allocated Kernel cmwq
 * @pending_mtx: A mutex to protect @pending_events_handlers
 * @registered_protocols: A statically allocated array containing pointers to
 *			  all the registered protocol-level specific information
 *			  related to events' handling
 * @pending_events_handlers: An hashtable containing all pending events'
 *			     handlers descriptors
 *
 * Each platform instance, represented by a handle, has its own instance of
 * the notification subsystem represented by this structure.
 */
struct scmi_notify_instance {
	void			*gid;
	struct scmi_handle	*handle;
	struct work_struct	init_work;
	struct workqueue_struct	*notify_wq;
	/* lock to protect pending_events_handlers */
	struct mutex		pending_mtx;
	struct scmi_registered_events_desc	**registered_protocols;
	DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
};
0230 
/**
 * struct events_queue  - Describes a queue and its associated worker
 * @sz: Size in bytes of the related kfifo
 * @kfifo: A dedicated Kernel kfifo descriptor
 * @notify_work: A custom work item bound to this queue
 * @wq: A reference to the associated workqueue
 *
 * Each protocol has its own dedicated events_queue descriptor.
 */
struct events_queue {
	size_t			sz;
	struct kfifo		kfifo;
	struct work_struct	notify_work;
	struct workqueue_struct	*wq;
};
0246 
/**
 * struct scmi_event_header  - A utility header
 * @timestamp: The timestamp, in nanoseconds (boottime), which was associated
 *	       to this event as soon as it entered the SCMI RX ISR
 * @payld_sz: Effective size of the embedded message payload which follows
 * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol)
 * @payld: A reference to the embedded event payload
 *
 * This header is prepended to each received event message payload before
 * queueing it on the related &struct events_queue.
 */
struct scmi_event_header {
	ktime_t timestamp;
	size_t payld_sz;
	unsigned char evt_id;
	unsigned char payld[];	/* flexible array: payload copied in-place */
};
0264 
struct scmi_registered_event;

/**
 * struct scmi_registered_events_desc  - Protocol Specific information
 * @id: Protocol ID
 * @ops: Protocol specific and event-related operations
 * @equeue: The embedded per-protocol events_queue
 * @ni: A reference to the initialized instance descriptor
 * @eh: A reference to pre-allocated buffer to be used as a scratch area by the
 *	deferred worker when fetching data from the kfifo
 * @eh_sz: Size of the pre-allocated buffer @eh
 * @in_flight: A reference to an in flight &struct scmi_registered_event
 * @num_events: Number of events in @registered_events
 * @registered_events: A dynamically allocated array holding all the registered
 *		       events' descriptors, whose fixed-size is determined at
 *		       compile time.
 * @registered_mtx: A mutex to protect @registered_events_handlers
 * @ph: SCMI protocol handle reference
 * @registered_events_handlers: An hashtable containing all events' handlers
 *				descriptors registered for this protocol
 *
 * All protocols that register at least one event have their protocol-specific
 * information stored here, together with the embedded allocated events_queue.
 * These descriptors are stored in the @registered_protocols array at protocol
 * registration time.
 *
 * Once these descriptors are successfully registered, they are NEVER again
 * removed or modified since protocols do not unregister ever, so that, once
 * we safely grab a NON-NULL reference from the array we can keep it and use it.
 */
struct scmi_registered_events_desc {
	u8				id;
	const struct scmi_event_ops	*ops;
	struct events_queue		equeue;
	struct scmi_notify_instance	*ni;
	struct scmi_event_header	*eh;
	size_t				eh_sz;
	void				*in_flight;
	int				num_events;
	struct scmi_registered_event	**registered_events;
	/* mutex to protect registered_events_handlers */
	struct mutex			registered_mtx;
	const struct scmi_protocol_handle	*ph;
	DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
};
0310 
/**
 * struct scmi_registered_event  - Event Specific Information
 * @proto: A reference to the associated protocol descriptor
 * @evt: A reference to the associated event descriptor (as provided at
 *	 registration time)
 * @report: A pre-allocated buffer used by the deferred worker to fill a
 *	    customized event report
 * @num_sources: The number of possible sources for this event as stated at
 *		 events' registration time
 * @sources: A reference to a dynamically allocated array used to refcount the
 *	     events' enable requests for all the existing sources
 * @sources_mtx: A mutex to serialize the access to @sources
 *
 * All registered events are represented by one of these structures that are
 * stored in the @registered_events array at protocol registration time.
 *
 * Once these descriptors are successfully registered, they are NEVER again
 * removed or modified since protocols do not unregister ever, so that once we
 * safely grab a NON-NULL reference from the table we can keep it and use it.
 */
struct scmi_registered_event {
	struct scmi_registered_events_desc *proto;
	const struct scmi_event	*evt;
	void		*report;
	u32		num_sources;
	refcount_t	*sources;
	/* locking to serialize the access to sources */
	struct mutex	sources_mtx;
};
0340 
/**
 * struct scmi_event_handler  - Event handler information
 * @key: The used hashkey
 * @users: A reference count for number of active users for this handler
 * @r_evt: A reference to the associated registered event; when this is NULL
 *	   this handler is pending, which means that identifies a set of
 *	   callbacks intended to be attached to an event which is still not
 *	   known nor registered by any protocol at that point in time
 * @chain: The notification chain dedicated to this specific event tuple
 * @hash: The hlist_node used for collision handling
 * @enabled: A boolean which records if event's generation has been already
 *	     enabled for this handler as a whole
 *
 * This structure collects all the information needed to process a received
 * event identified by the tuple (proto_id, evt_id, src_id).
 * These descriptors are stored in a per-protocol @registered_events_handlers
 * table using as a key a value derived from that tuple.
 */
struct scmi_event_handler {
	u32				key;
	refcount_t			users;
	struct scmi_registered_event	*r_evt;
	struct blocking_notifier_head	chain;
	struct hlist_node		hash;
	bool				enabled;
};

/* A handler with no registered event bound to it yet is "pending" */
#define IS_HNDL_PENDING(hndl)	(!(hndl)->r_evt)
0369 
0370 static struct scmi_event_handler *
0371 scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
0372 static void scmi_put_active_handler(struct scmi_notify_instance *ni,
0373                     struct scmi_event_handler *hndl);
0374 static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
0375                       struct scmi_event_handler *hndl);
0376 
0377 /**
0378  * scmi_lookup_and_call_event_chain()  - Lookup the proper chain and call it
0379  * @ni: A reference to the notification instance to use
0380  * @evt_key: The key to use to lookup the related notification chain
0381  * @report: The customized event-specific report to pass down to the callbacks
0382  *      as their *data parameter.
0383  */
0384 static inline void
0385 scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
0386                  u32 evt_key, void *report)
0387 {
0388     int ret;
0389     struct scmi_event_handler *hndl;
0390 
0391     /*
0392      * Here ensure the event handler cannot vanish while using it.
0393      * It is legitimate, though, for an handler not to be found at all here,
0394      * e.g. when it has been unregistered by the user after some events had
0395      * already been queued.
0396      */
0397     hndl = scmi_get_active_handler(ni, evt_key);
0398     if (!hndl)
0399         return;
0400 
0401     ret = blocking_notifier_call_chain(&hndl->chain,
0402                        KEY_XTRACT_EVT_ID(evt_key),
0403                        report);
0404     /* Notifiers are NOT supposed to cut the chain ... */
0405     WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);
0406 
0407     scmi_put_active_handler(ni, hndl);
0408 }
0409 
0410 /**
0411  * scmi_process_event_header()  - Dequeue and process an event header
0412  * @eq: The queue to use
0413  * @pd: The protocol descriptor to use
0414  *
0415  * Read an event header from the protocol queue into the dedicated scratch
0416  * buffer and looks for a matching registered event; in case an anomalously
0417  * sized read is detected just flush the queue.
0418  *
0419  * Return:
0420  * * a reference to the matching registered event when found
0421  * * ERR_PTR(-EINVAL) when NO registered event could be found
0422  * * NULL when the queue is empty
0423  */
0424 static inline struct scmi_registered_event *
0425 scmi_process_event_header(struct events_queue *eq,
0426               struct scmi_registered_events_desc *pd)
0427 {
0428     unsigned int outs;
0429     struct scmi_registered_event *r_evt;
0430 
0431     outs = kfifo_out(&eq->kfifo, pd->eh,
0432              sizeof(struct scmi_event_header));
0433     if (!outs)
0434         return NULL;
0435     if (outs != sizeof(struct scmi_event_header)) {
0436         dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
0437         kfifo_reset_out(&eq->kfifo);
0438         return NULL;
0439     }
0440 
0441     r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
0442     if (!r_evt)
0443         r_evt = ERR_PTR(-EINVAL);
0444 
0445     return r_evt;
0446 }
0447 
0448 /**
0449  * scmi_process_event_payload()  - Dequeue and process an event payload
0450  * @eq: The queue to use
0451  * @pd: The protocol descriptor to use
0452  * @r_evt: The registered event descriptor to use
0453  *
0454  * Read an event payload from the protocol queue into the dedicated scratch
0455  * buffer, fills a custom report and then look for matching event handlers and
0456  * call them; skip any unknown event (as marked by scmi_process_event_header())
0457  * and in case an anomalously sized read is detected just flush the queue.
0458  *
0459  * Return: False when the queue is empty
0460  */
0461 static inline bool
0462 scmi_process_event_payload(struct events_queue *eq,
0463                struct scmi_registered_events_desc *pd,
0464                struct scmi_registered_event *r_evt)
0465 {
0466     u32 src_id, key;
0467     unsigned int outs;
0468     void *report = NULL;
0469 
0470     outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
0471     if (!outs)
0472         return false;
0473 
0474     /* Any in-flight event has now been officially processed */
0475     pd->in_flight = NULL;
0476 
0477     if (outs != pd->eh->payld_sz) {
0478         dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
0479         kfifo_reset_out(&eq->kfifo);
0480         return false;
0481     }
0482 
0483     if (IS_ERR(r_evt)) {
0484         dev_warn(pd->ni->handle->dev,
0485              "SKIP UNKNOWN EVT - proto:%X  evt:%d\n",
0486              pd->id, pd->eh->evt_id);
0487         return true;
0488     }
0489 
0490     report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
0491                   pd->eh->payld, pd->eh->payld_sz,
0492                   r_evt->report, &src_id);
0493     if (!report) {
0494         dev_err(pd->ni->handle->dev,
0495             "report not available - proto:%X  evt:%d\n",
0496             pd->id, pd->eh->evt_id);
0497         return true;
0498     }
0499 
0500     /* At first search for a generic ALL src_ids handler... */
0501     key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
0502     scmi_lookup_and_call_event_chain(pd->ni, key, report);
0503 
0504     /* ...then search for any specific src_id */
0505     key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
0506     scmi_lookup_and_call_event_chain(pd->ni, key, report);
0507 
0508     return true;
0509 }
0510 
0511 /**
0512  * scmi_events_dispatcher()  - Common worker logic for all work items.
0513  * @work: The work item to use, which is associated to a dedicated events_queue
0514  *
0515  * Logic:
0516  *  1. dequeue one pending RX notification (queued in SCMI RX ISR context)
0517  *  2. generate a custom event report from the received event message
0518  *  3. lookup for any registered ALL_SRC_IDs handler:
0519  *    - > call the related notification chain passing in the report
0520  *  4. lookup for any registered specific SRC_ID handler:
0521  *    - > call the related notification chain passing in the report
0522  *
0523  * Note that:
0524  * * a dedicated per-protocol kfifo queue is used: in this way an anomalous
0525  *   flood of events cannot saturate other protocols' queues.
0526  * * each per-protocol queue is associated to a distinct work_item, which
0527  *   means, in turn, that:
0528  *   + all protocols can process their dedicated queues concurrently
0529  *     (since notify_wq:max_active != 1)
0530  *   + anyway at most one worker instance is allowed to run on the same queue
0531  *     concurrently: this ensures that we can have only one concurrent
0532  *     reader/writer on the associated kfifo, so that we can use it lock-less
0533  *
0534  * Context: Process context.
0535  */
0536 static void scmi_events_dispatcher(struct work_struct *work)
0537 {
0538     struct events_queue *eq;
0539     struct scmi_registered_events_desc *pd;
0540     struct scmi_registered_event *r_evt;
0541 
0542     eq = container_of(work, struct events_queue, notify_work);
0543     pd = container_of(eq, struct scmi_registered_events_desc, equeue);
0544     /*
0545      * In order to keep the queue lock-less and the number of memcopies
0546      * to the bare minimum needed, the dispatcher accounts for the
0547      * possibility of per-protocol in-flight events: i.e. an event whose
0548      * reception could end up being split across two subsequent runs of this
0549      * worker, first the header, then the payload.
0550      */
0551     do {
0552         if (!pd->in_flight) {
0553             r_evt = scmi_process_event_header(eq, pd);
0554             if (!r_evt)
0555                 break;
0556             pd->in_flight = r_evt;
0557         } else {
0558             r_evt = pd->in_flight;
0559         }
0560     } while (scmi_process_event_payload(eq, pd, r_evt));
0561 }
0562 
0563 /**
0564  * scmi_notify()  - Queues a notification for further deferred processing
0565  * @handle: The handle identifying the platform instance from which the
0566  *      dispatched event is generated
0567  * @proto_id: Protocol ID
0568  * @evt_id: Event ID (msgID)
0569  * @buf: Event Message Payload (without the header)
0570  * @len: Event Message Payload size
0571  * @ts: RX Timestamp in nanoseconds (boottime)
0572  *
0573  * Context: Called in interrupt context to queue a received event for
0574  * deferred processing.
0575  *
0576  * Return: 0 on Success
0577  */
0578 int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
0579         const void *buf, size_t len, ktime_t ts)
0580 {
0581     struct scmi_registered_event *r_evt;
0582     struct scmi_event_header eh;
0583     struct scmi_notify_instance *ni;
0584 
0585     ni = scmi_notification_instance_data_get(handle);
0586     if (!ni)
0587         return 0;
0588 
0589     r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
0590     if (!r_evt)
0591         return -EINVAL;
0592 
0593     if (len > r_evt->evt->max_payld_sz) {
0594         dev_err(handle->dev, "discard badly sized message\n");
0595         return -EINVAL;
0596     }
0597     if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
0598         dev_warn(handle->dev,
0599              "queue full, dropping proto_id:%d  evt_id:%d  ts:%lld\n",
0600              proto_id, evt_id, ktime_to_ns(ts));
0601         return -ENOMEM;
0602     }
0603 
0604     eh.timestamp = ts;
0605     eh.evt_id = evt_id;
0606     eh.payld_sz = len;
0607     /*
0608      * Header and payload are enqueued with two distinct kfifo_in() (so non
0609      * atomic), but this situation is handled properly on the consumer side
0610      * with in-flight events tracking.
0611      */
0612     kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
0613     kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);
0614     /*
0615      * Don't care about return value here since we just want to ensure that
0616      * a work is queued all the times whenever some items have been pushed
0617      * on the kfifo:
0618      * - if work was already queued it will simply fail to queue a new one
0619      *   since it is not needed
0620      * - if work was not queued already it will be now, even in case work
0621      *   was in fact already running: this behavior avoids any possible race
0622      *   when this function pushes new items onto the kfifos after the
0623      *   related executing worker had already determined the kfifo to be
0624      *   empty and it was terminating.
0625      */
0626     queue_work(r_evt->proto->equeue.wq,
0627            &r_evt->proto->equeue.notify_work);
0628 
0629     return 0;
0630 }
0631 
/**
 * scmi_kfifo_free()  - Devres action helper to free the kfifo
 * @kfifo: The kfifo to free
 */
static void scmi_kfifo_free(void *kfifo)
{
	struct kfifo *kf = kfifo;

	kfifo_free(kf);
}
0640 
0641 /**
0642  * scmi_initialize_events_queue()  - Allocate/Initialize a kfifo buffer
0643  * @ni: A reference to the notification instance to use
0644  * @equeue: The events_queue to initialize
0645  * @sz: Size of the kfifo buffer to allocate
0646  *
0647  * Allocate a buffer for the kfifo and initialize it.
0648  *
0649  * Return: 0 on Success
0650  */
0651 static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
0652                     struct events_queue *equeue, size_t sz)
0653 {
0654     int ret;
0655 
0656     if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
0657         return -ENOMEM;
0658     /* Size could have been roundup to power-of-two */
0659     equeue->sz = kfifo_size(&equeue->kfifo);
0660 
0661     ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
0662                        &equeue->kfifo);
0663     if (ret)
0664         return ret;
0665 
0666     INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
0667     equeue->wq = ni->notify_wq;
0668 
0669     return ret;
0670 }
0671 
0672 /**
0673  * scmi_allocate_registered_events_desc()  - Allocate a registered events'
0674  * descriptor
0675  * @ni: A reference to the &struct scmi_notify_instance notification instance
0676  *  to use
0677  * @proto_id: Protocol ID
0678  * @queue_sz: Size of the associated queue to allocate
0679  * @eh_sz: Size of the event header scratch area to pre-allocate
0680  * @num_events: Number of events to support (size of @registered_events)
0681  * @ops: Pointer to a struct holding references to protocol specific helpers
0682  *   needed during events handling
0683  *
0684  * It is supposed to be called only once for each protocol at protocol
0685  * initialization time, so it warns if the requested protocol is found already
0686  * registered.
0687  *
0688  * Return: The allocated and registered descriptor on Success
0689  */
0690 static struct scmi_registered_events_desc *
0691 scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
0692                      u8 proto_id, size_t queue_sz, size_t eh_sz,
0693                      int num_events,
0694                      const struct scmi_event_ops *ops)
0695 {
0696     int ret;
0697     struct scmi_registered_events_desc *pd;
0698 
0699     /* Ensure protocols are up to date */
0700     smp_rmb();
0701     if (WARN_ON(ni->registered_protocols[proto_id]))
0702         return ERR_PTR(-EINVAL);
0703 
0704     pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
0705     if (!pd)
0706         return ERR_PTR(-ENOMEM);
0707     pd->id = proto_id;
0708     pd->ops = ops;
0709     pd->ni = ni;
0710 
0711     ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
0712     if (ret)
0713         return ERR_PTR(ret);
0714 
0715     pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
0716     if (!pd->eh)
0717         return ERR_PTR(-ENOMEM);
0718     pd->eh_sz = eh_sz;
0719 
0720     pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
0721                          sizeof(char *), GFP_KERNEL);
0722     if (!pd->registered_events)
0723         return ERR_PTR(-ENOMEM);
0724     pd->num_events = num_events;
0725 
0726     /* Initialize per protocol handlers table */
0727     mutex_init(&pd->registered_mtx);
0728     hash_init(pd->registered_events_handlers);
0729 
0730     return pd;
0731 }
0732 
0733 /**
0734  * scmi_register_protocol_events()  - Register Protocol Events with the core
0735  * @handle: The handle identifying the platform instance against which the
0736  *      protocol's events are registered
0737  * @proto_id: Protocol ID
0738  * @ph: SCMI protocol handle.
0739  * @ee: A structure describing the events supported by this protocol.
0740  *
0741  * Used by SCMI Protocols initialization code to register with the notification
0742  * core the list of supported events and their descriptors: takes care to
0743  * pre-allocate and store all needed descriptors, scratch buffers and event
0744  * queues.
0745  *
0746  * Return: 0 on Success
0747  */
0748 int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
0749                   const struct scmi_protocol_handle *ph,
0750                   const struct scmi_protocol_events *ee)
0751 {
0752     int i;
0753     unsigned int num_sources;
0754     size_t payld_sz = 0;
0755     struct scmi_registered_events_desc *pd;
0756     struct scmi_notify_instance *ni;
0757     const struct scmi_event *evt;
0758 
0759     if (!ee || !ee->ops || !ee->evts || !ph ||
0760         (!ee->num_sources && !ee->ops->get_num_sources))
0761         return -EINVAL;
0762 
0763     ni = scmi_notification_instance_data_get(handle);
0764     if (!ni)
0765         return -ENOMEM;
0766 
0767     /* num_sources cannot be <= 0 */
0768     if (ee->num_sources) {
0769         num_sources = ee->num_sources;
0770     } else {
0771         int nsrc = ee->ops->get_num_sources(ph);
0772 
0773         if (nsrc <= 0)
0774             return -EINVAL;
0775         num_sources = nsrc;
0776     }
0777 
0778     evt = ee->evts;
0779     for (i = 0; i < ee->num_events; i++)
0780         payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
0781     payld_sz += sizeof(struct scmi_event_header);
0782 
0783     pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz,
0784                           payld_sz, ee->num_events,
0785                           ee->ops);
0786     if (IS_ERR(pd))
0787         return PTR_ERR(pd);
0788 
0789     pd->ph = ph;
0790     for (i = 0; i < ee->num_events; i++, evt++) {
0791         struct scmi_registered_event *r_evt;
0792 
0793         r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
0794                      GFP_KERNEL);
0795         if (!r_evt)
0796             return -ENOMEM;
0797         r_evt->proto = pd;
0798         r_evt->evt = evt;
0799 
0800         r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
0801                           sizeof(refcount_t), GFP_KERNEL);
0802         if (!r_evt->sources)
0803             return -ENOMEM;
0804         r_evt->num_sources = num_sources;
0805         mutex_init(&r_evt->sources_mtx);
0806 
0807         r_evt->report = devm_kzalloc(ni->handle->dev,
0808                          evt->max_report_sz, GFP_KERNEL);
0809         if (!r_evt->report)
0810             return -ENOMEM;
0811 
0812         pd->registered_events[i] = r_evt;
0813         /* Ensure events are updated */
0814         smp_wmb();
0815         dev_dbg(handle->dev, "registered event - %lX\n",
0816             MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
0817     }
0818 
0819     /* Register protocol and events...it will never be removed */
0820     ni->registered_protocols[proto_id] = pd;
0821     /* Ensure protocols are updated */
0822     smp_wmb();
0823 
0824     /*
0825      * Finalize any pending events' handler which could have been waiting
0826      * for this protocol's events registration.
0827      */
0828     schedule_work(&ni->init_work);
0829 
0830     return 0;
0831 }
0832 
0833 /**
0834  * scmi_deregister_protocol_events  - Deregister protocol events with the core
0835  * @handle: The handle identifying the platform instance against which the
0836  *      protocol's events are registered
0837  * @proto_id: Protocol ID
0838  */
0839 void scmi_deregister_protocol_events(const struct scmi_handle *handle,
0840                      u8 proto_id)
0841 {
0842     struct scmi_notify_instance *ni;
0843     struct scmi_registered_events_desc *pd;
0844 
0845     ni = scmi_notification_instance_data_get(handle);
0846     if (!ni)
0847         return;
0848 
0849     pd = ni->registered_protocols[proto_id];
0850     if (!pd)
0851         return;
0852 
0853     ni->registered_protocols[proto_id] = NULL;
0854     /* Ensure protocols are updated */
0855     smp_wmb();
0856 
0857     cancel_work_sync(&pd->equeue.notify_work);
0858 }
0859 
0860 /**
0861  * scmi_allocate_event_handler()  - Allocate Event handler
0862  * @ni: A reference to the notification instance to use
0863  * @evt_key: 32bit key uniquely bind to the event identified by the tuple
0864  *       (proto_id, evt_id, src_id)
0865  *
0866  * Allocate an event handler and related notification chain associated with
0867  * the provided event handler key.
0868  * Note that, at this point, a related registered_event is still to be
0869  * associated to this handler descriptor (hndl->r_evt == NULL), so the handler
0870  * is initialized as pending.
0871  *
0872  * Context: Assumes to be called with @pending_mtx already acquired.
0873  * Return: the freshly allocated structure on Success
0874  */
0875 static struct scmi_event_handler *
0876 scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
0877 {
0878     struct scmi_event_handler *hndl;
0879 
0880     hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
0881     if (!hndl)
0882         return NULL;
0883     hndl->key = evt_key;
0884     BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
0885     refcount_set(&hndl->users, 1);
0886     /* New handlers are created pending */
0887     hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);
0888 
0889     return hndl;
0890 }
0891 
0892 /**
0893  * scmi_free_event_handler()  - Free the provided Event handler
0894  * @hndl: The event handler structure to free
0895  *
0896  * Context: Assumes to be called with proper locking acquired depending
0897  *      on the situation.
0898  */
0899 static void scmi_free_event_handler(struct scmi_event_handler *hndl)
0900 {
0901     hash_del(&hndl->hash);
0902     kfree(hndl);
0903 }
0904 
0905 /**
0906  * scmi_bind_event_handler()  - Helper to attempt binding an handler to an event
0907  * @ni: A reference to the notification instance to use
0908  * @hndl: The event handler to bind
0909  *
0910  * If an associated registered event is found, move the handler from the pending
0911  * into the registered table.
0912  *
0913  * Context: Assumes to be called with @pending_mtx already acquired.
0914  *
0915  * Return: 0 on Success
0916  */
0917 static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
0918                       struct scmi_event_handler *hndl)
0919 {
0920     struct scmi_registered_event *r_evt;
0921 
0922     r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
0923                   KEY_XTRACT_EVT_ID(hndl->key));
0924     if (!r_evt)
0925         return -EINVAL;
0926 
0927     /*
0928      * Remove from pending and insert into registered while getting hold
0929      * of protocol instance.
0930      */
0931     hash_del(&hndl->hash);
0932     /*
0933      * Acquire protocols only for NON pending handlers, so as NOT to trigger
0934      * protocol initialization when a notifier is registered against a still
0935      * not registered protocol, since it would make little sense to force init
0936      * protocols for which still no SCMI driver user exists: they wouldn't
0937      * emit any event anyway till some SCMI driver starts using it.
0938      */
0939     scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key));
0940     hndl->r_evt = r_evt;
0941 
0942     mutex_lock(&r_evt->proto->registered_mtx);
0943     hash_add(r_evt->proto->registered_events_handlers,
0944          &hndl->hash, hndl->key);
0945     mutex_unlock(&r_evt->proto->registered_mtx);
0946 
0947     return 0;
0948 }
0949 
0950 /**
0951  * scmi_valid_pending_handler()  - Helper to check pending status of handlers
0952  * @ni: A reference to the notification instance to use
0953  * @hndl: The event handler to check
0954  *
0955  * An handler is considered pending when its r_evt == NULL, because the related
0956  * event was still unknown at handler's registration time; anyway, since all
0957  * protocols register their supported events once for all at protocols'
0958  * initialization time, a pending handler cannot be considered valid anymore if
0959  * the underlying event (which it is waiting for), belongs to an already
0960  * initialized and registered protocol.
0961  *
0962  * Return: 0 on Success
0963  */
0964 static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
0965                          struct scmi_event_handler *hndl)
0966 {
0967     struct scmi_registered_events_desc *pd;
0968 
0969     if (!IS_HNDL_PENDING(hndl))
0970         return -EINVAL;
0971 
0972     pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
0973     if (pd)
0974         return -EINVAL;
0975 
0976     return 0;
0977 }
0978 
0979 /**
0980  * scmi_register_event_handler()  - Register whenever possible an Event handler
0981  * @ni: A reference to the notification instance to use
0982  * @hndl: The event handler to register
0983  *
0984  * At first try to bind an event handler to its associated event, then check if
0985  * it was at least a valid pending handler: if it was not bound nor valid return
0986  * false.
0987  *
0988  * Valid pending incomplete bindings will be periodically retried by a dedicated
0989  * worker which is kicked each time a new protocol completes its own
0990  * registration phase.
0991  *
0992  * Context: Assumes to be called with @pending_mtx acquired.
0993  *
0994  * Return: 0 on Success
0995  */
0996 static int scmi_register_event_handler(struct scmi_notify_instance *ni,
0997                        struct scmi_event_handler *hndl)
0998 {
0999     int ret;
1000 
1001     ret = scmi_bind_event_handler(ni, hndl);
1002     if (!ret) {
1003         dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
1004             hndl->key);
1005     } else {
1006         ret = scmi_valid_pending_handler(ni, hndl);
1007         if (!ret)
1008             dev_dbg(ni->handle->dev,
1009                 "registered PENDING handler - key:%X\n",
1010                 hndl->key);
1011     }
1012 
1013     return ret;
1014 }
1015 
1016 /**
1017  * __scmi_event_handler_get_ops()  - Utility to get or create an event handler
1018  * @ni: A reference to the notification instance to use
1019  * @evt_key: The event key to use
1020  * @create: A boolean flag to specify if a handler must be created when
1021  *      not already existent
1022  *
1023  * Search for the desired handler matching the key in both the per-protocol
1024  * registered table and the common pending table:
1025  * * if found adjust users refcount
1026  * * if not found and @create is true, create and register the new handler:
1027  *   handler could end up being registered as pending if no matching event
1028  *   could be found.
1029  *
1030  * An handler is guaranteed to reside in one and only one of the tables at
1031  * any one time; to ensure this the whole search and create is performed
1032  * holding the @pending_mtx lock, with @registered_mtx additionally acquired
1033  * if needed.
1034  *
1035  * Note that when a nested acquisition of these mutexes is needed the locking
1036  * order is always (same as in @init_work):
1037  * 1. pending_mtx
1038  * 2. registered_mtx
1039  *
1040  * Events generation is NOT enabled right after creation within this routine
1041  * since at creation time we usually want to have all setup and ready before
1042  * events really start flowing.
1043  *
1044  * Return: A properly refcounted handler on Success, NULL on Failure
1045  */
1046 static inline struct scmi_event_handler *
1047 __scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
1048                  u32 evt_key, bool create)
1049 {
1050     struct scmi_registered_event *r_evt;
1051     struct scmi_event_handler *hndl = NULL;
1052 
1053     r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
1054                   KEY_XTRACT_EVT_ID(evt_key));
1055 
1056     mutex_lock(&ni->pending_mtx);
1057     /* Search registered events at first ... if possible at all */
1058     if (r_evt) {
1059         mutex_lock(&r_evt->proto->registered_mtx);
1060         hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
1061                 hndl, evt_key);
1062         if (hndl)
1063             refcount_inc(&hndl->users);
1064         mutex_unlock(&r_evt->proto->registered_mtx);
1065     }
1066 
1067     /* ...then amongst pending. */
1068     if (!hndl) {
1069         hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
1070         if (hndl)
1071             refcount_inc(&hndl->users);
1072     }
1073 
1074     /* Create if still not found and required */
1075     if (!hndl && create) {
1076         hndl = scmi_allocate_event_handler(ni, evt_key);
1077         if (hndl && scmi_register_event_handler(ni, hndl)) {
1078             dev_dbg(ni->handle->dev,
1079                 "purging UNKNOWN handler - key:%X\n",
1080                 hndl->key);
1081             /* this hndl can be only a pending one */
1082             scmi_put_handler_unlocked(ni, hndl);
1083             hndl = NULL;
1084         }
1085     }
1086     mutex_unlock(&ni->pending_mtx);
1087 
1088     return hndl;
1089 }
1090 
/* Lookup-only variant: never creates a new handler for @evt_key. */
static struct scmi_event_handler *
scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
    return __scmi_event_handler_get_ops(ni, evt_key, false);
}
1096 
/* Lookup variant that allocates a (possibly pending) handler when missing. */
static struct scmi_event_handler *
scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
    return __scmi_event_handler_get_ops(ni, evt_key, true);
}
1102 
1103 /**
1104  * scmi_get_active_handler()  - Helper to get active handlers only
1105  * @ni: A reference to the notification instance to use
1106  * @evt_key: The event key to use
1107  *
1108  * Search for the desired handler matching the key only in the per-protocol
1109  * table of registered handlers: this is called only from the dispatching path
1110  * so want to be as quick as possible and do not care about pending.
1111  *
1112  * Return: A properly refcounted active handler
1113  */
1114 static struct scmi_event_handler *
1115 scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
1116 {
1117     struct scmi_registered_event *r_evt;
1118     struct scmi_event_handler *hndl = NULL;
1119 
1120     r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
1121                   KEY_XTRACT_EVT_ID(evt_key));
1122     if (r_evt) {
1123         mutex_lock(&r_evt->proto->registered_mtx);
1124         hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
1125                 hndl, evt_key);
1126         if (hndl)
1127             refcount_inc(&hndl->users);
1128         mutex_unlock(&r_evt->proto->registered_mtx);
1129     }
1130 
1131     return hndl;
1132 }
1133 
1134 /**
1135  * __scmi_enable_evt()  - Enable/disable events generation
1136  * @r_evt: The registered event to act upon
1137  * @src_id: The src_id to act upon
1138  * @enable: The action to perform: true->Enable, false->Disable
1139  *
1140  * Takes care of proper refcounting while performing enable/disable: handles
1141  * the special case of ALL sources requests by itself.
1142  * Returns successfully if at least one of the required src_id has been
1143  * successfully enabled/disabled.
1144  *
1145  * Return: 0 on Success
1146  */
1147 static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
1148                     u32 src_id, bool enable)
1149 {
1150     int retvals = 0;
1151     u32 num_sources;
1152     refcount_t *sid;
1153 
1154     if (src_id == SRC_ID_MASK) {
1155         src_id = 0;
1156         num_sources = r_evt->num_sources;
1157     } else if (src_id < r_evt->num_sources) {
1158         num_sources = 1;
1159     } else {
1160         return -EINVAL;
1161     }
1162 
1163     mutex_lock(&r_evt->sources_mtx);
1164     if (enable) {
1165         for (; num_sources; src_id++, num_sources--) {
1166             int ret = 0;
1167 
1168             sid = &r_evt->sources[src_id];
1169             if (refcount_read(sid) == 0) {
1170                 ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
1171                              src_id);
1172                 if (!ret)
1173                     refcount_set(sid, 1);
1174             } else {
1175                 refcount_inc(sid);
1176             }
1177             retvals += !ret;
1178         }
1179     } else {
1180         for (; num_sources; src_id++, num_sources--) {
1181             sid = &r_evt->sources[src_id];
1182             if (refcount_dec_and_test(sid))
1183                 REVT_NOTIFY_DISABLE(r_evt,
1184                             r_evt->evt->id, src_id);
1185         }
1186         retvals = 1;
1187     }
1188     mutex_unlock(&r_evt->sources_mtx);
1189 
1190     return retvals ? 0 : -EINVAL;
1191 }
1192 
1193 static int scmi_enable_events(struct scmi_event_handler *hndl)
1194 {
1195     int ret = 0;
1196 
1197     if (!hndl->enabled) {
1198         ret = __scmi_enable_evt(hndl->r_evt,
1199                     KEY_XTRACT_SRC_ID(hndl->key), true);
1200         if (!ret)
1201             hndl->enabled = true;
1202     }
1203 
1204     return ret;
1205 }
1206 
1207 static int scmi_disable_events(struct scmi_event_handler *hndl)
1208 {
1209     int ret = 0;
1210 
1211     if (hndl->enabled) {
1212         ret = __scmi_enable_evt(hndl->r_evt,
1213                     KEY_XTRACT_SRC_ID(hndl->key), false);
1214         if (!ret)
1215             hndl->enabled = false;
1216     }
1217 
1218     return ret;
1219 }
1220 
1221 /**
1222  * scmi_put_handler_unlocked()  - Put an event handler
1223  * @ni: A reference to the notification instance to use
1224  * @hndl: The event handler to act upon
1225  *
1226  * After having got exclusive access to the registered handlers hashtable,
1227  * update the refcount and if @hndl is no more in use by anyone:
1228  * * ask for events' generation disabling
1229  * * unregister and free the handler itself
1230  *
1231  * Context: Assumes all the proper locking has been managed by the caller.
1232  *
1233  * Return: True if handler was freed (users dropped to zero)
1234  */
1235 static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
1236                       struct scmi_event_handler *hndl)
1237 {
1238     bool freed = false;
1239 
1240     if (refcount_dec_and_test(&hndl->users)) {
1241         if (!IS_HNDL_PENDING(hndl))
1242             scmi_disable_events(hndl);
1243         scmi_free_event_handler(hndl);
1244         freed = true;
1245     }
1246 
1247     return freed;
1248 }
1249 
/**
 * scmi_put_handler()  - Put an event handler taking the needed locks
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to act upon
 *
 * Drops one user reference on @hndl under @pending_mtx and, for handlers
 * already bound to a registered event, also under the related protocol's
 * @registered_mtx; when the handler is effectively freed, the protocol
 * acquired at binding time is released too.
 */
static void scmi_put_handler(struct scmi_notify_instance *ni,
                 struct scmi_event_handler *hndl)
{
    bool freed;
    u8 protocol_id;
    struct scmi_registered_event *r_evt = hndl->r_evt;

    mutex_lock(&ni->pending_mtx);
    if (r_evt) {
        /* Stash proto_id now: r_evt cannot be used after the free */
        protocol_id = r_evt->proto->id;
        mutex_lock(&r_evt->proto->registered_mtx);
    }

    freed = scmi_put_handler_unlocked(ni, hndl);

    if (r_evt) {
        mutex_unlock(&r_evt->proto->registered_mtx);
        /*
         * Only registered handler acquired protocol; must be here
         * released only AFTER unlocking registered_mtx, since
         * releasing a protocol can trigger its de-initialization
         * (ie. including r_evt and registered_mtx)
         */
        if (freed)
            scmi_protocol_release(ni->handle, protocol_id);
    }
    mutex_unlock(&ni->pending_mtx);
}
1278 
/**
 * scmi_put_active_handler()  - Put an active (already bound) event handler
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to act upon
 *
 * Variant of scmi_put_handler() for handlers known to be non-pending
 * (hndl->r_evt != NULL): only the protocol's @registered_mtx is needed,
 * and the protocol is released when the handler gets freed.
 */
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
                    struct scmi_event_handler *hndl)
{
    bool freed;
    struct scmi_registered_event *r_evt = hndl->r_evt;
    /* Stash proto_id: r_evt must not be touched once hndl is freed */
    u8 protocol_id = r_evt->proto->id;

    mutex_lock(&r_evt->proto->registered_mtx);
    freed = scmi_put_handler_unlocked(ni, hndl);
    mutex_unlock(&r_evt->proto->registered_mtx);
    if (freed)
        scmi_protocol_release(ni->handle, protocol_id);
}
1292 
1293 /**
1294  * scmi_event_handler_enable_events()  - Enable events associated to an handler
1295  * @hndl: The Event handler to act upon
1296  *
1297  * Return: 0 on Success
1298  */
1299 static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
1300 {
1301     if (scmi_enable_events(hndl)) {
1302         pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
1303         return -EINVAL;
1304     }
1305 
1306     return 0;
1307 }
1308 
1309 /**
1310  * scmi_notifier_register()  - Register a notifier_block for an event
1311  * @handle: The handle identifying the platform instance against which the
1312  *      callback is registered
1313  * @proto_id: Protocol ID
1314  * @evt_id: Event ID
1315  * @src_id: Source ID, when NULL register for events coming form ALL possible
1316  *      sources
1317  * @nb: A standard notifier block to register for the specified event
1318  *
1319  * Generic helper to register a notifier_block against a protocol event.
1320  *
1321  * A notifier_block @nb will be registered for each distinct event identified
1322  * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
1323  * so that:
1324  *
1325  *  (proto_X, evt_Y, src_Z) --> chain_X_Y_Z
1326  *
1327  * @src_id meaning is protocol specific and identifies the origin of the event
1328  * (like domain_id, sensor_id and so forth).
1329  *
1330  * @src_id can be NULL to signify that the caller is interested in receiving
1331  * notifications from ALL the available sources for that protocol OR simply that
1332  * the protocol does not support distinct sources.
1333  *
1334  * As soon as one user for the specified tuple appears, an handler is created,
1335  * and that specific event's generation is enabled at the platform level, unless
1336  * an associated registered event is found missing, meaning that the needed
1337  * protocol is still to be initialized and the handler has just been registered
1338  * as still pending.
1339  *
1340  * Return: 0 on Success
1341  */
1342 static int scmi_notifier_register(const struct scmi_handle *handle,
1343                   u8 proto_id, u8 evt_id, const u32 *src_id,
1344                   struct notifier_block *nb)
1345 {
1346     int ret = 0;
1347     u32 evt_key;
1348     struct scmi_event_handler *hndl;
1349     struct scmi_notify_instance *ni;
1350 
1351     ni = scmi_notification_instance_data_get(handle);
1352     if (!ni)
1353         return -ENODEV;
1354 
1355     evt_key = MAKE_HASH_KEY(proto_id, evt_id,
1356                 src_id ? *src_id : SRC_ID_MASK);
1357     hndl = scmi_get_or_create_handler(ni, evt_key);
1358     if (!hndl)
1359         return -EINVAL;
1360 
1361     blocking_notifier_chain_register(&hndl->chain, nb);
1362 
1363     /* Enable events for not pending handlers */
1364     if (!IS_HNDL_PENDING(hndl)) {
1365         ret = scmi_event_handler_enable_events(hndl);
1366         if (ret)
1367             scmi_put_handler(ni, hndl);
1368     }
1369 
1370     return ret;
1371 }
1372 
1373 /**
1374  * scmi_notifier_unregister()  - Unregister a notifier_block for an event
1375  * @handle: The handle identifying the platform instance against which the
1376  *      callback is unregistered
1377  * @proto_id: Protocol ID
1378  * @evt_id: Event ID
1379  * @src_id: Source ID
1380  * @nb: The notifier_block to unregister
1381  *
1382  * Takes care to unregister the provided @nb from the notification chain
1383  * associated to the specified event and, if there are no more users for the
1384  * event handler, frees also the associated event handler structures.
1385  * (this could possibly cause disabling of event's generation at platform level)
1386  *
1387  * Return: 0 on Success
1388  */
1389 static int scmi_notifier_unregister(const struct scmi_handle *handle,
1390                     u8 proto_id, u8 evt_id, const u32 *src_id,
1391                     struct notifier_block *nb)
1392 {
1393     u32 evt_key;
1394     struct scmi_event_handler *hndl;
1395     struct scmi_notify_instance *ni;
1396 
1397     ni = scmi_notification_instance_data_get(handle);
1398     if (!ni)
1399         return -ENODEV;
1400 
1401     evt_key = MAKE_HASH_KEY(proto_id, evt_id,
1402                 src_id ? *src_id : SRC_ID_MASK);
1403     hndl = scmi_get_handler(ni, evt_key);
1404     if (!hndl)
1405         return -EINVAL;
1406 
1407     /*
1408      * Note that this chain unregistration call is safe on its own
1409      * being internally protected by an rwsem.
1410      */
1411     blocking_notifier_chain_unregister(&hndl->chain, nb);
1412     scmi_put_handler(ni, hndl);
1413 
1414     /*
1415      * This balances the initial get issued in @scmi_notifier_register.
1416      * If this notifier_block happened to be the last known user callback
1417      * for this event, the handler is here freed and the event's generation
1418      * stopped.
1419      *
1420      * Note that, an ongoing concurrent lookup on the delivery workqueue
1421      * path could still hold the refcount to 1 even after this routine
1422      * completes: in such a case it will be the final put on the delivery
1423      * path which will finally free this unused handler.
1424      */
1425     scmi_put_handler(ni, hndl);
1426 
1427     return 0;
1428 }
1429 
/**
 * struct scmi_notifier_devres  - Devres data to track a managed notifier
 * @handle: The handle the notifier was registered against
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @__src_id: Storage for the source ID when one was provided
 * @src_id: Points to @__src_id when a source was given, NULL for ALL sources
 * @nb: The registered notifier_block
 */
struct scmi_notifier_devres {
    const struct scmi_handle *handle;
    u8 proto_id;
    u8 evt_id;
    u32 __src_id;
    u32 *src_id;
    struct notifier_block *nb;
};
1438 
/* Devres release: unregister the tracked notifier on device teardown. */
static void scmi_devm_release_notifier(struct device *dev, void *res)
{
    struct scmi_notifier_devres *dres = res;

    scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id,
                 dres->src_id, dres->nb);
}
1446 
1447 /**
1448  * scmi_devm_notifier_register()  - Managed registration of a notifier_block
1449  * for an event
1450  * @sdev: A reference to an scmi_device whose embedded struct device is to
1451  *    be used for devres accounting.
1452  * @proto_id: Protocol ID
1453  * @evt_id: Event ID
1454  * @src_id: Source ID, when NULL register for events coming form ALL possible
1455  *      sources
1456  * @nb: A standard notifier block to register for the specified event
1457  *
1458  * Generic devres managed helper to register a notifier_block against a
1459  * protocol event.
1460  *
1461  * Return: 0 on Success
1462  */
1463 static int scmi_devm_notifier_register(struct scmi_device *sdev,
1464                        u8 proto_id, u8 evt_id,
1465                        const u32 *src_id,
1466                        struct notifier_block *nb)
1467 {
1468     int ret;
1469     struct scmi_notifier_devres *dres;
1470 
1471     dres = devres_alloc(scmi_devm_release_notifier,
1472                 sizeof(*dres), GFP_KERNEL);
1473     if (!dres)
1474         return -ENOMEM;
1475 
1476     ret = scmi_notifier_register(sdev->handle, proto_id,
1477                      evt_id, src_id, nb);
1478     if (ret) {
1479         devres_free(dres);
1480         return ret;
1481     }
1482 
1483     dres->handle = sdev->handle;
1484     dres->proto_id = proto_id;
1485     dres->evt_id = evt_id;
1486     dres->nb = nb;
1487     if (src_id) {
1488         dres->__src_id = *src_id;
1489         dres->src_id = &dres->__src_id;
1490     } else {
1491         dres->src_id = NULL;
1492     }
1493     devres_add(&sdev->dev, dres);
1494 
1495     return ret;
1496 }
1497 
1498 static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
1499 {
1500     struct scmi_notifier_devres *dres = res;
1501     struct scmi_notifier_devres *xres = data;
1502 
1503     if (WARN_ON(!dres || !xres))
1504         return 0;
1505 
1506     return dres->proto_id == xres->proto_id &&
1507         dres->evt_id == xres->evt_id &&
1508         dres->nb == xres->nb &&
1509         ((!dres->src_id && !xres->src_id) ||
1510           (dres->src_id && xres->src_id &&
1511            dres->__src_id == xres->__src_id));
1512 }
1513 
1514 /**
1515  * scmi_devm_notifier_unregister()  - Managed un-registration of a
1516  * notifier_block for an event
1517  * @sdev: A reference to an scmi_device whose embedded struct device is to
1518  *    be used for devres accounting.
1519  * @proto_id: Protocol ID
1520  * @evt_id: Event ID
1521  * @src_id: Source ID, when NULL register for events coming form ALL possible
1522  *      sources
1523  * @nb: A standard notifier block to register for the specified event
1524  *
1525  * Generic devres managed helper to explicitly un-register a notifier_block
1526  * against a protocol event, which was previously registered using the above
1527  * @scmi_devm_notifier_register.
1528  *
1529  * Return: 0 on Success
1530  */
1531 static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
1532                      u8 proto_id, u8 evt_id,
1533                      const u32 *src_id,
1534                      struct notifier_block *nb)
1535 {
1536     int ret;
1537     struct scmi_notifier_devres dres;
1538 
1539     dres.handle = sdev->handle;
1540     dres.proto_id = proto_id;
1541     dres.evt_id = evt_id;
1542     if (src_id) {
1543         dres.__src_id = *src_id;
1544         dres.src_id = &dres.__src_id;
1545     } else {
1546         dres.src_id = NULL;
1547     }
1548 
1549     ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
1550                  scmi_devm_notifier_match, &dres);
1551 
1552     WARN_ON(ret);
1553 
1554     return ret;
1555 }
1556 
1557 /**
1558  * scmi_protocols_late_init()  - Worker for late initialization
1559  * @work: The work item to use associated to the proper SCMI instance
1560  *
1561  * This kicks in whenever a new protocol has completed its own registration via
1562  * scmi_register_protocol_events(): it is in charge of scanning the table of
1563  * pending handlers (registered by users while the related protocol was still
1564  * not initialized) and finalizing their initialization whenever possible;
1565  * invalid pending handlers are purged at this point in time.
1566  */
1567 static void scmi_protocols_late_init(struct work_struct *work)
1568 {
1569     int bkt;
1570     struct scmi_event_handler *hndl;
1571     struct scmi_notify_instance *ni;
1572     struct hlist_node *tmp;
1573 
1574     ni = container_of(work, struct scmi_notify_instance, init_work);
1575 
1576     /* Ensure protocols and events are up to date */
1577     smp_rmb();
1578 
1579     mutex_lock(&ni->pending_mtx);
1580     hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
1581         int ret;
1582 
1583         ret = scmi_bind_event_handler(ni, hndl);
1584         if (!ret) {
1585             dev_dbg(ni->handle->dev,
1586                 "finalized PENDING handler - key:%X\n",
1587                 hndl->key);
1588             ret = scmi_event_handler_enable_events(hndl);
1589             if (ret) {
1590                 dev_dbg(ni->handle->dev,
1591                     "purging INVALID handler - key:%X\n",
1592                     hndl->key);
1593                 scmi_put_active_handler(ni, hndl);
1594             }
1595         } else {
1596             ret = scmi_valid_pending_handler(ni, hndl);
1597             if (ret) {
1598                 dev_dbg(ni->handle->dev,
1599                     "purging PENDING handler - key:%X\n",
1600                     hndl->key);
1601                 /* this hndl can be only a pending one */
1602                 scmi_put_handler_unlocked(ni, hndl);
1603             }
1604         }
1605     }
1606     mutex_unlock(&ni->pending_mtx);
1607 }
1608 
1609 /*
1610  * notify_ops are attached to the handle so that can be accessed
1611  * directly from an scmi_driver to register its own notifiers.
1612  */
1613 static const struct scmi_notify_ops notify_ops = {
1614     .devm_event_notifier_register = scmi_devm_notifier_register,
1615     .devm_event_notifier_unregister = scmi_devm_notifier_unregister,
1616     .event_notifier_register = scmi_notifier_register,
1617     .event_notifier_unregister = scmi_notifier_unregister,
1618 };
1619 
1620 /**
1621  * scmi_notification_init()  - Initializes Notification Core Support
1622  * @handle: The handle identifying the platform instance to initialize
1623  *
1624  * This function lays out all the basic resources needed by the notification
1625  * core instance identified by the provided handle: once done, all of the
1626  * SCMI Protocols can register their events with the core during their own
1627  * initializations.
1628  *
1629  * Note that failing to initialize the core notifications support does not
1630  * cause the whole SCMI Protocols stack to fail its initialization.
1631  *
1632  * SCMI Notification Initialization happens in 2 steps:
1633  * * initialization: basic common allocations (this function)
1634  * * registration: protocols asynchronously come into life and registers their
1635  *         own supported list of events with the core; this causes
1636  *         further per-protocol allocations
1637  *
1638  * Any user's callback registration attempt, referring a still not registered
1639  * event, will be registered as pending and finalized later (if possible)
1640  * by scmi_protocols_late_init() work.
1641  * This allows for lazy initialization of SCMI Protocols due to late (or
1642  * missing) SCMI drivers' modules loading.
1643  *
1644  * Return: 0 on Success
1645  */
1646 int scmi_notification_init(struct scmi_handle *handle)
1647 {
1648     void *gid;
1649     struct scmi_notify_instance *ni;
1650 
1651     gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
1652     if (!gid)
1653         return -ENOMEM;
1654 
1655     ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
1656     if (!ni)
1657         goto err;
1658 
1659     ni->gid = gid;
1660     ni->handle = handle;
1661 
1662     ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
1663                         sizeof(char *), GFP_KERNEL);
1664     if (!ni->registered_protocols)
1665         goto err;
1666 
1667     ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
1668                     WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
1669                     0);
1670     if (!ni->notify_wq)
1671         goto err;
1672 
1673     mutex_init(&ni->pending_mtx);
1674     hash_init(ni->pending_events_handlers);
1675 
1676     INIT_WORK(&ni->init_work, scmi_protocols_late_init);
1677 
1678     scmi_notification_instance_data_set(handle, ni);
1679     handle->notify_ops = &notify_ops;
1680     /* Ensure handle is up to date */
1681     smp_wmb();
1682 
1683     dev_info(handle->dev, "Core Enabled.\n");
1684 
1685     devres_close_group(handle->dev, ni->gid);
1686 
1687     return 0;
1688 
1689 err:
1690     dev_warn(handle->dev, "Initialization Failed.\n");
1691     devres_release_group(handle->dev, gid);
1692     return -ENOMEM;
1693 }
1694 
1695 /**
1696  * scmi_notification_exit()  - Shutdown and clean Notification core
1697  * @handle: The handle identifying the platform instance to shutdown
1698  */
1699 void scmi_notification_exit(struct scmi_handle *handle)
1700 {
1701     struct scmi_notify_instance *ni;
1702 
1703     ni = scmi_notification_instance_data_get(handle);
1704     if (!ni)
1705         return;
1706     scmi_notification_instance_data_set(handle, NULL);
1707 
1708     /* Destroy while letting pending work complete */
1709     destroy_workqueue(ni->notify_wq);
1710 
1711     devres_release_group(ni->handle->dev, ni->gid);
1712 }