/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
#define DRM_SCHEDULER_SPSC_QUEUE_H_

#include <linux/atomic.h>
#include <linux/preempt.h>

/** SPSC lockless queue */

struct spsc_node {

    /* Link to the next node in the queue; NULL for the last node. */
    struct spsc_node *next;
};

struct spsc_queue {

    struct spsc_node *head;

    /*
     * Points at the next pointer of the last node, or at head when the
     * queue is empty; stored as a long so it can be updated with the
     * atomic_long_*() helpers.
     */
    atomic_long_t tail;

    atomic_t job_count;
};

static inline void spsc_queue_init(struct spsc_queue *queue)
{
    queue->head = NULL;
    /* An empty queue's tail points at the head pointer itself. */
    atomic_long_set(&queue->tail, (long)&queue->head);
    atomic_set(&queue->job_count, 0);
}

static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
{
    return queue->head;
}

static inline int spsc_queue_count(struct spsc_queue *queue)
{
    return atomic_read(&queue->job_count);
}
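
/*
 * Setup sketch (illustrative only; struct my_sched and its job_queue member
 * are hypothetical names, not part of this header):
 *
 *    struct my_sched {
 *        struct spsc_queue job_queue;
 *    };
 *
 *    static void my_sched_init(struct my_sched *sched)
 *    {
 *        spsc_queue_init(&sched->job_queue);
 *    }
 */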

/*
 * Append @node to the queue. Returns true if the queue was empty before the
 * push, which the caller can use to decide whether to wake the consumer.
 */
static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
{
    struct spsc_node **tail;

    node->next = NULL;

    preempt_disable();

    tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
    WRITE_ONCE(*tail, node);
    atomic_inc(&queue->job_count);

    /*
     * Make sure the new node is visible to the consumer thread before we
     * ping the kernel thread that there is new work to do; this matters in
     * particular for the first element in the queue.
     */
    smp_wmb();

    preempt_enable();

    return tail == &queue->head;
}
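
/*
 * Producer-side usage sketch (illustrative only; struct my_job, struct
 * my_sched, the queue_node/job_queue members and wake_up_worker() are
 * hypothetical names, not part of this header):
 *
 *    struct my_job {
 *        struct spsc_node queue_node;
 *        // driver-specific payload ...
 *    };
 *
 *    static void my_submit(struct my_sched *sched, struct my_job *job)
 *    {
 *        // Wake the consumer only when the queue was previously empty.
 *        if (spsc_queue_push(&sched->job_queue, &job->queue_node))
 *            wake_up_worker(sched);
 *    }
 */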

/*
 * Remove and return the oldest node in the queue, or NULL if the queue is
 * empty. Intended to be called only from the single consumer thread.
 */
static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
{
    struct spsc_node *next, *node;

    /* Pairs with the smp_wmb() in spsc_queue_push(). */
    smp_rmb();

    node = READ_ONCE(queue->head);

    if (!node)
        return NULL;

    next = READ_ONCE(node->next);
    WRITE_ONCE(queue->head, next);

    if (unlikely(!next)) {
        /* Slow path for popping the last element in the queue. */

        if (atomic_long_cmpxchg(&queue->tail,
                (long)&node->next, (long)&queue->head) != (long)&node->next) {
            /* Updating tail failed; wait for the producer's next pointer to appear. */
            do {
                smp_rmb();
            } while (unlikely(!(queue->head = READ_ONCE(node->next))));
        }
    }

    atomic_dec(&queue->job_count);
    return node;
}
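
/*
 * Consumer-side usage sketch (illustrative only; struct my_job, struct
 * my_sched and the queue_node/job_queue members are the same hypothetical
 * names used in the producer sketch above):
 *
 *    static struct my_job *my_fetch_job(struct my_sched *sched)
 *    {
 *        struct spsc_node *node = spsc_queue_pop(&sched->job_queue);
 *
 *        if (!node)
 *            return NULL;
 *
 *        return container_of(node, struct my_job, queue_node);
 *    }
 */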

#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */