#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat	stat[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
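
/*
 * Usage sketch (illustrative, not part of the original header): a software
 * queue is resolved to the hardware queue that services it by mapping its
 * CPU through q->mq_map, e.g.:
 *
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 */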

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
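
/*
 * Usage sketch (illustrative, not part of the original header):
 * blk_mq_get_ctx() pins the caller to a CPU via get_cpu(), so every call
 * must be balanced by blk_mq_put_ctx() once the ctx is no longer needed:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... touch per-cpu ctx state; preemption is disabled here ...
 *
 *	blk_mq_put_ctx(ctx);
 */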

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}
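
/*
 * Usage sketch (illustrative call site, not part of the original header;
 * the BLK_MQ_REQ_NOWAIT flag is an assumption): bundle the allocation
 * parameters together before handing them to the request allocator:
 *
 *	struct blk_mq_alloc_data alloc_data;
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
 */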

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif