/* SPDX-License-Identifier: GPL-2.0-only */

0005 #ifndef __MMAP_UNLOCK_WORK_H__
0006 #define __MMAP_UNLOCK_WORK_H__
0007 #include <linux/irq_work.h>
0008
0009
/*
 * Work item used to defer releasing an mm's mmap_lock to irq_work
 * context, for callers that hold the lock while IRQs are disabled
 * (see bpf_mmap_unlock_get_irq_work()/bpf_mmap_unlock_mm() below).
 */
struct mmap_unlock_irq_work {
	struct irq_work irq_work;	/* queued to perform the deferred unlock */
	struct mm_struct *mm;		/* mm whose mmap_lock is to be released */
};

/* One work item per CPU; the definition lives in the corresponding .c file. */
DECLARE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
0016
0017
0018
0019
0020
0021
0022
0023
0024 static inline bool bpf_mmap_unlock_get_irq_work(struct mmap_unlock_irq_work **work_ptr)
0025 {
0026 struct mmap_unlock_irq_work *work = NULL;
0027 bool irq_work_busy = false;
0028
0029 if (irqs_disabled()) {
0030 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
0031 work = this_cpu_ptr(&mmap_unlock_work);
0032 if (irq_work_is_busy(&work->irq_work)) {
0033
0034 irq_work_busy = true;
0035 }
0036 } else {
0037
0038
0039
0040
0041 irq_work_busy = true;
0042 }
0043 }
0044
0045 *work_ptr = work;
0046 return irq_work_busy;
0047 }
0048
0049 static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
0050 {
0051 if (!work) {
0052 mmap_read_unlock(mm);
0053 } else {
0054 work->mm = mm;
0055
0056
0057
0058
0059
0060 rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
0061 irq_work_queue(&work->irq_work);
0062 }
0063 }
0064
0065 #endif