#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "mmu_rb.h"
#include "trace.h"

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
				    const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
	.invalidate_range_start = mmu_notifier_range_start,
};

INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

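/*
 * hfi1_mmu_rb_register() - allocate an interval-tree handler and hook it
 * into the MMU notifier chain of the current task's mm.
 *
 * The caller supplies the mmu_rb_ops callbacks (filter, insert, evict,
 * remove, invalidate) used by the operations below, plus a workqueue used
 * to run deferred removals triggered from the MMU notifier.  On success
 * *handler is set and 0 is returned; on failure the allocation is freed
 * and the mmu_notifier_register() error is returned.
 */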
int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *h;
	int ret;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->root = RB_ROOT_CACHED;
	h->ops = ops;
	h->ops_arg = ops_arg;
	INIT_HLIST_NODE(&h->mn.hlist);
	spin_lock_init(&h->lock);
	h->mn.ops = &mn_opts;
	INIT_WORK(&h->del_work, handle_remove);
	INIT_LIST_HEAD(&h->del_list);
	INIT_LIST_HEAD(&h->lru_list);
	h->wq = wq;

	ret = mmu_notifier_register(&h->mn, current->mm);
	if (ret) {
		kfree(h);
		return ret;
	}

	*handler = h;
	return 0;
}

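/*
 * hfi1_mmu_rb_unregister() - tear down a handler: detach the MMU notifier,
 * drain the deferred-removal work, remove every node still in the tree,
 * and free the handler.
 */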
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Prevent freeing of the mm until we are completely finished. */
	mmgrab(handler->mn.mm);

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mn.mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifier is unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	/* Now the mm may be freed. */
	mmdrop(handler->mn.mm);

	kfree(handler);
}

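/*
 * hfi1_mmu_rb_insert() - add a node to the interval tree and LRU list.
 *
 * Fails with -EPERM if called from a task whose mm is not the one the
 * handler was registered against, and with -EINVAL if the search finds an
 * existing node for the range.  The ops->insert() callback is invoked
 * under the handler lock; if it fails the node is backed out again.
 */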
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);

	if (current->mm != handler->mn.mm)
		return -EPERM;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list);
	}
	mnode->handler = handler;
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

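/*
 * hfi1_mmu_rb_remove_unless_exact() - search for a node overlapping
 * [addr, addr + len).  An exact match is left in the tree and false is
 * returned; any other match is unlinked from the tree and LRU list and
 * true is returned.  *rb_node is set to the node found (if any), whether
 * or not it was removed.
 */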
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len,
				     struct mmu_rb_node **rb_node)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	bool ret = false;

	if (current->mm != handler->mn.mm)
		return ret;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node) {
		if (node->addr == addr && node->len == len)
			goto unlock;
		__mmu_int_rb_remove(node, &handler->root);
		list_del(&node->list);
		ret = true;
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	*rb_node = node;
	return ret;
}

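/*
 * hfi1_mmu_rb_evict() - walk the LRU list starting with the least recently
 * added nodes, asking the ops->evict() callback whether each node may be
 * evicted.  Evictable nodes are unlinked under the handler lock and handed
 * to ops->remove() after the lock is dropped.  The walk stops once the
 * callback sets *stop.
 */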
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	if (current->mm != handler->mn.mm)
		return;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
					 list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}

/*
 * It is up to the caller to ensure that this function does not race with
 * the mmu invalidate notifier, which may be calling the user's remove
 * callback on 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
			struct mmu_rb_node *node)
{
	unsigned long flags;

	if (current->mm != handler->mn.mm)
		return;

	/* Validity of handler and node pointers has been checked by caller. */
	trace_hfi1_mmu_rb_remove(node->addr, node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, &handler->root);
	list_del(&node->list);
	spin_unlock_irqrestore(&handler->lock, flags);

	handler->ops->remove(handler->ops_arg, node);
}

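/*
 * MMU notifier callback: invoked when a range of the registered mm is about
 * to be invalidated.  Nodes overlapping the range whose ops->invalidate()
 * callback returns nonzero are unlinked and queued on del_list; the actual
 * ops->remove() calls are deferred to the workqueue via handle_remove().
 */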
static int mmu_notifier_range_start(struct mmu_notifier *mn,
				    const struct mmu_notifier_range *range)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, range->start, range->end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, range->start,
					     range->end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);

	return 0;
}

/*
 * Call the remove callback for every node on the list.  This is expected
 * to be called with a delete list extracted from the handler.  The caller
 * must not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}

/*
 * Work queue function to remove all nodes that have been queued up to be
 * removed.  Running from the workqueue rather than from the notifier
 * (which holds the handler lock) allows the remove callback to sleep.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						      struct mmu_rb_handler,
						      del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}