#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

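/* Allocate a flow_rule with a trailing array of num_actions action entries.
 * The entries are zeroed by kzalloc(); hw_stats is preset below so drivers
 * see "don't care" unless the caller asks for specific statistics.
 */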
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action entry's hw_stats with DONT_CARE.
	 * The caller can override this per action if it wants stats.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);

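/* Like flow_rule_alloc(), but for a standalone flow_offload_action as used by
 * the tc action offload path (actions only, no match part).
 */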
struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
{
	struct flow_offload_action *fl_action;
	int i;

	fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
			    GFP_KERNEL);
	if (!fl_action)
		return NULL;

	fl_action->action.num_entries = num_actions;
	/* Pre-fill each action entry's hw_stats with DONT_CARE.
	 * The caller can override this per action if it wants stats.
	 */
	for (i = 0; i < num_actions; i++)
		fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return fl_action;
}

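/* Resolve the key/mask pointers for one dissector key type out of the rule's
 * match and store them in the caller-provided flow_match_* structure.
 */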
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask)

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_ports_range(const struct flow_rule *rule,
				 struct flow_match_ports_range *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out);
}
EXPORT_SYMBOL(flow_rule_match_ports_range);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

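/* Duplicate an opaque cookie of 'len' bytes for action offload; the copy is
 * released with flow_action_cookie_destroy().
 */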
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

void flow_rule_match_pppoe(const struct flow_rule *rule,
			   struct flow_match_pppoe *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out);
}
EXPORT_SYMBOL(flow_rule_match_pppoe);

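/* Driver-side flow block callback bookkeeping: allocation, release, lookup by
 * (cb, cb_ident), private-data access and simple reference counting.
 */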
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

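/* Convenience helper for drivers with a single block callback: handles
 * FLOW_BLOCK_BIND/UNBIND and tracks the callback on driver_block_list.
 */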
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);

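/* Indirect block offload: lets a driver (e.g. a NIC offloading tunnel
 * devices) receive block bind/unbind events for net devices it does not own.
 */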
static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb = cb;
	indr_dev->cb_priv = cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

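/* Replay the recorded block setup requests on flow_indir_dev_list to a newly
 * registered callback, so a late-registering driver still sees existing
 * qdiscs/blocks.
 */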
static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}

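/* Register an indirect block callback. If the same (cb, cb_priv) pair is
 * already registered, only its reference count is bumped.
 */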
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, true);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

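/* Drop one reference on an indirect block callback; on the last reference,
 * detach all flow_block_cbs installed through it and run their cleanup.
 */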
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, false);
	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);

static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
			 struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}

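/* Offer a block (un)bind request to every registered indirect callback.
 * Returns the number of callbacks that accepted it, or -EOPNOTSUPP when a
 * flow_block_offload was passed and nobody attached to it.
 */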
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;
	u32 count = 0;
	int err;

	mutex_lock(&flow_indr_block_lock);
	if (bo) {
		if (bo->command == FLOW_BLOCK_BIND)
			indir_dev_add(data, dev, sch, type, cleanup, bo);
		else if (bo->command == FLOW_BLOCK_UNBIND)
			indir_dev_remove(data);
	}

	list_for_each_entry(this, &flow_block_indr_dev_list, list) {
		err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
		if (!err)
			count++;
	}

	mutex_unlock(&flow_indr_block_lock);

	return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);

bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);