0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/debugfs.h>
0009 #include <linux/device.h>
0010 #include <linux/err.h>
0011 #include <linux/i2c.h>
0012 #include <linux/list.h>
0013 #include <linux/mm.h>
0014 #include <linux/module.h>
0015 #include <linux/mutex.h>
0016 #include <linux/of.h>
0017 #include <linux/platform_device.h>
0018 #include <linux/seq_file.h>
0019 #include <linux/slab.h>
0020 #include <linux/types.h>
0021
0022 #include <media/v4l2-async.h>
0023 #include <media/v4l2-device.h>
0024 #include <media/v4l2-fwnode.h>
0025 #include <media/v4l2-subdev.h>
0026
0027 static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
0028 struct v4l2_subdev *subdev,
0029 struct v4l2_async_subdev *asd)
0030 {
0031 if (!n->ops || !n->ops->bound)
0032 return 0;
0033
0034 return n->ops->bound(n, subdev, asd);
0035 }
0036
0037 static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
0038 struct v4l2_subdev *subdev,
0039 struct v4l2_async_subdev *asd)
0040 {
0041 if (!n->ops || !n->ops->unbind)
0042 return;
0043
0044 n->ops->unbind(n, subdev, asd);
0045 }
0046
0047 static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
0048 {
0049 if (!n->ops || !n->ops->complete)
0050 return 0;
0051
0052 return n->ops->complete(n);
0053 }
0054
0055 static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
0056 struct v4l2_async_subdev *asd)
0057 {
0058 if (!n->ops || !n->ops->destroy)
0059 return;
0060
0061 n->ops->destroy(asd);
0062 }
0063
0064 static bool match_i2c(struct v4l2_async_notifier *notifier,
0065 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
0066 {
0067 #if IS_ENABLED(CONFIG_I2C)
0068 struct i2c_client *client = i2c_verify_client(sd->dev);
0069
0070 return client &&
0071 asd->match.i2c.adapter_id == client->adapter->nr &&
0072 asd->match.i2c.address == client->addr;
0073 #else
0074 return false;
0075 #endif
0076 }
0077
/*
 * Match @sd_fwnode (the subdev's primary or secondary fwnode) against the
 * async descriptor's match fwnode. An exact fwnode match succeeds directly;
 * otherwise an endpoint fwnode on one side may match the device (port-parent)
 * fwnode on the other side, in which case a diagnostic is printed suggesting
 * the driver be updated to match on endpoints.
 */
static bool
match_fwnode_one(struct v4l2_async_notifier *notifier,
		 struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
		 struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/* Simple case first: the two fwnodes are identical. */
	if (sd_fwnode == asd->match.fwnode)
		return true;

	/*
	 * Otherwise a match is only possible when exactly one of the two
	 * fwnodes is an endpoint (the other then being a device fwnode).
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd_fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * Get the device fwnode that is the port parent of the endpoint
	 * fwnode, and compare it against the non-endpoint fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd_fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd_fwnode;
	}

	/* Drop the reference from fwnode_graph_get_port_parent(). */
	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * Heterogeneous match: pick the device whose driver matched on a
	 * device fwnode so its name can be reported below.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}
0148
0149 static bool match_fwnode(struct v4l2_async_notifier *notifier,
0150 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
0151 {
0152 if (match_fwnode_one(notifier, sd, sd->fwnode, asd))
0153 return true;
0154
0155
0156 if (IS_ERR_OR_NULL(sd->fwnode->secondary))
0157 return false;
0158
0159 return match_fwnode_one(notifier, sd, sd->fwnode->secondary, asd);
0160 }
0161
/* Subdevs registered but not yet matched by any notifier. */
static LIST_HEAD(subdev_list);
/* All registered notifiers. */
static LIST_HEAD(notifier_list);
/* Serializes access to the two lists above and the async binding state. */
static DEFINE_MUTEX(list_lock);
0165
0166 static struct v4l2_async_subdev *
0167 v4l2_async_find_match(struct v4l2_async_notifier *notifier,
0168 struct v4l2_subdev *sd)
0169 {
0170 bool (*match)(struct v4l2_async_notifier *notifier,
0171 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
0172 struct v4l2_async_subdev *asd;
0173
0174 list_for_each_entry(asd, ¬ifier->waiting, list) {
0175
0176 switch (asd->match_type) {
0177 case V4L2_ASYNC_MATCH_I2C:
0178 match = match_i2c;
0179 break;
0180 case V4L2_ASYNC_MATCH_FWNODE:
0181 match = match_fwnode;
0182 break;
0183 default:
0184
0185 WARN_ON(true);
0186 return NULL;
0187 }
0188
0189
0190 if (match(notifier, sd, asd))
0191 return asd;
0192 }
0193
0194 return NULL;
0195 }
0196
0197
0198 static bool asd_equal(struct v4l2_async_subdev *asd_x,
0199 struct v4l2_async_subdev *asd_y)
0200 {
0201 if (asd_x->match_type != asd_y->match_type)
0202 return false;
0203
0204 switch (asd_x->match_type) {
0205 case V4L2_ASYNC_MATCH_I2C:
0206 return asd_x->match.i2c.adapter_id ==
0207 asd_y->match.i2c.adapter_id &&
0208 asd_x->match.i2c.address ==
0209 asd_y->match.i2c.address;
0210 case V4L2_ASYNC_MATCH_FWNODE:
0211 return asd_x->match.fwnode == asd_y->match.fwnode;
0212 default:
0213 break;
0214 }
0215
0216 return false;
0217 }
0218
0219
0220 static struct v4l2_async_notifier *
0221 v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
0222 {
0223 struct v4l2_async_notifier *n;
0224
0225 list_for_each_entry(n, ¬ifier_list, list)
0226 if (n->sd == sd)
0227 return n;
0228
0229 return NULL;
0230 }
0231
0232
0233 static struct v4l2_device *
0234 v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
0235 {
0236 while (notifier->parent)
0237 notifier = notifier->parent;
0238
0239 return notifier->v4l2_dev;
0240 }
0241
0242
0243
0244
0245 static bool
0246 v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
0247 {
0248 struct v4l2_subdev *sd;
0249
0250 if (!list_empty(¬ifier->waiting))
0251 return false;
0252
0253 list_for_each_entry(sd, ¬ifier->done, async_list) {
0254 struct v4l2_async_notifier *subdev_notifier =
0255 v4l2_async_find_subdev_notifier(sd);
0256
0257 if (subdev_notifier &&
0258 !v4l2_async_nf_can_complete(subdev_notifier))
0259 return false;
0260 }
0261
0262 return true;
0263 }
0264
0265
0266
0267
0268
0269 static int
0270 v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
0271 {
0272
0273 if (!list_empty(¬ifier->waiting))
0274 return 0;
0275
0276
0277 while (notifier->parent)
0278 notifier = notifier->parent;
0279
0280
0281 if (!notifier->v4l2_dev)
0282 return 0;
0283
0284
0285 if (!v4l2_async_nf_can_complete(notifier))
0286 return 0;
0287
0288 return v4l2_async_nf_call_complete(notifier);
0289 }
0290
0291 static int
0292 v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);
0293
/*
 * Create an ancillary media link from the notifier's subdev to @sd when @sd
 * is a lens or a flash entity. No-op (returns 0) for other entity functions
 * or when the media controller is not enabled.
 */
static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
					     struct v4l2_subdev *sd)
{
	struct media_link *link = NULL;

#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
	/* Only lens and flash entities get ancillary links. */
	if (sd->entity.function != MEDIA_ENT_F_LENS &&
	    sd->entity.function != MEDIA_ENT_F_FLASH)
		return 0;

	link = media_create_ancillary_link(&n->sd->entity, &sd->entity);

#endif

	/* link stays NULL without CONFIG_MEDIA_CONTROLLER: IS_ERR(NULL) is false. */
	return IS_ERR(link) ? PTR_ERR(link) : 0;
}
0311
0312 static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
0313 struct v4l2_device *v4l2_dev,
0314 struct v4l2_subdev *sd,
0315 struct v4l2_async_subdev *asd)
0316 {
0317 struct v4l2_async_notifier *subdev_notifier;
0318 int ret;
0319
0320 ret = v4l2_device_register_subdev(v4l2_dev, sd);
0321 if (ret < 0)
0322 return ret;
0323
0324 ret = v4l2_async_nf_call_bound(notifier, sd, asd);
0325 if (ret < 0) {
0326 v4l2_device_unregister_subdev(sd);
0327 return ret;
0328 }
0329
0330
0331
0332
0333
0334
0335
0336 ret = v4l2_async_create_ancillary_links(notifier, sd);
0337 if (ret) {
0338 v4l2_async_nf_call_unbind(notifier, sd, asd);
0339 v4l2_device_unregister_subdev(sd);
0340 return ret;
0341 }
0342
0343
0344 list_del(&asd->list);
0345 sd->asd = asd;
0346 sd->notifier = notifier;
0347
0348
0349 list_move(&sd->async_list, ¬ifier->done);
0350
0351
0352
0353
0354 subdev_notifier = v4l2_async_find_subdev_notifier(sd);
0355 if (!subdev_notifier || subdev_notifier->parent)
0356 return 0;
0357
0358
0359
0360
0361
0362
0363 subdev_notifier->parent = notifier;
0364
0365 return v4l2_async_nf_try_all_subdevs(subdev_notifier);
0366 }
0367
0368
/* Try to match every unbound subdev on the global list against @notifier. */
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_nf_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	/* Nothing can be bound until the notifier tree has a v4l2_device. */
	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may have moved this subdev (and,
		 * via a sub-device notifier, possibly others) off the global
		 * subdev_list we are iterating, so restart the walk from the
		 * beginning rather than continuing from a moved node.
		 */
		goto again;
	}

	return 0;
}
0403
/* Unregister the subdev from its v4l2_device and reset its async state. */
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);

	/*
	 * Re-initialise the list entry (rather than plain list_del) so the
	 * node stays safe to delete or re-add later.
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}
0414
0415
0416 static void
0417 v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
0418 {
0419 struct v4l2_subdev *sd, *tmp;
0420
0421 list_for_each_entry_safe(sd, tmp, ¬ifier->done, async_list) {
0422 struct v4l2_async_notifier *subdev_notifier =
0423 v4l2_async_find_subdev_notifier(sd);
0424
0425 if (subdev_notifier)
0426 v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
0427
0428 v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
0429 v4l2_async_cleanup(sd);
0430
0431 list_move(&sd->async_list, &subdev_list);
0432 }
0433
0434 notifier->parent = NULL;
0435 }
0436
0437
0438 static bool
0439 __v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
0440 struct v4l2_async_subdev *asd)
0441 {
0442 struct v4l2_async_subdev *asd_y;
0443 struct v4l2_subdev *sd;
0444
0445 list_for_each_entry(asd_y, ¬ifier->waiting, list)
0446 if (asd_equal(asd, asd_y))
0447 return true;
0448
0449 list_for_each_entry(sd, ¬ifier->done, async_list) {
0450 if (WARN_ON(!sd->asd))
0451 continue;
0452
0453 if (asd_equal(asd, sd->asd))
0454 return true;
0455 }
0456
0457 return false;
0458 }
0459
0460
0461
0462
0463
0464
0465 static bool
0466 v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
0467 struct v4l2_async_subdev *asd, int this_index)
0468 {
0469 struct v4l2_async_subdev *asd_y;
0470 int j = 0;
0471
0472 lockdep_assert_held(&list_lock);
0473
0474
0475 list_for_each_entry(asd_y, ¬ifier->asd_list, asd_list) {
0476 if (this_index >= 0 && j++ >= this_index)
0477 break;
0478 if (asd_equal(asd, asd_y))
0479 return true;
0480 }
0481
0482
0483 list_for_each_entry(notifier, ¬ifier_list, list)
0484 if (__v4l2_async_nf_has_async_subdev(notifier, asd))
0485 return true;
0486
0487 return false;
0488 }
0489
0490 static int v4l2_async_nf_asd_valid(struct v4l2_async_notifier *notifier,
0491 struct v4l2_async_subdev *asd,
0492 int this_index)
0493 {
0494 struct device *dev =
0495 notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;
0496
0497 if (!asd)
0498 return -EINVAL;
0499
0500 switch (asd->match_type) {
0501 case V4L2_ASYNC_MATCH_I2C:
0502 case V4L2_ASYNC_MATCH_FWNODE:
0503 if (v4l2_async_nf_has_async_subdev(notifier, asd, this_index)) {
0504 dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
0505 return -EEXIST;
0506 }
0507 break;
0508 default:
0509 dev_err(dev, "Invalid match type %u on %p\n",
0510 asd->match_type, asd);
0511 return -EINVAL;
0512 }
0513
0514 return 0;
0515 }
0516
0517 void v4l2_async_nf_init(struct v4l2_async_notifier *notifier)
0518 {
0519 INIT_LIST_HEAD(¬ifier->asd_list);
0520 }
0521 EXPORT_SYMBOL(v4l2_async_nf_init);
0522
0523 static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
0524 {
0525 struct v4l2_async_subdev *asd;
0526 int ret, i = 0;
0527
0528 INIT_LIST_HEAD(¬ifier->waiting);
0529 INIT_LIST_HEAD(¬ifier->done);
0530
0531 mutex_lock(&list_lock);
0532
0533 list_for_each_entry(asd, ¬ifier->asd_list, asd_list) {
0534 ret = v4l2_async_nf_asd_valid(notifier, asd, i++);
0535 if (ret)
0536 goto err_unlock;
0537
0538 list_add_tail(&asd->list, ¬ifier->waiting);
0539 }
0540
0541 ret = v4l2_async_nf_try_all_subdevs(notifier);
0542 if (ret < 0)
0543 goto err_unbind;
0544
0545 ret = v4l2_async_nf_try_complete(notifier);
0546 if (ret < 0)
0547 goto err_unbind;
0548
0549
0550 list_add(¬ifier->list, ¬ifier_list);
0551
0552 mutex_unlock(&list_lock);
0553
0554 return 0;
0555
0556 err_unbind:
0557
0558
0559
0560 v4l2_async_nf_unbind_all_subdevs(notifier);
0561
0562 err_unlock:
0563 mutex_unlock(&list_lock);
0564
0565 return ret;
0566 }
0567
0568 int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
0569 struct v4l2_async_notifier *notifier)
0570 {
0571 int ret;
0572
0573 if (WARN_ON(!v4l2_dev || notifier->sd))
0574 return -EINVAL;
0575
0576 notifier->v4l2_dev = v4l2_dev;
0577
0578 ret = __v4l2_async_nf_register(notifier);
0579 if (ret)
0580 notifier->v4l2_dev = NULL;
0581
0582 return ret;
0583 }
0584 EXPORT_SYMBOL(v4l2_async_nf_register);
0585
0586 int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
0587 struct v4l2_async_notifier *notifier)
0588 {
0589 int ret;
0590
0591 if (WARN_ON(!sd || notifier->v4l2_dev))
0592 return -EINVAL;
0593
0594 notifier->sd = sd;
0595
0596 ret = __v4l2_async_nf_register(notifier);
0597 if (ret)
0598 notifier->sd = NULL;
0599
0600 return ret;
0601 }
0602 EXPORT_SYMBOL(v4l2_async_subdev_nf_register);
0603
0604 static void
0605 __v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
0606 {
0607 if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
0608 return;
0609
0610 v4l2_async_nf_unbind_all_subdevs(notifier);
0611
0612 notifier->sd = NULL;
0613 notifier->v4l2_dev = NULL;
0614
0615 list_del(¬ifier->list);
0616 }
0617
0618 void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
0619 {
0620 mutex_lock(&list_lock);
0621
0622 __v4l2_async_nf_unregister(notifier);
0623
0624 mutex_unlock(&list_lock);
0625 }
0626 EXPORT_SYMBOL(v4l2_async_nf_unregister);
0627
0628 static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
0629 {
0630 struct v4l2_async_subdev *asd, *tmp;
0631
0632 if (!notifier || !notifier->asd_list.next)
0633 return;
0634
0635 list_for_each_entry_safe(asd, tmp, ¬ifier->asd_list, asd_list) {
0636 switch (asd->match_type) {
0637 case V4L2_ASYNC_MATCH_FWNODE:
0638 fwnode_handle_put(asd->match.fwnode);
0639 break;
0640 default:
0641 break;
0642 }
0643
0644 list_del(&asd->asd_list);
0645 v4l2_async_nf_call_destroy(notifier, asd);
0646 kfree(asd);
0647 }
0648 }
0649
0650 void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
0651 {
0652 mutex_lock(&list_lock);
0653
0654 __v4l2_async_nf_cleanup(notifier);
0655
0656 mutex_unlock(&list_lock);
0657 }
0658 EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);
0659
0660 int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
0661 struct v4l2_async_subdev *asd)
0662 {
0663 int ret;
0664
0665 mutex_lock(&list_lock);
0666
0667 ret = v4l2_async_nf_asd_valid(notifier, asd, -1);
0668 if (ret)
0669 goto unlock;
0670
0671 list_add_tail(&asd->asd_list, ¬ifier->asd_list);
0672
0673 unlock:
0674 mutex_unlock(&list_lock);
0675 return ret;
0676 }
0677 EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_subdev);
0678
0679 struct v4l2_async_subdev *
0680 __v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
0681 struct fwnode_handle *fwnode,
0682 unsigned int asd_struct_size)
0683 {
0684 struct v4l2_async_subdev *asd;
0685 int ret;
0686
0687 asd = kzalloc(asd_struct_size, GFP_KERNEL);
0688 if (!asd)
0689 return ERR_PTR(-ENOMEM);
0690
0691 asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
0692 asd->match.fwnode = fwnode_handle_get(fwnode);
0693
0694 ret = __v4l2_async_nf_add_subdev(notifier, asd);
0695 if (ret) {
0696 fwnode_handle_put(fwnode);
0697 kfree(asd);
0698 return ERR_PTR(ret);
0699 }
0700
0701 return asd;
0702 }
0703 EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);
0704
0705 struct v4l2_async_subdev *
0706 __v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
0707 struct fwnode_handle *endpoint,
0708 unsigned int asd_struct_size)
0709 {
0710 struct v4l2_async_subdev *asd;
0711 struct fwnode_handle *remote;
0712
0713 remote = fwnode_graph_get_remote_endpoint(endpoint);
0714 if (!remote)
0715 return ERR_PTR(-ENOTCONN);
0716
0717 asd = __v4l2_async_nf_add_fwnode(notif, remote, asd_struct_size);
0718
0719
0720
0721
0722 fwnode_handle_put(remote);
0723 return asd;
0724 }
0725 EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);
0726
0727 struct v4l2_async_subdev *
0728 __v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
0729 unsigned short address, unsigned int asd_struct_size)
0730 {
0731 struct v4l2_async_subdev *asd;
0732 int ret;
0733
0734 asd = kzalloc(asd_struct_size, GFP_KERNEL);
0735 if (!asd)
0736 return ERR_PTR(-ENOMEM);
0737
0738 asd->match_type = V4L2_ASYNC_MATCH_I2C;
0739 asd->match.i2c.adapter_id = adapter_id;
0740 asd->match.i2c.address = address;
0741
0742 ret = __v4l2_async_nf_add_subdev(notifier, asd);
0743 if (ret) {
0744 kfree(asd);
0745 return ERR_PTR(ret);
0746 }
0747
0748 return asd;
0749 }
0750 EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);
0751
0752 int v4l2_async_register_subdev(struct v4l2_subdev *sd)
0753 {
0754 struct v4l2_async_notifier *subdev_notifier;
0755 struct v4l2_async_notifier *notifier;
0756 int ret;
0757
0758
0759
0760
0761
0762
0763 if (!sd->fwnode && sd->dev)
0764 sd->fwnode = dev_fwnode(sd->dev);
0765
0766 mutex_lock(&list_lock);
0767
0768 INIT_LIST_HEAD(&sd->async_list);
0769
0770 list_for_each_entry(notifier, ¬ifier_list, list) {
0771 struct v4l2_device *v4l2_dev =
0772 v4l2_async_nf_find_v4l2_dev(notifier);
0773 struct v4l2_async_subdev *asd;
0774
0775 if (!v4l2_dev)
0776 continue;
0777
0778 asd = v4l2_async_find_match(notifier, sd);
0779 if (!asd)
0780 continue;
0781
0782 ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
0783 if (ret)
0784 goto err_unbind;
0785
0786 ret = v4l2_async_nf_try_complete(notifier);
0787 if (ret)
0788 goto err_unbind;
0789
0790 goto out_unlock;
0791 }
0792
0793
0794 list_add(&sd->async_list, &subdev_list);
0795
0796 out_unlock:
0797 mutex_unlock(&list_lock);
0798
0799 return 0;
0800
0801 err_unbind:
0802
0803
0804
0805
0806 subdev_notifier = v4l2_async_find_subdev_notifier(sd);
0807 if (subdev_notifier)
0808 v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
0809
0810 if (sd->asd)
0811 v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
0812 v4l2_async_cleanup(sd);
0813
0814 mutex_unlock(&list_lock);
0815
0816 return ret;
0817 }
0818 EXPORT_SYMBOL(v4l2_async_register_subdev);
0819
0820 void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
0821 {
0822 if (!sd->async_list.next)
0823 return;
0824
0825 mutex_lock(&list_lock);
0826
0827 __v4l2_async_nf_unregister(sd->subdev_notifier);
0828 __v4l2_async_nf_cleanup(sd->subdev_notifier);
0829 kfree(sd->subdev_notifier);
0830 sd->subdev_notifier = NULL;
0831
0832 if (sd->asd) {
0833 struct v4l2_async_notifier *notifier = sd->notifier;
0834
0835 list_add(&sd->asd->list, ¬ifier->waiting);
0836
0837 v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
0838 }
0839
0840 v4l2_async_cleanup(sd);
0841
0842 mutex_unlock(&list_lock);
0843 }
0844 EXPORT_SYMBOL(v4l2_async_unregister_subdev);
0845
/* debugfs helper: print one still-unmatched async subdev descriptor. */
static void print_waiting_subdev(struct seq_file *s,
				 struct v4l2_async_subdev *asd)
{
	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
			   asd->match.i2c.address);
		break;
	case V4L2_ASYNC_MATCH_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;

		/*
		 * For an endpoint fwnode, report its parent device node; both
		 * branches take a reference that is dropped below.
		 */
		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}
0870
0871 static const char *
0872 v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
0873 {
0874 if (notifier->v4l2_dev)
0875 return notifier->v4l2_dev->name;
0876 else if (notifier->sd)
0877 return notifier->sd->name;
0878 else
0879 return "nil";
0880 }
0881
0882 static int pending_subdevs_show(struct seq_file *s, void *data)
0883 {
0884 struct v4l2_async_notifier *notif;
0885 struct v4l2_async_subdev *asd;
0886
0887 mutex_lock(&list_lock);
0888
0889 list_for_each_entry(notif, ¬ifier_list, list) {
0890 seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
0891 list_for_each_entry(asd, ¬if->waiting, list)
0892 print_waiting_subdev(s, asd);
0893 }
0894
0895 mutex_unlock(&list_lock);
0896
0897 return 0;
0898 }
0899 DEFINE_SHOW_ATTRIBUTE(pending_subdevs);
0900
/* debugfs directory holding the pending_async_subdevices file. */
static struct dentry *v4l2_async_debugfs_dir;
0902
/* Create the v4l2-async debugfs directory and its pending-subdevs file. */
static int __init v4l2_async_init(void)
{
	v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
	debugfs_create_file("pending_async_subdevices", 0444,
			    v4l2_async_debugfs_dir, NULL,
			    &pending_subdevs_fops);

	return 0;
}
0912
/* Remove the v4l2-async debugfs directory and everything under it. */
static void __exit v4l2_async_exit(void)
{
	debugfs_remove_recursive(v4l2_async_debugfs_dir);
}
0917
0918 subsys_initcall(v4l2_async_init);
0919 module_exit(v4l2_async_exit);
0920
0921 MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
0922 MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
0923 MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
0924 MODULE_LICENSE("GPL");