0001
0002
0003
0004
0005
0006
0007 #include <linux/debugfs.h>
0008 #include <linux/dma-mapping.h>
0009 #include <linux/host1x.h>
0010 #include <linux/of.h>
0011 #include <linux/seq_file.h>
0012 #include <linux/slab.h>
0013 #include <linux/of_device.h>
0014
0015 #include "bus.h"
0016 #include "dev.h"
0017
/* clients that have not (yet) been bound to a host1x logical device */
static DEFINE_MUTEX(clients_lock);
static LIST_HEAD(clients);

/* all registered host1x drivers */
static DEFINE_MUTEX(drivers_lock);
static LIST_HEAD(drivers);

/* all registered host1x controller instances (struct host1x) */
static DEFINE_MUTEX(devices_lock);
static LIST_HEAD(devices);
0026
/*
 * struct host1x_subdev - association of one device tree node with a device
 * @client: client bound to this subdevice (NULL while unbound)
 * @np: device tree node represented by this subdevice (reference held)
 * @list: entry in the device's ->subdevs (idle) or ->active (bound) list
 */
struct host1x_subdev {
	struct host1x_client *client;
	struct device_node *np;
	struct list_head list;
};
0032
0033
0034
0035
0036
0037
0038
/**
 * host1x_subdev_add() - add a new subdevice with an associated device node
 * @device: host1x logical device to add the subdevice to
 * @driver: host1x driver whose ->subdevs table matched @np
 * @np: device tree node to associate with the new subdevice
 *
 * Allocates a subdevice for @np, queues it on the device's ->subdevs list
 * and recurses into @np's children so that nested nodes matching the
 * driver's subdevice table are picked up as well.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int host1x_subdev_add(struct host1x_device *device,
			     struct host1x_driver *driver,
			     struct device_node *np)
{
	struct host1x_subdev *subdev;
	struct device_node *child;
	int err;

	subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
	if (!subdev)
		return -ENOMEM;

	INIT_LIST_HEAD(&subdev->list);
	/* hold a reference on the node for the lifetime of the subdevice */
	subdev->np = of_node_get(np);

	mutex_lock(&device->subdevs_lock);
	list_add_tail(&subdev->list, &device->subdevs);
	mutex_unlock(&device->subdevs_lock);

	/* also add subdevices for the children of this device node */
	for_each_child_of_node(np, child) {
		if (of_match_node(driver->subdevs, child) &&
		    of_device_is_available(child)) {
			err = host1x_subdev_add(device, driver, child);
			if (err < 0) {
				/*
				 * Drop the iterator's reference on early
				 * exit; subdevices added so far stay queued
				 * on ->subdevs for the caller to clean up.
				 */
				of_node_put(child);
				return err;
			}
		}
	}

	return 0;
}
0073
0074
0075
0076
0077
/**
 * host1x_subdev_del() - remove subdevice
 * @subdev: subdevice to remove
 *
 * Unlinks the subdevice from whichever list it is currently on (the caller
 * is expected to hold the owning device's subdevs_lock), drops the device
 * node reference taken in host1x_subdev_add() and frees it.
 */
static void host1x_subdev_del(struct host1x_subdev *subdev)
{
	list_del(&subdev->list);
	of_node_put(subdev->np);
	kfree(subdev);
}
0084
0085
0086
0087
0088
0089
/**
 * host1x_device_parse_dt() - scan device tree and add matching subdevices
 * @device: host1x logical device
 * @driver: host1x driver whose ->subdevs match table is consulted
 *
 * Walks the children of the host1x controller's device tree node (the
 * logical device's parent) and adds a subdevice for every available node
 * that matches the driver's subdevice table; host1x_subdev_add() recurses
 * into the children of matching nodes.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int host1x_device_parse_dt(struct host1x_device *device,
				  struct host1x_driver *driver)
{
	struct device_node *np;
	int err;

	for_each_child_of_node(device->dev.parent->of_node, np) {
		if (of_match_node(driver->subdevs, np) &&
		    of_device_is_available(np)) {
			err = host1x_subdev_add(device, driver, np);
			if (err < 0) {
				/* drop the iterator's reference on early exit */
				of_node_put(np);
				return err;
			}
		}
	}

	return 0;
}
0109
/*
 * Bind @client to @subdev: move the client onto the device's client list,
 * the subdevice onto the active list, and link the two together. Once the
 * last subdevice has been bound (->subdevs empty), the composite logical
 * device is complete and gets registered with the driver core.
 */
static void host1x_subdev_register(struct host1x_device *device,
				   struct host1x_subdev *subdev,
				   struct host1x_client *client)
{
	int err;

	/*
	 * Both lists are updated under their locks so that concurrent
	 * registrations observe a consistent state. Lock order is
	 * subdevs_lock, then clients_lock (same as the unregister path).
	 */
	mutex_lock(&device->subdevs_lock);
	mutex_lock(&device->clients_lock);
	list_move_tail(&client->list, &device->clients);
	list_move_tail(&subdev->list, &device->active);
	client->host = &device->dev;
	subdev->client = client;
	mutex_unlock(&device->clients_lock);
	mutex_unlock(&device->subdevs_lock);

	/* all subdevices bound: make the logical device visible */
	if (list_empty(&device->subdevs)) {
		err = device_add(&device->dev);
		if (err < 0)
			dev_err(&device->dev, "failed to add: %d\n", err);
		else
			device->registered = true;
	}
}
0138
/*
 * Unbind @subdev's client and move the subdevice back to the idle list.
 * Caller must hold device->subdevs_lock (see host1x_subdev_unregister()).
 */
static void __host1x_subdev_unregister(struct host1x_device *device,
				       struct host1x_subdev *subdev)
{
	struct host1x_client *client = subdev->client;

	/*
	 * If all subdevices were bound, the composite device was registered
	 * (see host1x_subdev_register()); losing a subdevice now means it
	 * must be removed from the driver core again.
	 */
	if (list_empty(&device->subdevs)) {
		if (device->registered) {
			device->registered = false;
			device_del(&device->dev);
		}
	}

	/*
	 * Unlink the client from the subdevice and return the subdevice to
	 * the idle list so it can be bound again when a new client appears.
	 */
	mutex_lock(&device->clients_lock);
	subdev->client = NULL;
	client->host = NULL;
	list_move_tail(&subdev->list, &device->subdevs);

	/*
	 * The client is only taken off this device's client list here;
	 * callers that want to keep it around re-add it to the global list
	 * of unbound clients themselves (see __host1x_device_del()).
	 */
	list_del_init(&client->list);
	mutex_unlock(&device->clients_lock);
}
0174
/* Locked wrapper around __host1x_subdev_unregister(). */
static void host1x_subdev_unregister(struct host1x_device *device,
				     struct host1x_subdev *subdev)
{
	mutex_lock(&device->subdevs_lock);
	__host1x_subdev_unregister(device, subdev);
	mutex_unlock(&device->subdevs_lock);
}
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193 int host1x_device_init(struct host1x_device *device)
0194 {
0195 struct host1x_client *client;
0196 int err;
0197
0198 mutex_lock(&device->clients_lock);
0199
0200 list_for_each_entry(client, &device->clients, list) {
0201 if (client->ops && client->ops->early_init) {
0202 err = client->ops->early_init(client);
0203 if (err < 0) {
0204 dev_err(&device->dev, "failed to early initialize %s: %d\n",
0205 dev_name(client->dev), err);
0206 goto teardown_late;
0207 }
0208 }
0209 }
0210
0211 list_for_each_entry(client, &device->clients, list) {
0212 if (client->ops && client->ops->init) {
0213 err = client->ops->init(client);
0214 if (err < 0) {
0215 dev_err(&device->dev,
0216 "failed to initialize %s: %d\n",
0217 dev_name(client->dev), err);
0218 goto teardown;
0219 }
0220 }
0221 }
0222
0223 mutex_unlock(&device->clients_lock);
0224
0225 return 0;
0226
0227 teardown:
0228 list_for_each_entry_continue_reverse(client, &device->clients, list)
0229 if (client->ops->exit)
0230 client->ops->exit(client);
0231
0232
0233 client = list_entry(&device->clients, struct host1x_client, list);
0234
0235 teardown_late:
0236 list_for_each_entry_continue_reverse(client, &device->clients, list)
0237 if (client->ops->late_exit)
0238 client->ops->late_exit(client);
0239
0240 mutex_unlock(&device->clients_lock);
0241 return err;
0242 }
0243 EXPORT_SYMBOL(host1x_device_init);
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254 int host1x_device_exit(struct host1x_device *device)
0255 {
0256 struct host1x_client *client;
0257 int err;
0258
0259 mutex_lock(&device->clients_lock);
0260
0261 list_for_each_entry_reverse(client, &device->clients, list) {
0262 if (client->ops && client->ops->exit) {
0263 err = client->ops->exit(client);
0264 if (err < 0) {
0265 dev_err(&device->dev,
0266 "failed to cleanup %s: %d\n",
0267 dev_name(client->dev), err);
0268 mutex_unlock(&device->clients_lock);
0269 return err;
0270 }
0271 }
0272 }
0273
0274 list_for_each_entry_reverse(client, &device->clients, list) {
0275 if (client->ops && client->ops->late_exit) {
0276 err = client->ops->late_exit(client);
0277 if (err < 0) {
0278 dev_err(&device->dev, "failed to late cleanup %s: %d\n",
0279 dev_name(client->dev), err);
0280 mutex_unlock(&device->clients_lock);
0281 return err;
0282 }
0283 }
0284 }
0285
0286 mutex_unlock(&device->clients_lock);
0287
0288 return 0;
0289 }
0290 EXPORT_SYMBOL(host1x_device_exit);
0291
0292 static int host1x_add_client(struct host1x *host1x,
0293 struct host1x_client *client)
0294 {
0295 struct host1x_device *device;
0296 struct host1x_subdev *subdev;
0297
0298 mutex_lock(&host1x->devices_lock);
0299
0300 list_for_each_entry(device, &host1x->devices, list) {
0301 list_for_each_entry(subdev, &device->subdevs, list) {
0302 if (subdev->np == client->dev->of_node) {
0303 host1x_subdev_register(device, subdev, client);
0304 mutex_unlock(&host1x->devices_lock);
0305 return 0;
0306 }
0307 }
0308 }
0309
0310 mutex_unlock(&host1x->devices_lock);
0311 return -ENODEV;
0312 }
0313
0314 static int host1x_del_client(struct host1x *host1x,
0315 struct host1x_client *client)
0316 {
0317 struct host1x_device *device, *dt;
0318 struct host1x_subdev *subdev;
0319
0320 mutex_lock(&host1x->devices_lock);
0321
0322 list_for_each_entry_safe(device, dt, &host1x->devices, list) {
0323 list_for_each_entry(subdev, &device->active, list) {
0324 if (subdev->client == client) {
0325 host1x_subdev_unregister(device, subdev);
0326 mutex_unlock(&host1x->devices_lock);
0327 return 0;
0328 }
0329 }
0330 }
0331
0332 mutex_unlock(&host1x->devices_lock);
0333 return -ENODEV;
0334 }
0335
0336 static int host1x_device_match(struct device *dev, struct device_driver *drv)
0337 {
0338 return strcmp(dev_name(dev), drv->name) == 0;
0339 }
0340
/*
 * Export OF_* uevent variables for a host1x logical device. The variables
 * are derived from the *parent* (controller) node, since the logical
 * device itself is not created from a device tree node.
 */
static int host1x_device_uevent(struct device *dev,
				struct kobj_uevent_env *env)
{
	struct device_node *np = dev->parent->of_node;
	unsigned int count = 0;
	struct property *p;
	const char *compat;

	/*
	 * NOTE(review): add_uevent_var() return values are ignored here; if
	 * the environment buffer fills up, some OF_* variables may silently
	 * be missing — confirm this is acceptable for consumers.
	 */
	add_uevent_var(env, "OF_NAME=%pOFn", np);
	add_uevent_var(env, "OF_FULLNAME=%pOF", np);

	/* one OF_COMPATIBLE_<n> entry per "compatible" string */
	of_property_for_each_string(np, "compatible", p, compat) {
		add_uevent_var(env, "OF_COMPATIBLE_%u=%s", count, compat);
		count++;
	}

	add_uevent_var(env, "OF_COMPATIBLE_N=%u", count);

	return 0;
}
0370
/*
 * Bus ->dma_configure() hook: set up DMA for a device from its own OF node.
 * The "true" argument forces DMA configuration — TODO confirm exact
 * semantics against of_dma_configure().
 */
static int host1x_dma_configure(struct device *dev)
{
	return of_dma_configure(dev, dev->of_node, true);
}
0375
/*
 * System sleep operations for host1x logical devices: delegate to the
 * generic helpers, which invoke the callbacks of the bound driver.
 */
static const struct dev_pm_ops host1x_device_pm_ops = {
	.suspend = pm_generic_suspend,
	.resume = pm_generic_resume,
	.freeze = pm_generic_freeze,
	.thaw = pm_generic_thaw,
	.poweroff = pm_generic_poweroff,
	.restore = pm_generic_restore,
};
0384
/* The host1x bus type; devices and drivers are matched purely by name. */
struct bus_type host1x_bus_type = {
	.name = "host1x",
	.match = host1x_device_match,
	.uevent = host1x_device_uevent,
	.dma_configure = host1x_dma_configure,
	.pm = &host1x_device_pm_ops,
};
0392
/*
 * Tear down a logical device: unbind all active subdevices, free all idle
 * subdevices, hand every client back to the global list of unbound clients
 * and unlink the device from its controller's device list.
 */
static void __host1x_device_del(struct host1x_device *device)
{
	struct host1x_subdev *subdev, *sd;
	struct host1x_client *client, *cl;

	mutex_lock(&device->subdevs_lock);

	/* unregister subdevices that are still bound to a client */
	list_for_each_entry_safe(subdev, sd, &device->active, list) {
		/*
		 * host1x_subdev_unregister() cannot be used here because it
		 * would take device->subdevs_lock, which is already held;
		 * call the unlocked variant instead. Grab the client pointer
		 * first, since unregistering clears subdev->client.
		 */
		client = subdev->client;

		__host1x_subdev_unregister(device, subdev);

		/* return the client to the global list of unbound clients */
		mutex_lock(&clients_lock);
		list_add_tail(&client->list, &clients);
		mutex_unlock(&clients_lock);
	}

	/* free the remaining (idle) subdevices */
	list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
		host1x_subdev_del(subdev);

	mutex_unlock(&device->subdevs_lock);

	/* move any remaining clients to the global list of unbound clients */
	mutex_lock(&clients_lock);
	mutex_lock(&device->clients_lock);

	list_for_each_entry_safe(client, cl, &device->clients, list)
		list_move_tail(&client->list, &clients);

	mutex_unlock(&device->clients_lock);
	mutex_unlock(&clients_lock);

	/* finally remove the device from its controller's device list */
	list_del_init(&device->list);
}
0440
/*
 * Release callback invoked when the last reference to the logical device
 * is dropped (see put_device()); tears down remaining state and frees the
 * device allocated in host1x_device_add().
 */
static void host1x_device_release(struct device *dev)
{
	struct host1x_device *device = to_host1x_device(dev);

	__host1x_device_del(device);
	kfree(device);
}
0448
0449 static int host1x_device_add(struct host1x *host1x,
0450 struct host1x_driver *driver)
0451 {
0452 struct host1x_client *client, *tmp;
0453 struct host1x_subdev *subdev;
0454 struct host1x_device *device;
0455 int err;
0456
0457 device = kzalloc(sizeof(*device), GFP_KERNEL);
0458 if (!device)
0459 return -ENOMEM;
0460
0461 device_initialize(&device->dev);
0462
0463 mutex_init(&device->subdevs_lock);
0464 INIT_LIST_HEAD(&device->subdevs);
0465 INIT_LIST_HEAD(&device->active);
0466 mutex_init(&device->clients_lock);
0467 INIT_LIST_HEAD(&device->clients);
0468 INIT_LIST_HEAD(&device->list);
0469 device->driver = driver;
0470
0471 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
0472 device->dev.dma_mask = &device->dev.coherent_dma_mask;
0473 dev_set_name(&device->dev, "%s", driver->driver.name);
0474 device->dev.release = host1x_device_release;
0475 device->dev.bus = &host1x_bus_type;
0476 device->dev.parent = host1x->dev;
0477
0478 of_dma_configure(&device->dev, host1x->dev->of_node, true);
0479
0480 device->dev.dma_parms = &device->dma_parms;
0481 dma_set_max_seg_size(&device->dev, UINT_MAX);
0482
0483 err = host1x_device_parse_dt(device, driver);
0484 if (err < 0) {
0485 kfree(device);
0486 return err;
0487 }
0488
0489 list_add_tail(&device->list, &host1x->devices);
0490
0491 mutex_lock(&clients_lock);
0492
0493 list_for_each_entry_safe(client, tmp, &clients, list) {
0494 list_for_each_entry(subdev, &device->subdevs, list) {
0495 if (subdev->np == client->dev->of_node) {
0496 host1x_subdev_register(device, subdev, client);
0497 break;
0498 }
0499 }
0500 }
0501
0502 mutex_unlock(&clients_lock);
0503
0504 return 0;
0505 }
0506
0507
0508
0509
0510
0511
0512
/*
 * Remove a logical device: take it out of the driver core if it was
 * registered, then drop the final reference so host1x_device_release()
 * tears down the rest. Called with host1x->devices_lock held (see
 * host1x_detach_driver()).
 */
static void host1x_device_del(struct host1x *host1x,
			      struct host1x_device *device)
{
	if (device->registered) {
		device->registered = false;
		device_del(&device->dev);
	}

	put_device(&device->dev);
}
0523
0524 static void host1x_attach_driver(struct host1x *host1x,
0525 struct host1x_driver *driver)
0526 {
0527 struct host1x_device *device;
0528 int err;
0529
0530 mutex_lock(&host1x->devices_lock);
0531
0532 list_for_each_entry(device, &host1x->devices, list) {
0533 if (device->driver == driver) {
0534 mutex_unlock(&host1x->devices_lock);
0535 return;
0536 }
0537 }
0538
0539 err = host1x_device_add(host1x, driver);
0540 if (err < 0)
0541 dev_err(host1x->dev, "failed to allocate device: %d\n", err);
0542
0543 mutex_unlock(&host1x->devices_lock);
0544 }
0545
/*
 * Detach @driver from the @host1x controller by deleting every logical
 * device that was created for it.
 */
static void host1x_detach_driver(struct host1x *host1x,
				 struct host1x_driver *driver)
{
	struct host1x_device *device, *tmp;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry_safe(device, tmp, &host1x->devices, list)
		if (device->driver == driver)
			host1x_device_del(host1x, device);

	mutex_unlock(&host1x->devices_lock);
}
0559
/*
 * Debugfs "devices" file: for every logical device on this controller,
 * print its name followed by its subdevice nodes — bound ones (with the
 * client's device name) first, then the ones still waiting for a client.
 */
static int host1x_devices_show(struct seq_file *s, void *data)
{
	struct host1x *host1x = s->private;
	struct host1x_device *device;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry(device, &host1x->devices, list) {
		struct host1x_subdev *subdev;

		seq_printf(s, "%s\n", dev_name(&device->dev));

		mutex_lock(&device->subdevs_lock);

		/* active subdevices with the name of their bound client */
		list_for_each_entry(subdev, &device->active, list)
			seq_printf(s, "  %pOFf: %s\n", subdev->np,
				   dev_name(subdev->client->dev));

		/* idle subdevices without a client */
		list_for_each_entry(subdev, &device->subdevs, list)
			seq_printf(s, "  %pOFf:\n", subdev->np);

		mutex_unlock(&device->subdevs_lock);
	}

	mutex_unlock(&host1x->devices_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(host1x_devices);
0589
0590
0591
0592
0593
0594
0595
0596
0597
/**
 * host1x_register() - register a host1x controller
 * @host1x: host1x controller
 *
 * Adds the controller to the global list of host1x devices, attaches every
 * previously registered host1x driver to it (creating logical devices) and
 * exposes a "devices" debugfs file for it.
 *
 * Return: always 0.
 */
int host1x_register(struct host1x *host1x)
{
	struct host1x_driver *driver;

	mutex_lock(&devices_lock);
	list_add_tail(&host1x->list, &devices);
	mutex_unlock(&devices_lock);

	mutex_lock(&drivers_lock);

	list_for_each_entry(driver, &drivers, list)
		host1x_attach_driver(host1x, driver);

	mutex_unlock(&drivers_lock);

	debugfs_create_file("devices", S_IRUGO, host1x->debugfs, host1x,
			    &host1x_devices_fops);

	return 0;
}
0618
0619
0620
0621
0622
0623
0624
0625
/**
 * host1x_unregister() - unregister a host1x controller
 * @host1x: host1x controller
 *
 * Detaches all registered host1x drivers from the controller (removing
 * their logical devices) and takes it off the global device list.
 *
 * Return: always 0.
 */
int host1x_unregister(struct host1x *host1x)
{
	struct host1x_driver *driver;

	mutex_lock(&drivers_lock);

	list_for_each_entry(driver, &drivers, list)
		host1x_detach_driver(host1x, driver);

	mutex_unlock(&drivers_lock);

	mutex_lock(&devices_lock);
	list_del_init(&host1x->list);
	mutex_unlock(&devices_lock);

	return 0;
}
0643
0644 static int host1x_device_probe(struct device *dev)
0645 {
0646 struct host1x_driver *driver = to_host1x_driver(dev->driver);
0647 struct host1x_device *device = to_host1x_device(dev);
0648
0649 if (driver->probe)
0650 return driver->probe(device);
0651
0652 return 0;
0653 }
0654
0655 static int host1x_device_remove(struct device *dev)
0656 {
0657 struct host1x_driver *driver = to_host1x_driver(dev->driver);
0658 struct host1x_device *device = to_host1x_device(dev);
0659
0660 if (driver->remove)
0661 return driver->remove(device);
0662
0663 return 0;
0664 }
0665
0666 static void host1x_device_shutdown(struct device *dev)
0667 {
0668 struct host1x_driver *driver = to_host1x_driver(dev->driver);
0669 struct host1x_device *device = to_host1x_device(dev);
0670
0671 if (driver->shutdown)
0672 driver->shutdown(device);
0673 }
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684
/**
 * host1x_driver_register_full() - register a host1x driver
 * @driver: host1x driver
 * @owner: owner module
 *
 * Adds the driver to the global driver list, attaches it to every host1x
 * controller registered so far (creating logical devices for it) and then
 * registers the underlying device driver with the driver core.
 *
 * NOTE(review): the driver remains on the global list even if
 * driver_register() fails — confirm whether callers are expected to call
 * host1x_driver_unregister() in that case.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner)
{
	struct host1x *host1x;

	INIT_LIST_HEAD(&driver->list);

	mutex_lock(&drivers_lock);
	list_add_tail(&driver->list, &drivers);
	mutex_unlock(&drivers_lock);

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list)
		host1x_attach_driver(host1x, driver);

	mutex_unlock(&devices_lock);

	driver->driver.bus = &host1x_bus_type;
	driver->driver.owner = owner;
	driver->driver.probe = host1x_device_probe;
	driver->driver.remove = host1x_device_remove;
	driver->driver.shutdown = host1x_device_shutdown;

	return driver_register(&driver->driver);
}
EXPORT_SYMBOL(host1x_driver_register_full);
0712
0713
0714
0715
0716
0717
0718
0719
/**
 * host1x_driver_unregister() - unregister a host1x driver
 * @driver: host1x driver
 *
 * Unregisters the device driver from the driver core, detaches it from
 * every registered controller (removing its logical devices) and takes it
 * off the global driver list.
 */
void host1x_driver_unregister(struct host1x_driver *driver)
{
	struct host1x *host1x;

	driver_unregister(&driver->driver);

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list)
		host1x_detach_driver(host1x, driver);

	mutex_unlock(&devices_lock);

	mutex_lock(&drivers_lock);
	list_del_init(&driver->list);
	mutex_unlock(&drivers_lock);
}
EXPORT_SYMBOL(host1x_driver_unregister);
0738
0739
0740
0741
0742
0743
0744 void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
0745 {
0746 host1x_bo_cache_init(&client->cache);
0747 INIT_LIST_HEAD(&client->list);
0748 __mutex_init(&client->lock, "host1x client lock", key);
0749 client->usecount = 0;
0750 }
0751 EXPORT_SYMBOL(__host1x_client_init);
0752
0753
0754
0755
0756
/**
 * host1x_client_exit() - uninitialize a host1x client
 * @client: host1x client
 *
 * Counterpart of __host1x_client_init(); destroys the client's lock.
 * NOTE(review): the BO cache set up in __host1x_client_init() is destroyed
 * in host1x_client_unregister(), not here — confirm callers pair these.
 */
void host1x_client_exit(struct host1x_client *client)
{
	mutex_destroy(&client->lock);
}
EXPORT_SYMBOL(host1x_client_exit);
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774 int __host1x_client_register(struct host1x_client *client)
0775 {
0776 struct host1x *host1x;
0777 int err;
0778
0779 mutex_lock(&devices_lock);
0780
0781 list_for_each_entry(host1x, &devices, list) {
0782 err = host1x_add_client(host1x, client);
0783 if (!err) {
0784 mutex_unlock(&devices_lock);
0785 return 0;
0786 }
0787 }
0788
0789 mutex_unlock(&devices_lock);
0790
0791 mutex_lock(&clients_lock);
0792 list_add_tail(&client->list, &clients);
0793 mutex_unlock(&clients_lock);
0794
0795 return 0;
0796 }
0797 EXPORT_SYMBOL(__host1x_client_register);
0798
0799
0800
0801
0802
0803
0804
0805
0806 int host1x_client_unregister(struct host1x_client *client)
0807 {
0808 struct host1x_client *c;
0809 struct host1x *host1x;
0810 int err;
0811
0812 mutex_lock(&devices_lock);
0813
0814 list_for_each_entry(host1x, &devices, list) {
0815 err = host1x_del_client(host1x, client);
0816 if (!err) {
0817 mutex_unlock(&devices_lock);
0818 return 0;
0819 }
0820 }
0821
0822 mutex_unlock(&devices_lock);
0823 mutex_lock(&clients_lock);
0824
0825 list_for_each_entry(c, &clients, list) {
0826 if (c == client) {
0827 list_del_init(&c->list);
0828 break;
0829 }
0830 }
0831
0832 mutex_unlock(&clients_lock);
0833
0834 host1x_bo_cache_destroy(&client->cache);
0835
0836 return 0;
0837 }
0838 EXPORT_SYMBOL(host1x_client_unregister);
0839
/**
 * host1x_client_suspend() - runtime-suspend a host1x client
 * @client: host1x client
 *
 * Decrements the client's use count; when the last user goes away, the
 * client's ->suspend() hook is invoked first. The suspend is then
 * propagated to the client's parent; if the parent fails to suspend, the
 * client itself is resumed again and its use count restored.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int host1x_client_suspend(struct host1x_client *client)
{
	int err = 0;

	mutex_lock(&client->lock);

	/* about to drop the last reference: suspend the client itself */
	if (client->usecount == 1) {
		if (client->ops && client->ops->suspend) {
			err = client->ops->suspend(client);
			if (err < 0)
				goto unlock;
		}
	}

	client->usecount--;
	dev_dbg(client->dev, "use count: %u\n", client->usecount);

	if (client->parent) {
		err = host1x_client_suspend(client->parent);
		if (err < 0)
			goto resume;
	}

	goto unlock;

resume:
	/* the parent failed to suspend: undo our own suspend... */
	if (client->usecount == 0)
		if (client->ops && client->ops->resume)
			client->ops->resume(client);

	/* ...and restore the use count */
	client->usecount++;
unlock:
	mutex_unlock(&client->lock);
	return err;
}
EXPORT_SYMBOL(host1x_client_suspend);
0876
/**
 * host1x_client_resume() - runtime-resume a host1x client
 * @client: host1x client
 *
 * Resumes the client's parent first, then — on the first use — invokes the
 * client's own ->resume() hook and increments the use count. If the
 * client's resume fails, the parent is suspended again to undo the first
 * step.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int host1x_client_resume(struct host1x_client *client)
{
	int err = 0;

	mutex_lock(&client->lock);

	/* the parent must be up before the client can be resumed */
	if (client->parent) {
		err = host1x_client_resume(client->parent);
		if (err < 0)
			goto unlock;
	}

	/* first user: resume the client itself */
	if (client->usecount == 0) {
		if (client->ops && client->ops->resume) {
			err = client->ops->resume(client);
			if (err < 0)
				goto suspend;
		}
	}

	client->usecount++;
	dev_dbg(client->dev, "use count: %u\n", client->usecount);

	goto unlock;

suspend:
	/* our resume failed: balance the parent's resume from above */
	if (client->parent)
		host1x_client_suspend(client->parent);
unlock:
	mutex_unlock(&client->lock);
	return err;
}
EXPORT_SYMBOL(host1x_client_resume);
0910
/**
 * host1x_bo_pin() - map a host1x buffer object for DMA by a device
 * @dev: device that will access the buffer
 * @bo: buffer object to map
 * @dir: DMA transfer direction
 * @cache: optional mapping cache (may be NULL)
 *
 * Reuses a cached mapping for (@bo, @dir) if one exists in @cache;
 * otherwise asks the buffer object to pin itself for @dev, links the new
 * mapping into the BO's mapping list and, when a cache is given, stores it
 * there with an extra reference held on behalf of the cache.
 *
 * Return: the mapping, or an ERR_PTR()-encoded error from the pin callback.
 */
struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache)
{
	struct host1x_bo_mapping *mapping;

	if (cache) {
		mutex_lock(&cache->lock);

		/* reuse a cached mapping with the same direction, if any */
		list_for_each_entry(mapping, &cache->mappings, entry) {
			if (mapping->bo == bo && mapping->direction == dir) {
				kref_get(&mapping->ref);
				goto unlock;
			}
		}
	}

	mapping = bo->ops->pin(dev, bo, dir);
	if (IS_ERR(mapping))
		goto unlock;

	spin_lock(&mapping->bo->lock);
	list_add_tail(&mapping->list, &bo->mappings);
	spin_unlock(&mapping->bo->lock);

	if (cache) {
		INIT_LIST_HEAD(&mapping->entry);
		mapping->cache = cache;

		list_add_tail(&mapping->entry, &cache->mappings);

		/* extra reference kept alive on behalf of the cache */
		kref_get(&mapping->ref);
	}

unlock:
	if (cache)
		mutex_unlock(&cache->lock);

	return mapping;
}
EXPORT_SYMBOL(host1x_bo_pin);
0953
/*
 * kref release callback for a BO mapping: unlink it from its cache (the
 * caller is expected to hold the cache's lock when mapping->cache is set —
 * see host1x_bo_unpin()) and from the BO's mapping list, then unpin it.
 */
static void __host1x_bo_unpin(struct kref *ref)
{
	struct host1x_bo_mapping *mapping = to_host1x_bo_mapping(ref);

	if (mapping->cache)
		list_del(&mapping->entry);

	spin_lock(&mapping->bo->lock);
	list_del(&mapping->list);
	spin_unlock(&mapping->bo->lock);

	mapping->bo->ops->unpin(mapping);
}
0971
/**
 * host1x_bo_unpin() - drop a reference to a buffer object mapping
 * @mapping: mapping obtained from host1x_bo_pin()
 *
 * Drops one reference; the last reference triggers __host1x_bo_unpin().
 * The cache lock is taken around the kref_put() so that the release
 * callback can safely unlink the mapping from the cache.
 */
void host1x_bo_unpin(struct host1x_bo_mapping *mapping)
{
	struct host1x_bo_cache *cache = mapping->cache;

	if (cache)
		mutex_lock(&cache->lock);

	kref_put(&mapping->ref, __host1x_bo_unpin);

	if (cache)
		mutex_unlock(&cache->lock);
}
EXPORT_SYMBOL(host1x_bo_unpin);