0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028 #include <linux/export.h>
0029 #include <linux/rfkill.h>
0030 #include <linux/debugfs.h>
0031 #include <linux/crypto.h>
0032 #include <linux/kcov.h>
0033 #include <linux/property.h>
0034 #include <linux/suspend.h>
0035 #include <linux/wait.h>
0036 #include <asm/unaligned.h>
0037
0038 #include <net/bluetooth/bluetooth.h>
0039 #include <net/bluetooth/hci_core.h>
0040 #include <net/bluetooth/l2cap.h>
0041 #include <net/bluetooth/mgmt.h>
0042
0043 #include "hci_request.h"
0044 #include "hci_debugfs.h"
0045 #include "smp.h"
0046 #include "leds.h"
0047 #include "msft.h"
0048 #include "aosp.h"
0049 #include "hci_codec.h"
0050
/* Worker prototypes for the RX, command and TX processing queues. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list, protected by hci_dev_list_lock. */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list, protected by hci_cb_list_lock. */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID numbering */
static DEFINE_IDA(hci_index_ida);
0065
0066 static int hci_scan_req(struct hci_request *req, unsigned long opt)
0067 {
0068 __u8 scan = opt;
0069
0070 BT_DBG("%s %x", req->hdev->name, scan);
0071
0072
0073 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
0074 return 0;
0075 }
0076
0077 static int hci_auth_req(struct hci_request *req, unsigned long opt)
0078 {
0079 __u8 auth = opt;
0080
0081 BT_DBG("%s %x", req->hdev->name, auth);
0082
0083
0084 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
0085 return 0;
0086 }
0087
0088 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
0089 {
0090 __u8 encrypt = opt;
0091
0092 BT_DBG("%s %x", req->hdev->name, encrypt);
0093
0094
0095 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
0096 return 0;
0097 }
0098
/* Request callback: queue a Write Default Link Policy command. */
static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	/* NOTE(review): %x on a __le16 prints the raw little-endian value;
	 * on big-endian hosts the debug output is byte-swapped.
	 */
	BT_DBG("%s %x", req->hdev->name, policy);

	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}
0109
0110
0111
0112 struct hci_dev *hci_dev_get(int index)
0113 {
0114 struct hci_dev *hdev = NULL, *d;
0115
0116 BT_DBG("%d", index);
0117
0118 if (index < 0)
0119 return NULL;
0120
0121 read_lock(&hci_dev_list_lock);
0122 list_for_each_entry(d, &hci_dev_list, list) {
0123 if (d->id == index) {
0124 hdev = hci_dev_hold(d);
0125 break;
0126 }
0127 }
0128 read_unlock(&hci_dev_list_lock);
0129 return hdev;
0130 }
0131
0132
0133
0134 bool hci_discovery_active(struct hci_dev *hdev)
0135 {
0136 struct discovery_state *discov = &hdev->discovery;
0137
0138 switch (discov->state) {
0139 case DISCOVERY_FINDING:
0140 case DISCOVERY_RESOLVING:
0141 return true;
0142
0143 default:
0144 return false;
0145 }
0146 }
0147
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events. No-op when the state does not change.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		/* Only tell userspace discovery stopped if it had actually
		 * started (i.e. we are not aborting a start attempt).
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
0177
0178 void hci_inquiry_cache_flush(struct hci_dev *hdev)
0179 {
0180 struct discovery_state *cache = &hdev->discovery;
0181 struct inquiry_entry *p, *n;
0182
0183 list_for_each_entry_safe(p, n, &cache->all, all) {
0184 list_del(&p->all);
0185 kfree(p);
0186 }
0187
0188 INIT_LIST_HEAD(&cache->unknown);
0189 INIT_LIST_HEAD(&cache->resolve);
0190 }
0191
0192 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
0193 bdaddr_t *bdaddr)
0194 {
0195 struct discovery_state *cache = &hdev->discovery;
0196 struct inquiry_entry *e;
0197
0198 BT_DBG("cache %p, %pMR", cache, bdaddr);
0199
0200 list_for_each_entry(e, &cache->all, all) {
0201 if (!bacmp(&e->data.bdaddr, bdaddr))
0202 return e;
0203 }
0204
0205 return NULL;
0206 }
0207
0208 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
0209 bdaddr_t *bdaddr)
0210 {
0211 struct discovery_state *cache = &hdev->discovery;
0212 struct inquiry_entry *e;
0213
0214 BT_DBG("cache %p, %pMR", cache, bdaddr);
0215
0216 list_for_each_entry(e, &cache->unknown, list) {
0217 if (!bacmp(&e->data.bdaddr, bdaddr))
0218 return e;
0219 }
0220
0221 return NULL;
0222 }
0223
/* Find an entry on the name-resolve list. Passing BDADDR_ANY matches
 * the first entry whose name_state equals @state; otherwise the entry
 * is matched by address only.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
0242
/* Re-insert @ie into the resolve list at its sorted position.
 *
 * The list is kept ordered by ascending |RSSI| (stronger signal first)
 * so closer devices get their names resolved first; entries whose name
 * request is already pending are skipped past and stay at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* Insert after the last entry that should precede @ie. */
	list_add(&ie->list, pos);
}
0261
/* Insert or refresh the inquiry-cache entry for @data->bdaddr.
 *
 * Returns MGMT_DEV_FOUND_* flags telling the caller how userspace
 * should treat this result (legacy pairing, name confirmation needed).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates stored OOB data. */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* RSSI changed while a name request is still wanted:
		 * re-sort the entry within the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whatever
	 * name-tracking list (unknown/resolve) it was on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
0323
0324 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
0325 {
0326 struct discovery_state *cache = &hdev->discovery;
0327 struct inquiry_info *info = (struct inquiry_info *) buf;
0328 struct inquiry_entry *e;
0329 int copied = 0;
0330
0331 list_for_each_entry(e, &cache->all, all) {
0332 struct inquiry_data *data = &e->data;
0333
0334 if (copied >= num)
0335 break;
0336
0337 bacpy(&info->bdaddr, &data->bdaddr);
0338 info->pscan_rep_mode = data->pscan_rep_mode;
0339 info->pscan_period_mode = data->pscan_period_mode;
0340 info->pscan_mode = data->pscan_mode;
0341 memcpy(info->dev_class, data->dev_class, 3);
0342 info->clock_offset = data->clock_offset;
0343
0344 info++;
0345 copied++;
0346 }
0347
0348 BT_DBG("cache %p, copied %d", cache, copied);
0349 return copied;
0350 }
0351
/* Request callback: queue an Inquiry command built from the
 * hci_inquiry_req passed through @opt.
 */
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	/* An inquiry is already in progress; nothing to queue. */
	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
0371
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry, then copy
 * the cached results back to userspace after the request structure.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	/* Start a new inquiry if the cache is stale or empty, or if the
	 * caller explicitly asked for a flush.
	 */
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
0474
0475 static int hci_dev_do_open(struct hci_dev *hdev)
0476 {
0477 int ret = 0;
0478
0479 BT_DBG("%s %p", hdev->name, hdev);
0480
0481 hci_req_sync_lock(hdev);
0482
0483 ret = hci_dev_open_sync(hdev);
0484
0485 hci_req_sync_unlock(hdev);
0486 return ret;
0487 }
0488
0489
0490
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* If auto-off was pending, cancel it so the open does not race
	 * with the device being powered back down.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
0545
0546 int hci_dev_do_close(struct hci_dev *hdev)
0547 {
0548 int err;
0549
0550 BT_DBG("%s %p", hdev->name, hdev);
0551
0552 hci_req_sync_lock(hdev);
0553
0554 err = hci_dev_close_sync(hdev);
0555
0556 hci_req_sync_unlock(hdev);
0557
0558 return err;
0559 }
0560
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	/* Make sure no power-on or pending auto-off work races with
	 * the close we are about to perform.
	 */
	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
0585
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	/* Wait for
	 *
	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Driver-specific flush hook, if provided. */
	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	/* Reset flow-control counters before issuing the HCI reset. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
	hdev->iso_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}
0638
0639 int hci_dev_reset(__u16 dev)
0640 {
0641 struct hci_dev *hdev;
0642 int err;
0643
0644 hdev = hci_dev_get(dev);
0645 if (!hdev)
0646 return -ENODEV;
0647
0648 if (!test_bit(HCI_UP, &hdev->flags)) {
0649 err = -ENETDOWN;
0650 goto done;
0651 }
0652
0653 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0654 err = -EBUSY;
0655 goto done;
0656 }
0657
0658 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
0659 err = -EOPNOTSUPP;
0660 goto done;
0661 }
0662
0663 err = hci_dev_do_reset(hdev);
0664
0665 done:
0666 hci_dev_put(hdev);
0667 return err;
0668 }
0669
0670 int hci_dev_reset_stat(__u16 dev)
0671 {
0672 struct hci_dev *hdev;
0673 int ret = 0;
0674
0675 hdev = hci_dev_get(dev);
0676 if (!hdev)
0677 return -ENODEV;
0678
0679 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0680 ret = -EBUSY;
0681 goto done;
0682 }
0683
0684 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
0685 ret = -EOPNOTSUPP;
0686 goto done;
0687 }
0688
0689 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
0690
0691 done:
0692 hci_dev_put(hdev);
0693 return ret;
0694 }
0695
/* Mirror a legacy HCISETSCAN change into the CONNECTABLE/DISCOVERABLE
 * mgmt flags and, when mgmt is in use, notify userspace of the new
 * settings.
 */
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Dropping discoverability also drops limited mode. */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
0731
/* Handler for the legacy HCISET* device-configuration ioctls. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		/* dev_opt packs (pkts << 16 | mtu) as two __u16 halves. */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
0837
/* HCIGETDEVLIST ioctl: copy (id, flags) pairs for up to the requested
 * number of registered devices back to userspace.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation to a sane size. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	/* Only copy back the entries actually filled in. */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
0887
0888 int hci_get_dev_info(void __user *arg)
0889 {
0890 struct hci_dev *hdev;
0891 struct hci_dev_info di;
0892 unsigned long flags;
0893 int err = 0;
0894
0895 if (copy_from_user(&di, arg, sizeof(di)))
0896 return -EFAULT;
0897
0898 hdev = hci_dev_get(di.dev_id);
0899 if (!hdev)
0900 return -ENODEV;
0901
0902
0903
0904
0905
0906 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
0907 flags = hdev->flags & ~BIT(HCI_UP);
0908 else
0909 flags = hdev->flags;
0910
0911 strcpy(di.name, hdev->name);
0912 di.bdaddr = hdev->bdaddr;
0913 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
0914 di.flags = flags;
0915 di.pkt_type = hdev->pkt_type;
0916 if (lmp_bredr_capable(hdev)) {
0917 di.acl_mtu = hdev->acl_mtu;
0918 di.acl_pkts = hdev->acl_pkts;
0919 di.sco_mtu = hdev->sco_mtu;
0920 di.sco_pkts = hdev->sco_pkts;
0921 } else {
0922 di.acl_mtu = hdev->le_mtu;
0923 di.acl_pkts = hdev->le_pkts;
0924 di.sco_mtu = 0;
0925 di.sco_pkts = 0;
0926 }
0927 di.link_policy = hdev->link_policy;
0928 di.link_mode = hdev->link_mode;
0929
0930 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
0931 memcpy(&di.features, &hdev->features, sizeof(di.features));
0932
0933 if (copy_to_user(arg, &di, sizeof(di)))
0934 err = -EFAULT;
0935
0936 hci_dev_put(hdev);
0937
0938 return err;
0939 }
0940
0941
0942
/* ---- Interface to rfkill ---- */

/* rfkill callback: block powers the device down (unless setup/config is
 * still in progress); unblock only clears the flag — the device is not
 * powered back up automatically.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	/* User-channel owners control the device exclusively. */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}
0963
/* Only the block/unblock operation is supported. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
0967
/* Deferred power-on worker: opens the device and performs the
 * post-setup bookkeeping (auto-off, RAW flag, mgmt index events).
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* Device already up and managed: just cancel the pending
	 * auto-off and resynchronise the powered state.
	 */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* Announce the new controller to the management
		 * interface now that setup has finished.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* The transition from unconfigured to configured is
		 * complete; announce the index to userspace.
		 */
		mgmt_index_added(hdev);
	}
}
1037
/* Deferred power-off worker (e.g. auto-off timeout): close the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1047
/* Hardware-error recovery worker: report the error, then try to
 * recover by power-cycling the controller (close followed by open).
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	/* If the close fails, don't attempt to re-open. */
	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
1064
1065 void hci_uuids_clear(struct hci_dev *hdev)
1066 {
1067 struct bt_uuid *uuid, *tmp;
1068
1069 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1070 list_del(&uuid->list);
1071 kfree(uuid);
1072 }
1073 }
1074
1075 void hci_link_keys_clear(struct hci_dev *hdev)
1076 {
1077 struct link_key *key;
1078
1079 list_for_each_entry(key, &hdev->link_keys, list) {
1080 list_del_rcu(&key->list);
1081 kfree_rcu(key, rcu);
1082 }
1083 }
1084
1085 void hci_smp_ltks_clear(struct hci_dev *hdev)
1086 {
1087 struct smp_ltk *k;
1088
1089 list_for_each_entry(k, &hdev->long_term_keys, list) {
1090 list_del_rcu(&k->list);
1091 kfree_rcu(k, rcu);
1092 }
1093 }
1094
1095 void hci_smp_irks_clear(struct hci_dev *hdev)
1096 {
1097 struct smp_irk *k;
1098
1099 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
1100 list_del_rcu(&k->list);
1101 kfree_rcu(k, rcu);
1102 }
1103 }
1104
1105 void hci_blocked_keys_clear(struct hci_dev *hdev)
1106 {
1107 struct blocked_key *b;
1108
1109 list_for_each_entry(b, &hdev->blocked_keys, list) {
1110 list_del_rcu(&b->list);
1111 kfree_rcu(b, rcu);
1112 }
1113 }
1114
1115 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1116 {
1117 bool blocked = false;
1118 struct blocked_key *b;
1119
1120 rcu_read_lock();
1121 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1122 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1123 blocked = true;
1124 break;
1125 }
1126 }
1127
1128 rcu_read_unlock();
1129 return blocked;
1130 }
1131
/* Look up the stored link key for @bdaddr. Returns NULL if there is no
 * key or the key is on the blocked list.
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			/* NOTE(review): the blocked-key check and the
			 * return of k happen after rcu_read_unlock();
			 * k's lifetime past this point relies on the
			 * caller's locking — confirm against callers.
			 */
			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
1157
/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and the bonding requirements
 * both sides declared on @conn.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently
	 */
	return false;
}
1197
1198 static u8 ltk_role(u8 type)
1199 {
1200 if (type == SMP_LTK)
1201 return HCI_ROLE_MASTER;
1202
1203 return HCI_ROLE_SLAVE;
1204 }
1205
/* Look up a long term key by address, address type and role. Secure
 * Connections keys match either role. Returns NULL if no key is found
 * or the key is on the blocked list.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			/* NOTE(review): k is returned after
			 * rcu_read_unlock(); lifetime relies on the
			 * caller's locking — same pattern as
			 * hci_find_link_key().
			 */
			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
1234
/* Find the IRK that resolves @rpa: first try the cached RPA of each
 * key, then fall back to cryptographically testing each IRK against
 * the address (caching the result). Blocked keys yield NULL.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for the fast path above. */
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
1268
/* Find the IRK stored for identity address (@bdaddr, @addr_type).
 * Blocked keys yield NULL.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	/* Never hand out a key that has been administratively blocked. */
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
1301
/* Store (or update) the link key for @bdaddr. When @persistent is
 * non-NULL it is set to whether the key should be kept across reboots
 * (see hci_persistent_key()). Returns the stored key or NULL on OOM.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* Keep the previous type when only the key value changed. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
1348
/* Store (or update) a long term key for (@bdaddr, @addr_type).
 * Returns the stored key or NULL on OOM.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	/* Reuse an existing entry for the same address/type/role. */
	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
1377
/* Store (or update) the identity resolving key for (@bdaddr,
 * @addr_type) and record its current RPA. Returns the stored key or
 * NULL on OOM.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	/* Refresh the key material and cached RPA for both new and
	 * existing entries.
	 */
	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
1400
/* Remove the stored link key for @bdaddr. Returns -ENOENT if there is
 * no such key.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}
1416
/* Remove all Long Term Keys matching @bdaddr/@bdaddr_type.
 *
 * A device can have more than one LTK stored, so the whole list is
 * scanned and every match is removed.  Matches are unlinked with
 * list_del_rcu() and freed after a grace period via kfree_rcu(), so
 * concurrent RCU readers remain safe.
 *
 * Returns 0 if at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
1435
/* Remove every Identity Resolving Key matching @bdaddr/@addr_type.
 *
 * Entries are unlinked with list_del_rcu() and freed after a grace
 * period, keeping concurrent RCU readers safe.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
1450
/* Check whether the device @bdaddr/@type has pairing material stored:
 * a BR/EDR link key, or (for LE) a Long Term Key, possibly after
 * resolving the address through a stored IRK.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* If an IRK resolves this address, LTKs are stored against the
	 * identity address, so switch to that before scanning.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
1486
1487
/* HCI command timer: delayed work fired when a sent command did not
 * complete in time.  Logs the offending opcode (when still known),
 * gives the driver a chance to recover, and unblocks the command queue.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	/* Let the driver attempt a controller reset, if it provides one */
	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	/* Restore one command credit so the next queued command can go out */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1508
1509
/* ncmd timer: delayed work fired when the controller keeps reporting
 * zero Num_HCI_Command_Packets credits, i.e. it stopped accepting
 * commands altogether.
 */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During the HCI_INIT phase no events can be generated, so a
	 * reset could never complete — just bail out.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* Irrecoverable state: force a full reset of the device */
	hci_reset_dev(hdev);
}
1526
1527 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1528 bdaddr_t *bdaddr, u8 bdaddr_type)
1529 {
1530 struct oob_data *data;
1531
1532 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1533 if (bacmp(bdaddr, &data->bdaddr) != 0)
1534 continue;
1535 if (data->bdaddr_type != bdaddr_type)
1536 continue;
1537 return data;
1538 }
1539
1540 return NULL;
1541 }
1542
1543 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1544 u8 bdaddr_type)
1545 {
1546 struct oob_data *data;
1547
1548 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1549 if (!data)
1550 return -ENOENT;
1551
1552 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1553
1554 list_del(&data->list);
1555 kfree(data);
1556
1557 return 0;
1558 }
1559
1560 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1561 {
1562 struct oob_data *data, *n;
1563
1564 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1565 list_del(&data->list);
1566 kfree(data);
1567 }
1568 }
1569
/* Store remote OOB pairing data for @bdaddr/@bdaddr_type, creating the
 * entry if needed.
 *
 * The "present" field encodes which key variants are valid:
 *   0x00 - neither, 0x01 - P-192 only, 0x02 - P-256 only, 0x03 - both.
 * Missing variants are zeroed out.  Note the assignment order below is
 * deliberate: the P-256 branch may overwrite "present" set earlier.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
1615
1616
1617 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1618 {
1619 struct adv_info *adv_instance;
1620
1621 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1622 if (adv_instance->instance == instance)
1623 return adv_instance;
1624 }
1625
1626 return NULL;
1627 }
1628
1629
1630 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1631 {
1632 struct adv_info *cur_instance;
1633
1634 cur_instance = hci_find_adv_instance(hdev, instance);
1635 if (!cur_instance)
1636 return NULL;
1637
1638 if (cur_instance == list_last_entry(&hdev->adv_instances,
1639 struct adv_info, list))
1640 return list_first_entry(&hdev->adv_instances,
1641 struct adv_info, list);
1642 else
1643 return list_next_entry(cur_instance, list);
1644 }
1645
1646
1647 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1648 {
1649 struct adv_info *adv_instance;
1650
1651 adv_instance = hci_find_adv_instance(hdev, instance);
1652 if (!adv_instance)
1653 return -ENOENT;
1654
1655 BT_DBG("%s removing %dMR", hdev->name, instance);
1656
1657 if (hdev->cur_adv_instance == instance) {
1658 if (hdev->adv_instance_timeout) {
1659 cancel_delayed_work(&hdev->adv_instance_expire);
1660 hdev->adv_instance_timeout = 0;
1661 }
1662 hdev->cur_adv_instance = 0x00;
1663 }
1664
1665 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1666
1667 list_del(&adv_instance->list);
1668 kfree(adv_instance);
1669
1670 hdev->adv_instance_cnt--;
1671
1672 return 0;
1673 }
1674
1675 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1676 {
1677 struct adv_info *adv_instance, *n;
1678
1679 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1680 adv_instance->rpa_expired = rpa_expired;
1681 }
1682
1683
1684 void hci_adv_instances_clear(struct hci_dev *hdev)
1685 {
1686 struct adv_info *adv_instance, *n;
1687
1688 if (hdev->adv_instance_timeout) {
1689 cancel_delayed_work(&hdev->adv_instance_expire);
1690 hdev->adv_instance_timeout = 0;
1691 }
1692
1693 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1694 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1695 list_del(&adv_instance->list);
1696 kfree(adv_instance);
1697 }
1698
1699 hdev->adv_instance_cnt = 0;
1700 hdev->cur_adv_instance = 0x00;
1701 }
1702
1703 static void adv_instance_rpa_expired(struct work_struct *work)
1704 {
1705 struct adv_info *adv_instance = container_of(work, struct adv_info,
1706 rpa_expired_cb.work);
1707
1708 BT_DBG("");
1709
1710 adv_instance->rpa_expired = true;
1711 }
1712
1713
1714 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1715 u32 flags, u16 adv_data_len, u8 *adv_data,
1716 u16 scan_rsp_len, u8 *scan_rsp_data,
1717 u16 timeout, u16 duration, s8 tx_power,
1718 u32 min_interval, u32 max_interval)
1719 {
1720 struct adv_info *adv;
1721
1722 adv = hci_find_adv_instance(hdev, instance);
1723 if (adv) {
1724 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1725 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1726 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1727 } else {
1728 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1729 instance < 1 || instance > hdev->le_num_of_adv_sets)
1730 return ERR_PTR(-EOVERFLOW);
1731
1732 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1733 if (!adv)
1734 return ERR_PTR(-ENOMEM);
1735
1736 adv->pending = true;
1737 adv->instance = instance;
1738 list_add(&adv->list, &hdev->adv_instances);
1739 hdev->adv_instance_cnt++;
1740 }
1741
1742 adv->flags = flags;
1743 adv->min_interval = min_interval;
1744 adv->max_interval = max_interval;
1745 adv->tx_power = tx_power;
1746
1747 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1748 scan_rsp_len, scan_rsp_data);
1749
1750 adv->timeout = timeout;
1751 adv->remaining_time = timeout;
1752
1753 if (duration == 0)
1754 adv->duration = hdev->def_multi_adv_rotation_duration;
1755 else
1756 adv->duration = duration;
1757
1758 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1759
1760 BT_DBG("%s for %dMR", hdev->name, instance);
1761
1762 return adv;
1763 }
1764
1765
/* Add a periodic advertising instance: a thin wrapper around
 * hci_add_adv_instance() that creates the instance with no regular
 * adv/scan-response data or timeout, then attaches the periodic
 * advertising payload.
 *
 * Returns the instance or an ERR_PTR from hci_add_adv_instance().
 */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
}
1786
1787
/* Update the advertising and scan-response payloads of an existing
 * instance, marking each buffer changed only when the new data actually
 * differs (ADV_DATA_CMP/SCAN_RSP_CMP compare against the stored copy).
 *
 * Returns 0 on success, -ENOENT when the instance does not exist.
 */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Appearance and local-name are appended to the scan response at a
	 * later stage, so flag it changed whenever those flags are set.
	 */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}
1821
1822
/* Return the advertising flags for @instance.
 *
 * Instance 0x00 (the legacy instance) has no stored adv_info; its flags
 * are synthesized from the controller's current state instead.
 */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields.
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* Mirror the connectable/discoverable device state into
		 * the synthesized flags.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when the instance doesn't exist */
	if (!adv)
		return 0;

	return adv->flags;
}
1856
1857 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1858 {
1859 struct adv_info *adv;
1860
1861
1862 if (instance == 0x00)
1863 return true;
1864
1865 adv = hci_find_adv_instance(hdev, instance);
1866 if (!adv)
1867 return false;
1868
1869 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1870 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1871 return true;
1872
1873 return adv->scan_rsp_len ? true : false;
1874 }
1875
1876
/* Free every registered advertising monitor and destroy the IDR that
 * maps monitor handles to them.
 * NOTE(review): hci_free_adv_monitor() touches shared state — confirm
 * callers hold hdev->lock.
 */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}
1887
1888
1889
1890
/* Free @monitor together with its pattern list, drop its IDR handle,
 * and — when it had been registered — update the monitor count and
 * notify mgmt that it was removed.  NULL @monitor is a no-op.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	/* Handle 0 means the monitor never got an IDR slot */
	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	/* Only registered monitors count towards adv_monitors_cnt and
	 * need a "removed" notification.
	 */
	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}
1914
1915
1916
1917
1918
/* Register advertising monitor @monitor: allocate it a handle from the
 * [HCI_MIN_ADV_MONITOR_HANDLE, +HCI_MAX_ADV_MONITOR_NUM_HANDLES) range
 * and, when the controller is powered and supports offloading, hand the
 * pattern to the offload mechanism (currently MSFT extension only).
 *
 * Returns a negative errno on handle allocation failure, otherwise the
 * offload status (0 when no offload was attempted).
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	/* hdev->lock only guards the IDR allocation; offloading below may
	 * sleep and is done unlocked.
	 */
	hci_dev_lock(hdev);

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	/* Offloading is deferred until power-on for unpowered devices */
	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
			   monitor->handle, status);
		/* Message was added to mgmt_pending_list... nothing more to do */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
			   monitor->handle, status);
		break;
	}

	return status;
}
1960
1961
1962
1963
1964
/* Remove @monitor, undoing any controller offload first.
 *
 * Without offload support the monitor is freed directly.  With the MSFT
 * extension, the monitor is freed only when the controller reports it
 * does not know the handle (-ENOENT); other errors leave it allocated.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
			   hdev->name, monitor->handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}
1997
1998
1999 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2000 {
2001 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2002
2003 if (!monitor)
2004 return -EINVAL;
2005
2006 return hci_remove_adv_monitor(hdev, monitor);
2007 }
2008
2009
2010 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2011 {
2012 struct adv_monitor *monitor;
2013 int idr_next_id = 0;
2014 int status = 0;
2015
2016 while (1) {
2017 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2018 if (!monitor)
2019 break;
2020
2021 status = hci_remove_adv_monitor(hdev, monitor);
2022 if (status)
2023 return status;
2024
2025 idr_next_id++;
2026 }
2027
2028 return status;
2029 }
2030
2031
2032 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2033 {
2034 return !idr_is_empty(&hdev->adv_monitors_idr);
2035 }
2036
2037 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2038 {
2039 if (msft_monitor_supported(hdev))
2040 return HCI_ADV_MONITOR_EXT_MSFT;
2041
2042 return HCI_ADV_MONITOR_EXT_NONE;
2043 }
2044
2045 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2046 bdaddr_t *bdaddr, u8 type)
2047 {
2048 struct bdaddr_list *b;
2049
2050 list_for_each_entry(b, bdaddr_list, list) {
2051 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2052 return b;
2053 }
2054
2055 return NULL;
2056 }
2057
2058 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2059 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2060 u8 type)
2061 {
2062 struct bdaddr_list_with_irk *b;
2063
2064 list_for_each_entry(b, bdaddr_list, list) {
2065 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2066 return b;
2067 }
2068
2069 return NULL;
2070 }
2071
2072 struct bdaddr_list_with_flags *
2073 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2074 bdaddr_t *bdaddr, u8 type)
2075 {
2076 struct bdaddr_list_with_flags *b;
2077
2078 list_for_each_entry(b, bdaddr_list, list) {
2079 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2080 return b;
2081 }
2082
2083 return NULL;
2084 }
2085
2086 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2087 {
2088 struct bdaddr_list *b, *n;
2089
2090 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2091 list_del(&b->list);
2092 kfree(b);
2093 }
2094 }
2095
2096 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2097 {
2098 struct bdaddr_list *entry;
2099
2100 if (!bacmp(bdaddr, BDADDR_ANY))
2101 return -EBADF;
2102
2103 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2104 return -EEXIST;
2105
2106 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2107 if (!entry)
2108 return -ENOMEM;
2109
2110 bacpy(&entry->bdaddr, bdaddr);
2111 entry->bdaddr_type = type;
2112
2113 list_add(&entry->list, list);
2114
2115 return 0;
2116 }
2117
2118 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2119 u8 type, u8 *peer_irk, u8 *local_irk)
2120 {
2121 struct bdaddr_list_with_irk *entry;
2122
2123 if (!bacmp(bdaddr, BDADDR_ANY))
2124 return -EBADF;
2125
2126 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2127 return -EEXIST;
2128
2129 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2130 if (!entry)
2131 return -ENOMEM;
2132
2133 bacpy(&entry->bdaddr, bdaddr);
2134 entry->bdaddr_type = type;
2135
2136 if (peer_irk)
2137 memcpy(entry->peer_irk, peer_irk, 16);
2138
2139 if (local_irk)
2140 memcpy(entry->local_irk, local_irk, 16);
2141
2142 list_add(&entry->list, list);
2143
2144 return 0;
2145 }
2146
2147 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2148 u8 type, u32 flags)
2149 {
2150 struct bdaddr_list_with_flags *entry;
2151
2152 if (!bacmp(bdaddr, BDADDR_ANY))
2153 return -EBADF;
2154
2155 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2156 return -EEXIST;
2157
2158 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2159 if (!entry)
2160 return -ENOMEM;
2161
2162 bacpy(&entry->bdaddr, bdaddr);
2163 entry->bdaddr_type = type;
2164 entry->flags = flags;
2165
2166 list_add(&entry->list, list);
2167
2168 return 0;
2169 }
2170
2171 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2172 {
2173 struct bdaddr_list *entry;
2174
2175 if (!bacmp(bdaddr, BDADDR_ANY)) {
2176 hci_bdaddr_list_clear(list);
2177 return 0;
2178 }
2179
2180 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2181 if (!entry)
2182 return -ENOENT;
2183
2184 list_del(&entry->list);
2185 kfree(entry);
2186
2187 return 0;
2188 }
2189
2190 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2191 u8 type)
2192 {
2193 struct bdaddr_list_with_irk *entry;
2194
2195 if (!bacmp(bdaddr, BDADDR_ANY)) {
2196 hci_bdaddr_list_clear(list);
2197 return 0;
2198 }
2199
2200 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2201 if (!entry)
2202 return -ENOENT;
2203
2204 list_del(&entry->list);
2205 kfree(entry);
2206
2207 return 0;
2208 }
2209
2210 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2211 u8 type)
2212 {
2213 struct bdaddr_list_with_flags *entry;
2214
2215 if (!bacmp(bdaddr, BDADDR_ANY)) {
2216 hci_bdaddr_list_clear(list);
2217 return 0;
2218 }
2219
2220 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2221 if (!entry)
2222 return -ENOENT;
2223
2224 list_del(&entry->list);
2225 kfree(entry);
2226
2227 return 0;
2228 }
2229
2230
2231 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2232 bdaddr_t *addr, u8 addr_type)
2233 {
2234 struct hci_conn_params *params;
2235
2236 list_for_each_entry(params, &hdev->le_conn_params, list) {
2237 if (bacmp(¶ms->addr, addr) == 0 &&
2238 params->addr_type == addr_type) {
2239 return params;
2240 }
2241 }
2242
2243 return NULL;
2244 }
2245
2246
2247 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2248 bdaddr_t *addr, u8 addr_type)
2249 {
2250 struct hci_conn_params *param;
2251
2252 list_for_each_entry(param, list, action) {
2253 if (bacmp(¶m->addr, addr) == 0 &&
2254 param->addr_type == addr_type)
2255 return param;
2256 }
2257
2258 return NULL;
2259 }
2260
2261
/* Return the LE connection parameters for @addr/@addr_type, creating a
 * new entry (populated with the controller's defaults and auto-connect
 * disabled) when none exists yet.
 *
 * Returns NULL on allocation failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	/* Not yet linked into any pending-action list */
	INIT_LIST_HEAD(&params->action);

	/* Seed with the controller-wide defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
2293
/* Release a connection-parameter entry: drop the reference (and the
 * hold) on any connection still attached to it, unlink it from both the
 * main list and any pending-action list, then free it.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
2305
2306
2307 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2308 {
2309 struct hci_conn_params *params;
2310
2311 params = hci_conn_params_lookup(hdev, addr, addr_type);
2312 if (!params)
2313 return;
2314
2315 hci_conn_params_free(params);
2316
2317 hci_update_passive_scan(hdev);
2318
2319 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2320 }
2321
2322
/* Drop every connection-parameter entry whose auto-connect policy is
 * disabled, except those with a pending one-time (explicit) connect,
 * which are converted to HCI_AUTO_CONN_EXPLICIT instead of removed.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
2345
2346
2347 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2348 {
2349 struct hci_conn_params *params, *tmp;
2350
2351 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2352 hci_conn_params_free(params);
2353
2354 BT_DBG("All LE connection parameters were removed");
2355 }
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
/* Copy the controller's identity address into @bdaddr/@bdaddr_type.
 *
 * The static random address is used when (a) it has been forced for
 * debugging, (b) no public BD_ADDR is set (LE-only controller), or
 * (c) BR/EDR is disabled and a static address has been configured.
 * Otherwise the public BD_ADDR is the identity address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
2384
/* Reset the recorded system-wakeup cause (reason, address, address
 * type) under hdev->lock.
 */
static void hci_clear_wake_reason(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);
}
2395
/* PM notifier callback: suspend the controller ahead of system suspend
 * and resume it afterwards.  Failures are only logged; NOTIFY_DONE is
 * returned unconditionally so system suspend is never blocked.
 */
static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;

	if (action == PM_SUSPEND_PREPARE)
		ret = hci_suspend_dev(hdev);
	else if (action == PM_POST_SUSPEND)
		ret = hci_resume_dev(hdev);

	if (ret)
		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
			   action, ret);

	return NOTIFY_DONE;
}
2414
2415
/* Allocate an HCI device structure plus @sizeof_priv bytes of
 * driver-private data, and initialize every default parameter, list,
 * lock and work item.  Returns NULL on allocation failure; the result
 * is released with hci_free_dev()/hci_release_dev().
 */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Driver-private data sits directly behind struct hci_dev */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* BR/EDR defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;
	hdev->io_capability = 0x03;	/* NoInputNoOutput until configured */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	/* Advertisement-monitor interleave scan durations (ms) */
	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Disable by default */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults; interval/window values are in controller units
	 * (0.625 ms / 1.25 ms depending on the parameter).
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = 0x0060;
	hdev->le_scan_window_adv_monitor = 0x0030;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device lists */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->monitored_devices);

	INIT_LIST_HEAD(&hdev->local_codecs);
	/* Work items for RX/TX/command processing and power management */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Command and ncmd watchdog timers */
	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);
2544
2545
/* Free an HCI device allocated with hci_alloc_dev_priv().
 *
 * Only the device reference is dropped here; the actual memory is
 * released through the driver-core release path (hci_release_dev).
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2552
2553
/* Register an HCI device with the core.
 *
 * Allocates an index (AMP controllers start at 1, so hci0 is reserved
 * for a primary controller), sets up workqueues, sysfs/debugfs entries,
 * rfkill, initial flags and the suspend notifier, and finally queues
 * the power-on work.
 *
 * Returns the assigned index on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must provide at least open/close/send callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Index allocation: primary controllers may take any id,
	 * AMP controllers start at 1.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	/* rfkill is optional: registration failure just disables it */
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* Mark Remote Wakeup connection flag as supported if driver has
	 * a wakeup callback.
	 */
	if (hdev->wakeup)
		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* NOTE(review): a failure here unwinds to err_wqueue, which does
	 * not undo the HCI_DEV_REG event, list insertion or dev hold —
	 * verify this ordering.
	 */
	error = hci_register_suspend_notifier(hdev);
	if (error)
		goto err_wqueue;

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2671
2672
/* Unregister an HCI device: take it off the global list, stop pending
 * work, close it down, notify mgmt and monitoring sockets, remove the
 * sysfs device and drop the registration reference.  Final memory
 * release happens in hci_release_dev() once the last ref is gone.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_cmd_sync_clear(hdev);

	hci_unregister_suspend_notifier(hdev);

	msft_unregister(hdev);

	hci_dev_do_close(hdev);

	/* Only announce the index removal when the device finished its
	 * setup/config phase; otherwise userspace never saw it.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
2717
2718
/* Release HCI device: free all resources owned by hdev.
 *
 * Called when the last reference to the device is dropped (after
 * hci_unregister_dev()). Everything reachable from hdev is torn down
 * under hci_dev_lock before the structure itself is freed.
 */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	/* Return the index to the IDA only now that nothing can look the
	 * device up any more.
	 */
	ida_simple_remove(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);
2751
2752 int hci_register_suspend_notifier(struct hci_dev *hdev)
2753 {
2754 int ret = 0;
2755
2756 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2757 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2758 ret = register_pm_notifier(&hdev->suspend_notifier);
2759 }
2760
2761 return ret;
2762 }
2763
2764 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2765 {
2766 int ret = 0;
2767
2768 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
2769 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2770
2771 return ret;
2772 }
2773
2774
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Suspend should only act when the device is powered and not
	 * in the middle of being unregistered.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_suspend_sync(hdev);
	hci_req_sync_unlock(hdev);

	hci_clear_wake_reason(hdev);
	mgmt_suspending(hdev, hdev->suspend_state);

	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);
2801
2802
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act when the device is powered and not
	 * in the middle of being unregistered.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	/* Report the wake reason recorded during suspend to userspace. */
	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);
2829
2830
2831 int hci_reset_dev(struct hci_dev *hdev)
2832 {
2833 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2834 struct sk_buff *skb;
2835
2836 skb = bt_skb_alloc(3, GFP_ATOMIC);
2837 if (!skb)
2838 return -ENOMEM;
2839
2840 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2841 skb_put_data(skb, hw_err, 3);
2842
2843 bt_dev_err(hdev, "Injecting HCI hardware error event");
2844
2845
2846 return hci_recv_frame(hdev, skb);
2847 }
2848 EXPORT_SYMBOL(hci_reset_dev);
2849
2850
2851 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2852 {
2853 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2854 && !test_bit(HCI_INIT, &hdev->flags))) {
2855 kfree_skb(skb);
2856 return -ENXIO;
2857 }
2858
2859 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
2860 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
2861 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
2862 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
2863 kfree_skb(skb);
2864 return -EINVAL;
2865 }
2866
2867
2868 bt_cb(skb)->incoming = 1;
2869
2870
2871 __net_timestamp(skb);
2872
2873 skb_queue_tail(&hdev->rx_q, skb);
2874 queue_work(hdev->workqueue, &hdev->rx_work);
2875
2876 return 0;
2877 }
2878 EXPORT_SYMBOL(hci_recv_frame);
2879
2880
2881 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2882 {
2883
2884 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2885
2886
2887 __net_timestamp(skb);
2888
2889 skb_queue_tail(&hdev->rx_q, skb);
2890 queue_work(hdev->workqueue, &hdev->rx_work);
2891
2892 return 0;
2893 }
2894 EXPORT_SYMBOL(hci_recv_diag);
2895
/* Record a printf-formatted hardware info string on hdev, replacing any
 * previously set value (freed via kfree_const).
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
2906
/* Record a printf-formatted firmware info string on hdev, replacing any
 * previously set value (freed via kfree_const).
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
2917
2918
2919
/* Register HCI notification callbacks; the callback is appended so the
 * invocation order follows registration order.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2931
/* Unregister HCI notification callbacks previously added with
 * hci_register_cb().
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2943
/* Send a single frame to the driver. Consumes the skb on both success
 * and failure. Returns 0 or a negative error code from the driver.
 */
static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}
2979
2980
/* Queue an HCI command for transmission by hci_cmd_work(). */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3004
/* Send an HCI command without waiting for any response event. Only
 * vendor-specific commands (OGF 0x3f) are allowed here.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* Only vendor-specific commands may go unresponded: for any
		 * other OGF the controller is expected to answer with a
		 * Command Status or Command Complete event, so callers must
		 * use the regular hci_send_cmd()/hci_cmd_sync paths.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	/* Bypass the command queue since no completion is expected. */
	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
3035
3036
3037 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3038 {
3039 struct hci_command_hdr *hdr;
3040
3041 if (!hdev->sent_cmd)
3042 return NULL;
3043
3044 hdr = (void *) hdev->sent_cmd->data;
3045
3046 if (hdr->opcode != cpu_to_le16(opcode))
3047 return NULL;
3048
3049 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3050
3051 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3052 }
3053
3054
/* Get data from last received event.
 *
 * Returns a pointer to the payload of the last received event if it
 * matches the requested event code; for LE Meta events the subevent
 * code is matched instead and the returned pointer skips both headers.
 */
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
{
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;

	hdr = (void *)hdev->recv_event->data;
	offset = sizeof(*hdr);

	if (hdr->evt != event) {
		/* In case of LE metaevent check the subevent match */
		if (hdr->evt == HCI_EV_LE_META) {
			struct hci_ev_le_meta *ev;

			ev = (void *)hdev->recv_event->data + offset;
			offset += sizeof(*ev);
			if (ev->subevent == event)
				goto found;
		}
		return NULL;
	}

found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	return hdev->recv_event->data + offset;
}
3084
3085
3086 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3087 {
3088 struct hci_acl_hdr *hdr;
3089 int len = skb->len;
3090
3091 skb_push(skb, HCI_ACL_HDR_SIZE);
3092 skb_reset_transport_header(skb);
3093 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3094 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3095 hdr->dlen = cpu_to_le16(len);
3096 }
3097
/* Queue an ACL frame (and its fragments, if any) for transmission.
 * Each fragment gets its own ACL header; continuation fragments carry
 * ACL_CONT instead of ACL_START.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		/* AMP controllers address the logical channel handle. */
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically under the queue lock so
		 * they stay contiguous; _bh because this path may run in
		 * softirq context.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3159
/* Send ACL data: queue on the channel and kick the TX work item. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3170
3171
/* Send SCO data: prepend the SCO header, queue on the connection and
 * kick the TX work item.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* dlen is taken before the header push so it covers payload only. */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3191
3192
3193 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3194 {
3195 struct hci_iso_hdr *hdr;
3196 int len = skb->len;
3197
3198 skb_push(skb, HCI_ISO_HDR_SIZE);
3199 skb_reset_transport_header(skb);
3200 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3201 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3202 hdr->dlen = cpu_to_le16(len);
3203 }
3204
/* Queue an ISO frame (and its fragments, if any) for transmission.
 * The first fragment is flagged ISO_START (or ISO_SINGLE when whole),
 * middle fragments ISO_CONT and the last one ISO_END.
 */
static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u16 flags;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;

	list = skb_shinfo(skb)->frag_list;

	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}
}
3249
/* Send ISO data: queue on the connection and kick the TX work item. */
void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hci_queue_iso(conn, &conn->data_q, skb);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3260
3261
3262
3263
/* HCI Connection scheduler: compute how many packets this connection
 * may send, dividing the controller's available buffer count evenly
 * among the "num" active connections (minimum quote of 1).
 */
static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
{
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;

	/* Pick the buffer pool that matches the link type; LE and ISO fall
	 * back to the ACL pool when the controller exposes no dedicated one.
	 */
	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case ISO_LINK:
		cnt = hdev->iso_mtu ? hdev->iso_cnt :
			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
3302
/* Find the connection of the given type with the least outstanding
 * packets (fairness) that has queued data, and compute its TX quote.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	hci_quote_sent(conn, num, quote);

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3340
3341 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3342 {
3343 struct hci_conn_hash *h = &hdev->conn_hash;
3344 struct hci_conn *c;
3345
3346 bt_dev_err(hdev, "link tx timeout");
3347
3348 rcu_read_lock();
3349
3350
3351 list_for_each_entry_rcu(c, &h->list, list) {
3352 if (c->type == type && c->sent) {
3353 bt_dev_err(hdev, "killing stalled connection %pMR",
3354 &c->dst);
3355 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3356 }
3357 }
3358
3359 rcu_read_unlock();
3360 }
3361
/* Pick the next channel to service for the given link type.
 *
 * Selection is priority-first (highest skb priority at the head of a
 * channel queue wins), then fairness among channels of equal priority
 * (fewest packets already sent on the owning connection).
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the search. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);

	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3424
/* Anti-starvation: after a scheduling round, promote the head skb of
 * every channel that sent nothing to just below the maximum priority,
 * and reset the per-round sent counter of channels that did send.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel that sent this round: reset its counter
			 * and leave its priorities untouched.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once all connections of this type were seen. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3474
3475 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3476 {
3477
3478 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3479 }
3480
3481 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3482 {
3483 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3484
3485
3486 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3487 HCI_ACL_TX_TIMEOUT))
3488 hci_link_tx_to(hdev, ACL_LINK);
3489 }
3490 }
3491
3492
/* Schedule SCO: drain each eligible SCO connection's queue up to its
 * fair quote while controller SCO buffers remain.
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3515
/* Schedule eSCO: same strategy as hci_sched_sco() but for eSCO links,
 * sharing the controller's SCO buffer count.
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3539
/* Schedule ACL transmission under packet-based flow control. */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Only recalculate priorities when something was actually sent. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3581
/* Schedule ACL transmission under block-based flow control, where each
 * frame consumes a variable number of controller buffer blocks.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Frame needs more blocks than are left: give up
			 * this round entirely.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Only recalculate priorities when something was actually sent. */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3635
3636 static void hci_sched_acl(struct hci_dev *hdev)
3637 {
3638 BT_DBG("%s", hdev->name);
3639
3640
3641 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3642 return;
3643
3644
3645 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3646 return;
3647
3648 switch (hdev->flow_ctl_mode) {
3649 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3650 hci_sched_acl_pkt(hdev);
3651 break;
3652
3653 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3654 hci_sched_acl_blk(hdev);
3655 break;
3656 }
3657 }
3658
/* Schedule LE transmission. LE shares the ACL buffer pool when the
 * controller reports no dedicated LE buffers (le_pkts == 0).
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Write back the remaining budget to whichever pool was used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3708
3709
/* Schedule CIS/BIS (ISO) transmission. Falls back to the LE and then
 * the ACL buffer pool when no dedicated ISO buffers exist.
 */
static void hci_sched_iso(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ISO_LINK))
		return;

	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}
}
3735
/* TX work item: run the per-link-type schedulers (unless userspace has
 * exclusive access via the user channel) and then flush the raw queue.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_iso(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
3757
3758
3759
3760
3761 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3762 {
3763 struct hci_acl_hdr *hdr = (void *) skb->data;
3764 struct hci_conn *conn;
3765 __u16 handle, flags;
3766
3767 skb_pull(skb, HCI_ACL_HDR_SIZE);
3768
3769 handle = __le16_to_cpu(hdr->handle);
3770 flags = hci_flags(handle);
3771 handle = hci_handle(handle);
3772
3773 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3774 handle, flags);
3775
3776 hdev->stat.acl_rx++;
3777
3778 hci_dev_lock(hdev);
3779 conn = hci_conn_hash_lookup_handle(hdev, handle);
3780 hci_dev_unlock(hdev);
3781
3782 if (conn) {
3783 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3784
3785
3786 l2cap_recv_acldata(conn, skb, flags);
3787 return;
3788 } else {
3789 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3790 handle);
3791 }
3792
3793 kfree_skb(skb);
3794 }
3795
3796
3797 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3798 {
3799 struct hci_sco_hdr *hdr = (void *) skb->data;
3800 struct hci_conn *conn;
3801 __u16 handle, flags;
3802
3803 skb_pull(skb, HCI_SCO_HDR_SIZE);
3804
3805 handle = __le16_to_cpu(hdr->handle);
3806 flags = hci_flags(handle);
3807 handle = hci_handle(handle);
3808
3809 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3810 handle, flags);
3811
3812 hdev->stat.sco_rx++;
3813
3814 hci_dev_lock(hdev);
3815 conn = hci_conn_hash_lookup_handle(hdev, handle);
3816 hci_dev_unlock(hdev);
3817
3818 if (conn) {
3819
3820 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3821 sco_recv_scodata(conn, skb);
3822 return;
3823 } else {
3824 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3825 handle);
3826 }
3827
3828 kfree_skb(skb);
3829 }
3830
/* ISO data packet: validate the header, look up the owning connection
 * and hand the payload to the ISO layer; drop on any failure.
 */
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_iso_hdr *hdr;
	struct hci_conn *conn;
	__u16 handle, flags;

	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		goto drop;
	}

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (!conn) {
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
		goto drop;
	}

	/* Send to upper protocol; iso_recv() takes over the skb. */
	iso_recv(conn, skb, flags);
	return;

drop:
	kfree_skb(skb);
}
3867
3868 static bool hci_req_is_complete(struct hci_dev *hdev)
3869 {
3870 struct sk_buff *skb;
3871
3872 skb = skb_peek(&hdev->cmd_q);
3873 if (!skb)
3874 return true;
3875
3876 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3877 }
3878
/* Re-queue the last sent command at the head of the command queue so it
 * is retried. HCI_OP_RESET is deliberately never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	/* Clone so hdev->sent_cmd keeps its own reference. */
	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3900
/* Called on Command Complete/Status for @opcode: decide whether the
 * current request is finished and return its completion callback (via
 * req_complete/req_complete_skb) for the caller to invoke.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completion event doesn't match the last sent command,
	 * ignore it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
3965
/* RX work item: drain the RX queue and dispatch each packet to the
 * matching handler.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions used for collecting packet parsing
	 * coverage information from this background thread and associate
	 * the coverage with the syscall's thread which originally injected
	 * the packet. This helps fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4040
/* Command work item: send the next queued HCI command when the
 * controller has credit (cmd_cnt > 0) and arm the command timeout.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so completion handling can inspect the
		 * last sent command.
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			int res;
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);

			res = hci_send_frame(hdev, skb);
			if (res < 0)
				__hci_cmd_sync_cancel(hdev, -res);

			rcu_read_lock();
			if (test_bit(HCI_RESET, &hdev->flags) ||
			    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
						   HCI_CMD_TIMEOUT);
			rcu_read_unlock();
		} else {
			/* Clone failed: push the command back and retry. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}