/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

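/* Typical usage of the request API (a minimal sketch, not taken from the
 * original source): build a request, queue one or more commands on it and
 * run it with an optional completion callback.
 *
 *        struct hci_request req;
 *
 *        hci_req_init(&req, hdev);
 *        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *        err = hci_req_run(&req, complete_cb);
 */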
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                           struct sk_buff *skb)
{
        bt_dev_dbg(hdev, "result 0x%2.2x", result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

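/* Execute request and wait for completion. */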
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        bt_dev_dbg(hdev, "start");

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                                               hdev->req_status != HCI_REQ_PEND,
                                               timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        bt_dev_dbg(hdev, "end: err %d", err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        /* Check the state after obtaining the lock to protect the HCI_UP
         * flag against any races from hci_dev_do_close when the controller
         * gets removed.
         */
        if (test_bit(HCI_UP, &hdev->flags))
                ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        else
                ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);

        return ret;
}

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        bt_dev_dbg(hdev, "skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

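/* Queue a command to an asynchronous HCI request */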
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        hci_skb_event(skb) = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = hdev->def_page_scan_type;
                acp.interval = cpu_to_le16(hdev->def_page_scan_int);
        }

        acp.window = cpu_to_le16(hdev->def_page_scan_window);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
        hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
        return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
        bt_dev_dbg(hdev, "cancelling interleave scan");

        cancel_delayed_work_sync(&hdev->interleave_scan);

        hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

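/* Returns true if this call started interleave scanning, false otherwise */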
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
        /* Do interleaved scan only if all of the following are true:
         * - There is at least one ADV monitor
         * - At least one pending LE connection or one device to be scanned for
         * - Monitor offloading is not supported
         * If so, we should alternate between allowlist scan and one without
         * any filters to save power.
         */
        bool use_interleaving = hci_is_adv_monitoring(hdev) &&
                                !(list_empty(&hdev->pend_le_conns) &&
                                  list_empty(&hdev->pend_le_reports)) &&
                                hci_get_adv_monitor_offload_ext(hdev) ==
                                    HCI_ADV_MONITOR_EXT_NONE;
        bool is_interleaving = is_interleave_scanning(hdev);

        if (use_interleaving && !is_interleaving) {
                start_interleave_scan(hdev);
                bt_dev_dbg(hdev, "starting interleave scan");
                return true;
        }

        if (!use_interleaving && is_interleaving)
                cancel_interleave_scan(hdev);

        return false;
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        eir_create(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        /* Disable address resolution */
        if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
                __u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
                                 u8 bdaddr_type)
{
        struct hci_cp_le_del_from_accept_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(req->hdev)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
                if (irk) {
                        struct hci_cp_le_del_from_resolv_list cp;

                        cp.bdaddr_type = bdaddr_type;
                        bacpy(&cp.bdaddr, bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }
}

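/* Adds connection to accept list if needed. On error, returns -1. */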
static int add_to_accept_list(struct hci_request *req,
                              struct hci_conn_params *params, u8 *num_entries,
                              bool allow_rpa)
{
        struct hci_cp_le_add_to_accept_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in accept list */
        if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* Select filter policy to accept all advertising */
        if (*num_entries >= hdev->le_accept_list_size)
                return -1;

        /* Accept list can not be used with RPAs */
        if (!allow_rpa &&
            !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in accept list */
        if (hdev->suspended &&
            !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(hdev)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(hdev, &params->addr,
                                           params->addr_type);
                if (irk) {
                        struct hci_cp_le_add_to_resolv_list cp;

                        cp.bdaddr_type = params->addr_type;
                        bacpy(&cp.bdaddr, &params->addr);
                        memcpy(cp.peer_irk, irk->val, 16);

                        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                                memcpy(cp.local_irk, hdev->irk, 16);
                        else
                                memset(cp.local_irk, 0, 16);

                        hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }

        return 0;
}

static u8 update_accept_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow usage of the accept list even with RPAs in suspend. In
         * the worst case we won't be able to wake from devices that make
         * use of the privacy1.2 features.
         */
        bool allow_rpa = hdev->suspended;

        if (use_ll_privacy(hdev))
                allow_rpa = true;

        /* Go through the current accept list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_accept_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the accept list.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* Accept list can not be used with RPAs */
                if (!allow_rpa &&
                    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Since all no longer valid accept list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available accept list entries in the controller, then
         * just abort and return filter policy value to not use the
         * accept list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * accept list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Use the allowlist unless the following conditions are all true:
         * - We are not currently suspending
         * - There are 1 or more ADV monitors registered and it's not offloaded
         * - Interleaved scanning is not currently using the allowlist
         */
        if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
            hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
            hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
                return 0x00;

        /* Select filter policy to use accept list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

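/* Queue the HCI commands to start an LE scan with the given parameters.
 * When addr_resolv is true and the controller supports LL privacy,
 * controller based address resolution is enabled before scanning starts.
 */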
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy,
                               bool filter_dup, bool addr_resolv)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ll_privacy(hdev) && addr_resolv) {
                u8 enable = 0x01;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }

        /* Use ext scanning if set ext scan param and ext scan enable is
         * supported
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = filter_dup;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = filter_dup;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}

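/* Returns true if an le connection is in the scanning state */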
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c;

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == LE_LINK && c->state == BT_CONNECT &&
                    test_bit(HCI_CONN_SCANNING, &c->flags)) {
                        rcu_read_unlock();
                        return true;
                }
        }

        rcu_read_unlock();

        return false;
}

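/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */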
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;
        /* Default is to enable duplicates filter */
        u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        /* Background scanning should run with address resolution */
        bool addr_resolv = true;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        if (hdev->enable_advmon_interleave_scan &&
            __hci_update_interleaved_scan(hdev))
                return;

        bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);

        /* Adding or removing entries from the accept list must
         * happen before enabling scanning. The controller does
         * not allow accept list modification while scanning.
         */
        filter_policy = update_accept_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no accept list)
         * and 0x01 (accept list enabled) use the new filter policies
         * 0x02 (no accept list) and 0x03 (accept list enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = hdev->le_scan_window_suspend;
                interval = hdev->le_scan_int_suspend;
        } else if (hci_is_le_conn_scanning(hdev)) {
                window = hdev->le_scan_window_connect;
                interval = hdev->le_scan_int_connect;
        } else if (hci_is_adv_monitoring(hdev)) {
                window = hdev->le_scan_window_adv_monitor;
                interval = hdev->le_scan_int_adv_monitor;

                /* Disable duplicates filter when scanning for advertisement
                 * monitor for the following reasons.
                 *
                 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
                 * controllers ignore RSSI_Sampling_Period when the duplicates
                 * filter is enabled.
                 *
                 * For SW pattern filtering, when we're not doing interleaved
                 * scanning, it is necessary to disable duplicates filter,
                 * otherwise hosts can only receive one advertisement and it's
                 * impossible to know if a peer is still in range.
                 */
                filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
                   filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy, filter_dup,
                           addr_resolv);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
        return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                __hci_req_disable_ext_adv_instance(req, 0x00);
        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
                            &enable);
        }
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in peripheral role. */
        if (hdev->conn_hash.le_num_peripheral > 0) {
                /* Peripheral connection state and non connectable mode
                 * bit 20.
                 */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Peripheral connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in central role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
                /* Central connection state and non connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Central connection state and connectable mode bit 35 and
                 * scannable bit 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u16 adv_min_interval, adv_max_interval;
        u32 flags;

        flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
        adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * adapter and advertising flags.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));

        if (adv) {
                adv_min_interval = adv->min_interval;
                adv_max_interval = adv->max_interval;
        } else {
                adv_min_interval = hdev->le_adv_min_interval;
                adv_max_interval = hdev->le_adv_max_interval;
        }

        if (connectable) {
                cp.type = LE_ADV_IND;
        } else {
                if (adv_cur_instance_is_scannable(hdev))
                        cp.type = LE_ADV_SCAN_IND;
                else
                        cp.type = LE_ADV_NONCONN_IND;

                if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
                    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                        adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
                        adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
                }
        }

        cp.min_interval = cpu_to_le16(adv_min_interval);
        cp.max_interval = cpu_to_le16(adv_max_interval);
        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct {
                        struct hci_cp_le_set_ext_scan_rsp_data cp;
                        u8 data[HCI_MAX_EXT_AD_LENGTH];
                } pdu;

                memset(&pdu, 0, sizeof(pdu));

                len = eir_create_scan_rsp(hdev, instance, pdu.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(pdu.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, pdu.data, len);
                hdev->scan_rsp_data_len = len;

                pdu.cp.handle = instance;
                pdu.cp.length = len;
                pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
                            sizeof(pdu.cp) + len, &pdu.cp);
        } else {
                struct hci_cp_le_set_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                len = eir_create_scan_rsp(hdev, instance, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
        }
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct {
                        struct hci_cp_le_set_ext_adv_data cp;
                        u8 data[HCI_MAX_EXT_AD_LENGTH];
                } pdu;

                memset(&pdu, 0, sizeof(pdu));

                len = eir_create_adv_data(hdev, instance, pdu.data);

                /* There's nothing to do if the data hasn't changed */
                if (hdev->adv_data_len == len &&
                    memcmp(pdu.data, hdev->adv_data, len) == 0)
                        return;

                memcpy(hdev->adv_data, pdu.data, len);
                hdev->adv_data_len = len;

                pdu.cp.length = len;
                pdu.cp.handle = instance;
                pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
                            sizeof(pdu.cp) + len, &pdu.cp);
        } else {
                struct hci_cp_le_set_adv_data cp;

                memset(&cp, 0, sizeof(cp));

                len = eir_create_adv_data(hdev, instance, cp.data);

                /* There's nothing to do if the data hasn't changed */
                if (hdev->adv_data_len == len &&
                    memcmp(cp.data, hdev->adv_data, len) == 0)
                        return;

                memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
                hdev->adv_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
        }
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
                                            u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
        struct hci_request req;
        __u8 enable = 0x00;

        if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
                return;

        hci_req_init(&req, hdev);

        hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

        hci_req_run(&req, enable_addr_resolution_complete);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "status %u", status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                if (ext_adv_capable(hdev)) {
                        __hci_req_start_ext_adv(&req, 0x00);
                } else {
                        __hci_req_update_adv_data(&req, 0x00);
                        __hci_req_update_scan_rsp_data(&req, 0x00);
                        __hci_req_enable_advertising(&req);
                }
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

static int hci_req_add_le_interleaved_scan(struct hci_request *req,
                                           unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        int ret = 0;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                hci_req_add_le_scan_disable(req, false);
        hci_req_add_le_passive_scan(req);

        switch (hdev->interleave_scan_state) {
        case INTERLEAVE_SCAN_ALLOWLIST:
                bt_dev_dbg(hdev, "next state: allowlist");
                hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
                break;
        case INTERLEAVE_SCAN_NO_FILTER:
                bt_dev_dbg(hdev, "next state: no filter");
                hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
                break;
        case INTERLEAVE_SCAN_NONE:
                BT_ERR("unexpected error");
                ret = -1;
        }

        hci_dev_unlock(hdev);

        return ret;
}

static void interleave_scan_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            interleave_scan.work);
        u8 status;
        unsigned long timeout;

        if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
                timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
        } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
                timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
        } else {
                bt_dev_err(hdev, "unexpected error");
                return;
        }

        hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
                     HCI_CMD_TIMEOUT, &status);

        /* Don't continue interleaving if it was canceled */
        if (is_interleave_scanning(hdev))
                queue_delayed_work(hdev->req_workqueue,
                                   &hdev->interleave_scan, timeout);
}

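/* Select an own address type for an extended advertising instance and,
 * when a random address is required, return it in rand_addr. rand_addr is
 * left as BDADDR_ANY when no Set Random Address command is needed.
 */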
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
                           bool use_rpa, struct adv_info *adv_instance,
                           u8 *own_addr_type, bdaddr_t *rand_addr)
{
        int err;

        bacpy(rand_addr, BDADDR_ANY);

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired then generate a new one.
         */
        if (use_rpa) {
                /* If Controller supports LL Privacy use own address type is
                 * 0x03
                 */
                if (use_ll_privacy(hdev))
                        *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
                else
                        *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (adv_instance) {
                        if (adv_rpa_valid(adv_instance))
                                return 0;
                } else {
                        if (rpa_valid(hdev))
                                return 0;
                }

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        bt_dev_err(hdev, "failed to generate new RPA");
                        return err;
                }

                bacpy(rand_addr, &hdev->rpa);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                bacpy(rand_addr, &nrpa);

                return 0;
        }

        /* No privacy so use a public address. */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
        hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                bt_dev_dbg(hdev, "Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
        struct hci_cp_le_set_ext_adv_params cp;
        struct hci_dev *hdev = req->hdev;
        bool connectable;
        u32 flags;
        bdaddr_t random_addr;
        u8 own_addr_type;
        int err;
        struct adv_info *adv;
        bool secondary_adv, require_privacy;

        if (instance > 0) {
                adv = hci_find_adv_instance(hdev, instance);
                if (!adv)
                        return -EINVAL;
        } else {
                adv = NULL;
        }

        flags = hci_adv_instance_flags(hdev, instance);

        /* If the "connectable" instance flag was not set, then choose between
         * adapter and advertising flags.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return -EPERM;

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        require_privacy = !connectable;

        /* Don't require privacy for periodic advertising */
        if (adv && adv->periodic)
                require_privacy = false;

        err = hci_get_random_address(hdev, require_privacy,
                                     adv_use_rpa(hdev, flags), adv,
                                     &own_addr_type, &random_addr);
        if (err < 0)
                return err;

        memset(&cp, 0, sizeof(cp));

        if (adv) {
                hci_cpu_to_le24(adv->min_interval, cp.min_interval);
                hci_cpu_to_le24(adv->max_interval, cp.max_interval);
                cp.tx_power = adv->tx_power;
        } else {
                hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
                hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
                cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
        }

        secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

        if (connectable) {
                if (secondary_adv)
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
        } else if (hci_adv_instance_is_scannable(hdev, instance) ||
                   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
                if (secondary_adv)
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
        } else {
                /* Secondary and periodic cannot use legacy PDUs */
                if (secondary_adv || (adv && adv->periodic))
                        cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
                else
                        cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
        }

        cp.own_addr_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;
        cp.handle = instance;

        if (flags & MGMT_ADV_FLAG_SEC_2M) {
                cp.primary_phy = HCI_ADV_PHY_1M;
                cp.secondary_phy = HCI_ADV_PHY_2M;
        } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
                cp.primary_phy = HCI_ADV_PHY_CODED;
                cp.secondary_phy = HCI_ADV_PHY_CODED;
        } else {
                /* In all other cases use 1M */
                cp.primary_phy = HCI_ADV_PHY_1M;
                cp.secondary_phy = HCI_ADV_PHY_1M;
        }

        hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

        if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
             own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
            bacmp(&random_addr, BDADDR_ANY)) {
                struct hci_cp_le_set_adv_set_rand_addr cp;

                /* Check if random address need to be updated */
                if (adv) {
                        if (!bacmp(&random_addr, &adv->random_addr))
                                return 0;
                } else {
                        if (!bacmp(&random_addr, &hdev->random_addr))
                                return 0;

                        /* Instance 0x00 doesn't have an adv_info, instead it
                         * uses hdev->random_addr to track its address so
                         * whenever it needs to be updated this also sets the
                         * random address since hdev->random_addr is shared
                         * with the scan state machine.
                         */
                        set_random_addr(req, &random_addr);
                }

                memset(&cp, 0, sizeof(cp));

                cp.handle = instance;
                bacpy(&cp.bdaddr, &random_addr);

                hci_req_add(req,
                            HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
                            sizeof(cp), &cp);
        }

        return 0;
}

int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_ext_adv_enable *cp;
        struct hci_cp_ext_adv_set *adv_set;
        u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
        struct adv_info *adv_instance;

        if (instance > 0) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return -EINVAL;
        } else {
                adv_instance = NULL;
        }

        cp = (void *)data;
        adv_set = (void *)cp->data;

        memset(cp, 0, sizeof(*cp));

        cp->enable = 0x01;
        cp->num_of_sets = 0x01;

        memset(adv_set, 0, sizeof(*adv_set));

        adv_set->handle = instance;

        /* Set duration per instance since controller is responsible for
         * scheduling it.
         */
        if (adv_instance && adv_instance->duration) {
                u16 duration = adv_instance->duration * MSEC_PER_SEC;

                /* Time = N * 10 ms */
                adv_set->duration = cpu_to_le16(duration / 10);
        }

        hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
                    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
                    data);

        return 0;
}

int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_ext_adv_enable *cp;
        struct hci_cp_ext_adv_set *adv_set;
        u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
        u8 req_size;

        /* If request specifies an instance that doesn't exist, fail */
        if (instance > 0 && !hci_find_adv_instance(hdev, instance))
                return -EINVAL;

        memset(data, 0, sizeof(data));

        cp = (void *)data;
        adv_set = (void *)cp->data;

        /* Instance 0x00 indicates that all advertising instances will be
         * disabled
         */
        cp->num_of_sets = !!instance;
        cp->enable = 0x00;

        adv_set->handle = instance;

        req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
        hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);

        return 0;
}

int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;

        /* If request specifies an instance that doesn't exist, fail */
        if (instance > 0 && !hci_find_adv_instance(hdev, instance))
                return -EINVAL;

        hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);

        return 0;
}

int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
        int err;

        /* If instance isn't pending, the chip knows about it, and it's safe
         * to disable
         */
        if (adv_instance && !adv_instance->pending)
                __hci_req_disable_ext_adv_instance(req, instance);

        err = __hci_req_setup_ext_adv_instance(req, instance);
        if (err < 0)
                return err;

        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_ext_advertising(req, instance);

        return 0;
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a
         * timeout in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        /* Only use work for scheduling instances with legacy advertising */
        if (!ext_adv_capable(hdev)) {
                hdev->adv_instance_timeout = timeout;
                queue_delayed_work(hdev->req_workqueue,
                                   &hdev->adv_instance_expire,
                                   msecs_to_jiffies(timeout * 1000));
        }

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        if (ext_adv_capable(hdev)) {
                __hci_req_start_ext_adv(req, instance);
        } else {
                __hci_req_update_adv_data(req, instance);
                __hci_req_update_scan_rsp_data(req, instance);
                __hci_req_enable_advertising(req);
        }

        return 0;
}

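/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */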
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance && !ext_adv_capable(hdev))
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired or there is something else than
         * the current RPA in use, then generate a new one.
         */
        if (use_rpa) {
                /* If Controller supports LL Privacy use own address type is
                 * 0x03
                 */
                if (use_ll_privacy(hdev))
                        *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
                else
                        *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (rpa_valid(hdev))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        bt_dev_err(hdev, "failed to generate new RPA");
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

static bool disconnected_accept_list_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->accept_list, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hdev->scanning_paused)
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_accept_list_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
            test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
                return;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
        struct bt_uuid *uuid;
        u8 val = 0;

        list_for_each_entry(uuid, &hdev->uuids, list)
                val |= uuid->svc_hint;

        return val;
}

void __hci_req_update_class(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 cod[3];

        bt_dev_dbg(hdev, "");

        if (!hdev_is_powered(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        cod[0] = hdev->minor_class;
        cod[1] = hdev->major_class;
        cod[2] = get_service_classes(hdev);

        /* Set the Limited Discoverable Mode bit in the Class of Device */
        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                cod[1] |= 0x20;

        if (memcmp(cod, hdev->dev_class, 3) == 0)
                return;

        hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
                      u8 reason)
{
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
                if (conn->type == AMP_LINK) {
                        struct hci_cp_disconn_phy_link cp;

                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
                        cp.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
                                    &cp);
                } else {
                        struct hci_cp_disconnect dc;

                        dc.handle = cpu_to_le16(conn->handle);
                        dc.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
                }

                conn->state = BT_DISCONN;

                break;
        case BT_CONNECT:
                if (conn->type == LE_LINK) {
                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
                                break;
                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
                                    0, NULL);
                } else if (conn->type == ACL_LINK) {
                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
                                break;
                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
                                    6, &conn->dst);
                }
                break;
        case BT_CONNECT2:
                if (conn->type == ACL_LINK) {
                        struct hci_cp_reject_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);
                        rej.reason = reason;

                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
                                    sizeof(rej), &rej);
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        struct hci_cp_reject_sync_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);

                        /* SCO rejection has its own limited set of
                         * allowed error values (0x0D-0x0F) which isn't
                         * compatible with most values passed to this
                         * function. To be safe hard-code one of the
                         * values that's suitable for SCO.
                         */
                        rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
                                    sizeof(rej), &rej);
                }
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        if (status)
                bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x",
                           status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
        struct hci_request req;
        int err;

        hci_req_init(&req, conn->hdev);

        __hci_abort_conn(&req, conn, reason);

        err = hci_req_run(&req, abort_conn_complete);
        if (err && err != -ENODATA) {
                bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
                           err);
                return err;
        }

        return 0;
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
        hci_req_add_le_scan_disable(req, false);
        return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
        u8 length = opt;
        const u8 giac[3] = { 0x33, 0x8b, 0x9e };        /* General IAC */
        const u8 liac[3] = { 0x00, 0x8b, 0x9e };        /* Limited IAC */
        struct hci_cp_inquiry cp;

        if (test_bit(HCI_INQUIRY, &req->hdev->flags))
                return 0;

        bt_dev_dbg(req->hdev, "");

        hci_dev_lock(req->hdev);
        hci_inquiry_cache_flush(req->hdev);
        hci_dev_unlock(req->hdev);

        memset(&cp, 0, sizeof(cp));

        if (req->hdev->discovery.limited)
                memcpy(&cp.lap, liac, sizeof(cp.lap));
        else
                memcpy(&cp.lap, giac, sizeof(cp.lap));

        cp.length = length;

        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        u8 status;

        bt_dev_dbg(hdev, "");

        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;

        cancel_delayed_work(&hdev->le_scan_restart);

        hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
                           status);
                return;
        }

        hdev->discovery.scan_start = 0;

        /* If we were running LE only scan, change discovery state. If
         * we were running both LE and BR/EDR inquiry simultaneously,
         * and BR/EDR inquiry is already finished, stop discovery,
         * otherwise BR/EDR inquiry will stop discovery when finished.
         * If we will resolve remote device name, do not change
         * discovery state.
         */
        if (hdev->discovery.type == DISCOV_TYPE_LE)
                goto discov_stopped;

        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
                return;

        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
                    hdev->discovery.state != DISCOVERY_RESOLVING)
                        goto discov_stopped;

                return;
        }

        hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
                     HCI_CMD_TIMEOUT, &status);
        if (status) {
                bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
                goto discov_stopped;
        }

        return;

discov_stopped:
        hci_dev_lock(hdev);
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* If controller is not scanning we are done. */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return 0;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return 0;
        }

        hci_req_add_le_scan_disable(req, false);

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_ENABLE;
                cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_restart.work);
        unsigned long timeout, duration, scan_start, now;
        u8 status;

        bt_dev_dbg(hdev, "");

        hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                bt_dev_err(hdev, "failed to restart LE scan: status %d",
                           status);
                return;
        }

        hci_dev_lock(hdev);

        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
            !hdev->discovery.scan_start)
                goto unlock;

        /* When the scan was started, hdev->le_scan_disable has been queued
         * after duration from scan_start. During scan restart this job
         * has been canceled, and we need to queue it again after proper
         * timeout, to make sure that scan does not run indefinitely.
         */
        duration = hdev->discovery.scan_duration;
        scan_start = hdev->discovery.scan_start;
        now = jiffies;
        if (now - scan_start <= duration) {
                int elapsed;

                if (now >= scan_start)
                        elapsed = now - scan_start;
                else
                        elapsed = ULONG_MAX - scan_start + now;

                timeout = duration - elapsed;
        } else {
                timeout = 0;
        }

        queue_delayed_work(hdev->req_workqueue,
                           &hdev->le_scan_disable, timeout);

unlock:
        hci_dev_unlock(hdev);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct discovery_state *d = &hdev->discovery;
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
        bool ret = false;

        bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
                if (test_bit(HCI_INQUIRY, &hdev->flags))
                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_delayed_work(&hdev->le_scan_disable);
                        cancel_delayed_work(&hdev->le_scan_restart);
                        hci_req_add_le_scan_disable(req, false);
                }

                ret = true;
        } else {
                /* Passive scanning */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        hci_req_add_le_scan_disable(req, false);
                        ret = true;
                }
        }

        /* No further actions needed for LE-only discovery */
        if (d->type == DISCOV_TYPE_LE)
                return ret;

        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
                                                     NAME_PENDING);
                if (!e)
                        return ret;

                bacpy(&cp.bdaddr, &e->data.bdaddr);
                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
                            &cp);
                ret = true;
        }

        return ret;
}

static void config_data_path_complete(struct hci_dev *hdev, u8 status,
                                      u16 opcode)
{
        bt_dev_dbg(hdev, "status %u", status);
}

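/* Configure the codec data path for an offloaded (e)SCO connection. One
 * Configure Data Path command is queued per direction: 0x00 (input, host to
 * controller) and 0x01 (output, controller to host).
 */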
int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
{
        struct hci_request req;
        int err;
        __u8 vnd_len, *vnd_data = NULL;
        struct hci_op_configure_data_path *cmd = NULL;

        hci_req_init(&req, hdev);

        err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
                                          &vnd_data);
        if (err < 0)
                goto error;

        cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
        if (!cmd) {
                err = -ENOMEM;
                goto error;
        }

        err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
        if (err < 0)
                goto error;

        cmd->vnd_len = vnd_len;
        memcpy(cmd->vnd_data, vnd_data, vnd_len);

        cmd->direction = 0x00;  /* Input (Host to Controller) */
        hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len,
                    cmd);

        cmd->direction = 0x01;  /* Output (Controller to Host) */
        hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len,
                    cmd);

        err = hci_req_run(&req, config_data_path_complete);
error:

        kfree(cmd);
        kfree(vnd_data);
        return err;
}

void hci_request_setup(struct hci_dev *hdev)
{
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
        INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
        __hci_cmd_sync_cancel(hdev, ENODEV);

        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }

        cancel_interleave_scan(hdev);
}