/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
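
/* Editor's usage sketch (illustrative, not part of the original file): a
 * caller typically builds a request on the stack, queues one or more
 * commands, and then runs the whole batch asynchronously. The completion
 * callback name below is a hypothetical placeholder.
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_complete_cb);
 *	if (err)
 *		hci_req_purge(&req);
 */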

void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect the HCI_UP
	 * flag against any races from hci_dev_do_close when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}
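
/* Editor's sketch (illustrative, not part of the original file): the
 * function pointer passed to hci_req_sync() is a request builder that only
 * queues commands; the helper then runs them and waits for completion. The
 * builder below is a hypothetical example.
 *
 *	static int read_local_version_req(struct hci_request *req,
 *					  unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, read_local_version_req, 0,
 *			   HCI_CMD_TIMEOUT, &status);
 */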

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
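
/* Editor's note (not part of the original file): the resulting skb carries
 * the standard 3-byte HCI command header defined by the Bluetooth Core
 * Specification: a little-endian 16-bit opcode (OGF/OCF) followed by a
 * one-byte parameter length, then plen bytes of parameters.
 */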

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
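		/* (Editor's note, not part of the original file: the page
		 * scan interval is in 0.625 ms units, so
		 * 0x0100 = 256 * 0.625 ms = 160 ms.)
		 */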
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if this function starts the interleave scan (i.e. it was not
 * already running on entry); otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list cannot be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended &&
	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}
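
/* Editor's note (not part of the original file): the return convention
 * above is 0 for "handled" (already present, added, or intentionally
 * skipped during suspend) and -1 for "the accept list cannot be used"
 * (list full, or an RPA-using device without LL privacy support).
 * update_accept_list() below maps -1 to filter policy 0x00, i.e. scanning
 * without the accept list.
 */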

static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of accept list even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the privacy 1.2
	 * features. Additionally, once we support privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list cannot be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if the controller supports the LE Set
	 * Extended Scan Parameters and LE Set Extended Scan Enable commands.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
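
/* Editor's note (not part of the original file): the interval and window
 * arguments above are in the 0.625 ms units used by the LE scan commands;
 * for example, an interval of 0x0060 (96 units) corresponds to 60 ms. Per
 * the specification, the scan window must not exceed the scan interval.
 */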

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution, so that the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		adv_min_interval = adv->min_interval;
		adv_max_interval = adv->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (adv_cur_instance_is_scannable(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_scan_rsp_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = eir_create_scan_rsp(hdev, instance, pdu.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, pdu.data, len);
		hdev->scan_rsp_data_len = len;

		pdu.cp.handle = instance;
		pdu.cp.length = len;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		len = eir_create_scan_rsp(hdev, instance, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_adv_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = eir_create_adv_data(hdev, instance, pdu.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(pdu.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, pdu.data, len);
		hdev->adv_data_len = len;

		pdu.cp.length = len;
		pdu.cp.handle = instance;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = eir_create_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 enable = 0x00;

	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

	hci_req_run(&req, enable_addr_resolution_complete);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		BT_ERR("unexpected error");
		ret = -1;
	}

	hci_dev_unlock(hdev);

	return ret;
}

static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	u8 status;
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
		     HCI_CMD_TIMEOUT, &status);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}
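
/* Editor's note (not part of the original file): the interleave machinery
 * above implements a simple two-state loop. start_interleave_scan() enters
 * INTERLEAVE_SCAN_NO_FILTER, and each run of interleave_scan_work()
 * re-programs the passive scan and toggles between NO_FILTER and ALLOWLIST,
 * dwelling for advmon_no_filter_duration or advmon_allowlist_duration
 * milliseconds respectively, until cancel_interleave_scan() resets the
 * state to NONE.
 */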

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03, where the controller generates the RPA.
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
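
/* Editor's summary (not part of the original file): the selection above
 * resolves to one of three outcomes:
 * - use_rpa: own_addr_type is random (0x03 with LL Privacy, else 0x01);
 *   rand_addr carries a newly generated RPA, or stays BDADDR_ANY while the
 *   current RPA is still valid;
 * - require_privacy: own_addr_type is random and rand_addr holds a fresh
 *   non-resolvable private address;
 * - otherwise: own_addr_type is public and rand_addr stays BDADDR_ANY.
 */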

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv;
	bool secondary_adv, require_privacy;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
	} else {
		adv = NULL;
	}

	flags = hci_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	require_privacy = !connectable;

	/* Don't require privacy for periodic adv? */
	if (adv && adv->periodic)
		require_privacy = false;

	err = hci_get_random_address(hdev, require_privacy,
				     adv_use_rpa(hdev, flags), adv,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
		cp.tx_power = adv->tx_power;
	} else {
		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
	}

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		/* Secondary and periodic cannot use legacy PDUs */
		if (secondary_adv || (adv && adv->periodic))
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if the random address needs to be updated */
		if (adv) {
			if (!bacmp(&random_addr, &adv->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
			/* Instance 0x00 doesn't have an adv_info, instead it
			 * uses hdev->random_addr to track its address, so
			 * whenever it needs to be updated this also sets the
			 * random address since hdev->random_addr is shared
			 * with the scan state machine.
			 */
			set_random_addr(req, &random_addr);
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = instance;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}
1462 
1463 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1464 {
1465     struct hci_dev *hdev = req->hdev;
1466     struct hci_cp_le_set_ext_adv_enable *cp;
1467     struct hci_cp_ext_adv_set *adv_set;
1468     u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1469     struct adv_info *adv_instance;
1470 
1471     if (instance > 0) {
1472         adv_instance = hci_find_adv_instance(hdev, instance);
1473         if (!adv_instance)
1474             return -EINVAL;
1475     } else {
1476         adv_instance = NULL;
1477     }
1478 
1479     cp = (void *) data;
1480     adv_set = (void *) cp->data;
1481 
1482     memset(cp, 0, sizeof(*cp));
1483 
1484     cp->enable = 0x01;
1485     cp->num_of_sets = 0x01;
1486 
1487     memset(adv_set, 0, sizeof(*adv_set));
1488 
1489     adv_set->handle = instance;
1490 
1491     /* Set duration per instance since controller is responsible for
1492      * scheduling it.
1493      */
1494     if (adv_instance && adv_instance->duration) {
1495         u16 duration = adv_instance->timeout * MSEC_PER_SEC;
1496 
1497         /* Time = N * 10 ms */
1498         adv_set->duration = cpu_to_le16(duration / 10);
1499     }
1500 
1501     hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1502             sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1503             data);
1504 
1505     return 0;
1506 }
1507 
1508 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1509 {
1510     struct hci_dev *hdev = req->hdev;
1511     struct hci_cp_le_set_ext_adv_enable *cp;
1512     struct hci_cp_ext_adv_set *adv_set;
1513     u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1514     u8 req_size;
1515 
1516     /* If request specifies an instance that doesn't exist, fail */
1517     if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1518         return -EINVAL;
1519 
1520     memset(data, 0, sizeof(data));
1521 
1522     cp = (void *)data;
1523     adv_set = (void *)cp->data;
1524 
1525     /* Instance 0x00 indicates all advertising instances will be disabled */
1526     cp->num_of_sets = !!instance;
1527     cp->enable = 0x00;
1528 
1529     adv_set->handle = instance;
1530 
1531     req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1532     hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
1533 
1534     return 0;
1535 }
1536 
1537 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1538 {
1539     struct hci_dev *hdev = req->hdev;
1540 
1541     /* If request specifies an instance that doesn't exist, fail */
1542     if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1543         return -EINVAL;
1544 
1545     hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1546 
1547     return 0;
1548 }
1549 
1550 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1551 {
1552     struct hci_dev *hdev = req->hdev;
1553     struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
1554     int err;
1555 
1556     /* If instance isn't pending, the chip knows about it, and it's safe to
1557      * disable
1558      */
1559     if (adv_instance && !adv_instance->pending)
1560         __hci_req_disable_ext_adv_instance(req, instance);
1561 
1562     err = __hci_req_setup_ext_adv_instance(req, instance);
1563     if (err < 0)
1564         return err;
1565 
1566     __hci_req_update_scan_rsp_data(req, instance);
1567     __hci_req_enable_ext_advertising(req, instance);
1568 
1569     return 0;
1570 }
1571 
1572 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1573                     bool force)
1574 {
1575     struct hci_dev *hdev = req->hdev;
1576     struct adv_info *adv_instance = NULL;
1577     u16 timeout;
1578 
1579     if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1580         list_empty(&hdev->adv_instances))
1581         return -EPERM;
1582 
1583     if (hdev->adv_instance_timeout)
1584         return -EBUSY;
1585 
1586     adv_instance = hci_find_adv_instance(hdev, instance);
1587     if (!adv_instance)
1588         return -ENOENT;
1589 
1590     /* A zero timeout means unlimited advertising. As long as there is
1591      * only one instance, duration should be ignored. We still set a timeout
1592      * in case further instances are being added later on.
1593      *
1594      * If the remaining lifetime of the instance is more than the duration
1595      * then the timeout corresponds to the duration, otherwise it will be
1596      * reduced to the remaining instance lifetime.
1597      */
1598     if (adv_instance->timeout == 0 ||
1599         adv_instance->duration <= adv_instance->remaining_time)
1600         timeout = adv_instance->duration;
1601     else
1602         timeout = adv_instance->remaining_time;
1603 
1604     /* The remaining time is being reduced unless the instance is being
1605      * advertised without time limit.
1606      */
1607     if (adv_instance->timeout)
1608         adv_instance->remaining_time =
1609                 adv_instance->remaining_time - timeout;
1610 
1611     /* Only use work for scheduling instances with legacy advertising */
1612     if (!ext_adv_capable(hdev)) {
1613         hdev->adv_instance_timeout = timeout;
1614         queue_delayed_work(hdev->req_workqueue,
1615                &hdev->adv_instance_expire,
1616                msecs_to_jiffies(timeout * 1000));
1617     }
1618 
1619     /* If we're just re-scheduling the same instance again then do not
1620      * execute any HCI commands. This happens when a single instance is
1621      * being advertised.
1622      */
1623     if (!force && hdev->cur_adv_instance == instance &&
1624         hci_dev_test_flag(hdev, HCI_LE_ADV))
1625         return 0;
1626 
1627     hdev->cur_adv_instance = instance;
1628     if (ext_adv_capable(hdev)) {
1629         __hci_req_start_ext_adv(req, instance);
1630     } else {
1631         __hci_req_update_adv_data(req, instance);
1632         __hci_req_update_scan_rsp_data(req, instance);
1633         __hci_req_enable_advertising(req);
1634     }
1635 
1636     return 0;
1637 }
1638 
1639 /* For a single instance:
1640  * - force == true: The instance will be removed even when its remaining
1641  *   lifetime is not zero.
1642  * - force == false: The instance will be deactivated but kept stored unless
1643  *   the remaining lifetime is zero.
1644  *
1645  * For instance == 0x00:
1646  * - force == true: All instances will be removed regardless of their timeout
1647  *   setting.
1648  * - force == false: Only instances that have a timeout will be removed.
1649  */
1650 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1651                 struct hci_request *req, u8 instance,
1652                 bool force)
1653 {
1654     struct adv_info *adv_instance, *n, *next_instance = NULL;
1655     int err;
1656     u8 rem_inst;
1657 
1658     /* Cancel any timeout concerning the removed instance(s). */
1659     if (!instance || hdev->cur_adv_instance == instance)
1660         cancel_adv_timeout(hdev);
1661 
1662     /* Get the next instance to advertise BEFORE we remove
1663      * the current one. This can be the same instance again
1664      * if there is only one instance.
1665      */
1666     if (instance && hdev->cur_adv_instance == instance)
1667         next_instance = hci_get_next_instance(hdev, instance);
1668 
1669     if (instance == 0x00) {
1670         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1671                      list) {
1672             if (!(force || adv_instance->timeout))
1673                 continue;
1674 
1675             rem_inst = adv_instance->instance;
1676             err = hci_remove_adv_instance(hdev, rem_inst);
1677             if (!err)
1678                 mgmt_advertising_removed(sk, hdev, rem_inst);
1679         }
1680     } else {
1681         adv_instance = hci_find_adv_instance(hdev, instance);
1682 
1683         if (force || (adv_instance && adv_instance->timeout &&
1684                   !adv_instance->remaining_time)) {
1685             /* Don't advertise a removed instance. */
1686             if (next_instance &&
1687                 next_instance->instance == instance)
1688                 next_instance = NULL;
1689 
1690             err = hci_remove_adv_instance(hdev, instance);
1691             if (!err)
1692                 mgmt_advertising_removed(sk, hdev, instance);
1693         }
1694     }
1695 
1696     if (!req || !hdev_is_powered(hdev) ||
1697         hci_dev_test_flag(hdev, HCI_ADVERTISING))
1698         return;
1699 
1700     if (next_instance && !ext_adv_capable(hdev))
1701         __hci_req_schedule_adv_instance(req, next_instance->instance,
1702                         false);
1703 }
1704 
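/* Pick the own-address type (and program a random address when needed)
 * for the next air operation. Precedence, as implemented below:
 *
 *   1. use_rpa          -> resolvable private address (RPA)
 *   2. require_privacy  -> freshly generated non-resolvable address
 *   3. forced or only a static address -> static random address
 *   4. otherwise        -> public address
 */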
1705 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1706                   bool use_rpa, u8 *own_addr_type)
1707 {
1708     struct hci_dev *hdev = req->hdev;
1709     int err;
1710 
1711     /* If privacy is enabled use a resolvable private address. If the
1712      * current RPA has expired or there is something other than the
1713      * current RPA in use, then generate a new one.
1714      */
1715     if (use_rpa) {
1716         /* If the controller supports LL Privacy, use own address type
1717          * 0x03 (controller-generated resolvable private address).
1718          */
1719         if (use_ll_privacy(hdev))
1720             *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1721         else
1722             *own_addr_type = ADDR_LE_DEV_RANDOM;
1723 
1724         if (rpa_valid(hdev))
1725             return 0;
1726 
1727         err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1728         if (err < 0) {
1729             bt_dev_err(hdev, "failed to generate new RPA");
1730             return err;
1731         }
1732 
1733         set_random_addr(req, &hdev->rpa);
1734 
1735         return 0;
1736     }
1737 
1738     /* In case of required privacy without resolvable private address,
1739      * use a non-resolvable private address. This is useful for active
1740      * scanning and non-connectable advertising.
1741      */
1742     if (require_privacy) {
1743         bdaddr_t nrpa;
1744 
1745         while (true) {
1746             /* The non-resolvable private address is generated
1747              * from six random bytes with the two most significant
1748              * bits cleared.
1749              */
1750             get_random_bytes(&nrpa, 6);
1751             nrpa.b[5] &= 0x3f;
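            /* Example: a random top byte of 0xd5 (1101 0101b) becomes
             * 0x15 (0001 0101b) once the 0x3f mask clears the two most
             * significant bits.
             */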
1752 
1753             /* The non-resolvable private address shall not be
1754              * equal to the public address.
1755              */
1756             if (bacmp(&hdev->bdaddr, &nrpa))
1757                 break;
1758         }
1759 
1760         *own_addr_type = ADDR_LE_DEV_RANDOM;
1761         set_random_addr(req, &nrpa);
1762         return 0;
1763     }
1764 
1765     /* If forcing static address is in use or there is no public
1766      * address, use the static address as the random address (but skip
1767      * the HCI command if the current random address is already the
1768      * static one).
1769      *
1770      * In case BR/EDR has been disabled on a dual-mode controller
1771      * and a static address has been configured, then use that
1772      * address instead of the public BR/EDR address.
1773      */
1774     if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1775         !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1776         (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1777          bacmp(&hdev->static_addr, BDADDR_ANY))) {
1778         *own_addr_type = ADDR_LE_DEV_RANDOM;
1779         if (bacmp(&hdev->static_addr, &hdev->random_addr))
1780             hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1781                     &hdev->static_addr);
1782         return 0;
1783     }
1784 
1785     /* Neither privacy nor static address is being used so use a
1786      * public address.
1787      */
1788     *own_addr_type = ADDR_LE_DEV_PUBLIC;
1789 
1790     return 0;
1791 }
1792 
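/* Check whether any accept list entry lacks a fully established ACL
 * connection (no connection at all, or one still being set up): if so,
 * page scan must stay enabled so those devices can reconnect.
 */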
1793 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
1794 {
1795     struct bdaddr_list *b;
1796 
1797     list_for_each_entry(b, &hdev->accept_list, list) {
1798         struct hci_conn *conn;
1799 
1800         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1801         if (!conn)
1802             return true;
1803 
1804         if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1805             return true;
1806     }
1807 
1808     return false;
1809 }
1810 
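/* Recompute the BR/EDR scan mode from the current flags. The final write
 * is skipped when HCI_PSCAN/HCI_ISCAN already match the computed value,
 * so calling this repeatedly is cheap.
 */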
1811 void __hci_req_update_scan(struct hci_request *req)
1812 {
1813     struct hci_dev *hdev = req->hdev;
1814     u8 scan;
1815 
1816     if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1817         return;
1818 
1819     if (!hdev_is_powered(hdev))
1820         return;
1821 
1822     if (mgmt_powering_down(hdev))
1823         return;
1824 
1825     if (hdev->scanning_paused)
1826         return;
1827 
1828     if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1829         disconnected_accept_list_entries(hdev))
1830         scan = SCAN_PAGE;
1831     else
1832         scan = SCAN_DISABLED;
1833 
1834     if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1835         scan |= SCAN_INQUIRY;
1836 
1837     if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1838         test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1839         return;
1840 
1841     hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1842 }
1843 
1844 static u8 get_service_classes(struct hci_dev *hdev)
1845 {
1846     struct bt_uuid *uuid;
1847     u8 val = 0;
1848 
1849     list_for_each_entry(uuid, &hdev->uuids, list)
1850         val |= uuid->svc_hint;
1851 
1852     return val;
1853 }
1854 
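/* Refresh the Class of Device. Byte layout used below, assuming the
 * usual little-endian CoD encoding:
 *
 *   cod[0] = minor device class
 *   cod[1] = major device class, with bit 5 (0x20) doubling as the
 *            Limited Discoverable Mode flag (CoD bit 13)
 *   cod[2] = service class hints collected from registered UUIDs
 *
 * e.g. major class 0x01 (computer) in limited discoverable mode is
 * written as cod[1] = 0x21.
 */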
1855 void __hci_req_update_class(struct hci_request *req)
1856 {
1857     struct hci_dev *hdev = req->hdev;
1858     u8 cod[3];
1859 
1860     bt_dev_dbg(hdev, "");
1861 
1862     if (!hdev_is_powered(hdev))
1863         return;
1864 
1865     if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1866         return;
1867 
1868     if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1869         return;
1870 
1871     cod[0] = hdev->minor_class;
1872     cod[1] = hdev->major_class;
1873     cod[2] = get_service_classes(hdev);
1874 
1875     if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1876         cod[1] |= 0x20;
1877 
1878     if (memcmp(cod, hdev->dev_class, 3) == 0)
1879         return;
1880 
1881     hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1882 }
1883 
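/* Abort a connection with the HCI command appropriate for its state:
 *
 *   BT_CONNECTED/BT_CONFIG -> Disconnect (or Disconnect Physical Link
 *                             for AMP links)
 *   BT_CONNECT             -> LE Create Connection Cancel or Create
 *                             Connection Cancel
 *   BT_CONNECT2            -> Reject Connection Request (ACL) or
 *                             Reject Synchronous Connection Request
 *   anything else          -> just mark the connection closed
 */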
1884 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1885               u8 reason)
1886 {
1887     switch (conn->state) {
1888     case BT_CONNECTED:
1889     case BT_CONFIG:
1890         if (conn->type == AMP_LINK) {
1891             struct hci_cp_disconn_phy_link cp;
1892 
1893             cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1894             cp.reason = reason;
1895             hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1896                     &cp);
1897         } else {
1898             struct hci_cp_disconnect dc;
1899 
1900             dc.handle = cpu_to_le16(conn->handle);
1901             dc.reason = reason;
1902             hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1903         }
1904 
1905         conn->state = BT_DISCONN;
1906 
1907         break;
1908     case BT_CONNECT:
1909         if (conn->type == LE_LINK) {
1910             if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1911                 break;
1912             hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1913                     0, NULL);
1914         } else if (conn->type == ACL_LINK) {
1915             if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1916                 break;
1917             hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1918                     6, &conn->dst);
1919         }
1920         break;
1921     case BT_CONNECT2:
1922         if (conn->type == ACL_LINK) {
1923             struct hci_cp_reject_conn_req rej;
1924 
1925             bacpy(&rej.bdaddr, &conn->dst);
1926             rej.reason = reason;
1927 
1928             hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1929                     sizeof(rej), &rej);
1930         } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1931             struct hci_cp_reject_sync_conn_req rej;
1932 
1933             bacpy(&rej.bdaddr, &conn->dst);
1934 
1935             /* SCO rejection has its own limited set of
1936              * allowed error values (0x0D-0x0F) which isn't
1937              * compatible with most values passed to this
1938              * function. To be safe, hard-code one of the
1939              * values that's suitable for SCO.
1940              */
1941             rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1942 
1943             hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1944                     sizeof(rej), &rej);
1945         }
1946         break;
1947     default:
1948         conn->state = BT_CLOSED;
1949         break;
1950     }
1951 }
1952 
1953 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1954 {
1955     if (status)
1956         bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
1957 }
1958 
1959 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1960 {
1961     struct hci_request req;
1962     int err;
1963 
1964     hci_req_init(&req, conn->hdev);
1965 
1966     __hci_abort_conn(&req, conn, reason);
1967 
1968     err = hci_req_run(&req, abort_conn_complete);
1969     if (err && err != -ENODATA) {
1970         bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
1971         return err;
1972     }
1973 
1974     return 0;
1975 }
1976 
1977 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1978 {
1979     hci_req_add_le_scan_disable(req, false);
1980     return 0;
1981 }
1982 
1983 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1984 {
1985     u8 length = opt;
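    /* LAPs for the General and Limited Inquiry Access Codes
     * (0x9e8b33 and 0x9e8b00), little-endian as HCI expects.
     */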
1986     const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1987     const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1988     struct hci_cp_inquiry cp;
1989 
1990     if (test_bit(HCI_INQUIRY, &req->hdev->flags))
1991         return 0;
1992 
1993     bt_dev_dbg(req->hdev, "");
1994 
1995     hci_dev_lock(req->hdev);
1996     hci_inquiry_cache_flush(req->hdev);
1997     hci_dev_unlock(req->hdev);
1998 
1999     memset(&cp, 0, sizeof(cp));
2000 
2001     if (req->hdev->discovery.limited)
2002         memcpy(&cp.lap, liac, sizeof(cp.lap));
2003     else
2004         memcpy(&cp.lap, giac, sizeof(cp.lap));
2005 
2006     cp.length = length;
2007 
2008     hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2009 
2010     return 0;
2011 }
2012 
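/* Delayed work that stops an LE discovery scan once its duration has
 * elapsed. For interleaved discovery it then kicks off the BR/EDR
 * inquiry phase, unless the controller can run both simultaneously.
 */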
2013 static void le_scan_disable_work(struct work_struct *work)
2014 {
2015     struct hci_dev *hdev = container_of(work, struct hci_dev,
2016                         le_scan_disable.work);
2017     u8 status;
2018 
2019     bt_dev_dbg(hdev, "");
2020 
2021     if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2022         return;
2023 
2024     cancel_delayed_work(&hdev->le_scan_restart);
2025 
2026     hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2027     if (status) {
2028         bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2029                status);
2030         return;
2031     }
2032 
2033     hdev->discovery.scan_start = 0;
2034 
2035     /* If we were running an LE-only scan, change the discovery state.
2036      * If we were running both LE and BR/EDR inquiry simultaneously,
2037      * and the BR/EDR inquiry has already finished, stop discovery;
2038      * otherwise the BR/EDR inquiry will stop discovery when it
2039      * finishes. If we are about to resolve a remote device name, do
2040      * not change the discovery state.
2041      */
2042 
2043     if (hdev->discovery.type == DISCOV_TYPE_LE)
2044         goto discov_stopped;
2045 
2046     if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2047         return;
2048 
2049     if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2050         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2051             hdev->discovery.state != DISCOVERY_RESOLVING)
2052             goto discov_stopped;
2053 
2054         return;
2055     }
2056 
2057     hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2058              HCI_CMD_TIMEOUT, &status);
2059     if (status) {
2060         bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2061         goto discov_stopped;
2062     }
2063 
2064     return;
2065 
2066 discov_stopped:
2067     hci_dev_lock(hdev);
2068     hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2069     hci_dev_unlock(hdev);
2070 }
2071 
2072 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2073 {
2074     struct hci_dev *hdev = req->hdev;
2075 
2076     /* If the controller is not scanning, we are done. */
2077     if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2078         return 0;
2079 
2080     if (hdev->scanning_paused) {
2081         bt_dev_dbg(hdev, "Scanning is paused for suspend");
2082         return 0;
2083     }
2084 
2085     hci_req_add_le_scan_disable(req, false);
2086 
2087     if (use_ext_scan(hdev)) {
2088         struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2089 
2090         memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2091         ext_enable_cp.enable = LE_SCAN_ENABLE;
2092         ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2093 
2094         hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2095                 sizeof(ext_enable_cp), &ext_enable_cp);
2096     } else {
2097         struct hci_cp_le_set_scan_enable cp;
2098 
2099         memset(&cp, 0, sizeof(cp));
2100         cp.enable = LE_SCAN_ENABLE;
2101         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2102         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2103     }
2104 
2105     return 0;
2106 }
2107 
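/* Delayed work that restarts LE scanning on controllers with the strict
 * duplicate filter quirk, the idea being that restarting the scan resets
 * the controller's duplicate filtering so already-seen advertisers are
 * reported again. The le_scan_disable work is then re-armed with whatever
 * is left of the original scan duration.
 */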
2108 static void le_scan_restart_work(struct work_struct *work)
2109 {
2110     struct hci_dev *hdev = container_of(work, struct hci_dev,
2111                         le_scan_restart.work);
2112     unsigned long timeout, duration, scan_start, now;
2113     u8 status;
2114 
2115     bt_dev_dbg(hdev, "");
2116 
2117     hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2118     if (status) {
2119         bt_dev_err(hdev, "failed to restart LE scan: status %d",
2120                status);
2121         return;
2122     }
2123 
2124     hci_dev_lock(hdev);
2125 
2126     if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2127         !hdev->discovery.scan_start)
2128         goto unlock;
2129 
2130     /* When the scan was started, hdev->le_scan_disable was queued to
2131      * run at scan_start + duration. During the scan restart this work
2132      * was canceled, so queue it again with the remaining timeout to
2133      * make sure the scan does not run indefinitely.
2134      */
2135     duration = hdev->discovery.scan_duration;
2136     scan_start = hdev->discovery.scan_start;
2137     now = jiffies;
2138     if (now - scan_start <= duration) {
2139         int elapsed;
2140 
2141         if (now >= scan_start)
2142             elapsed = now - scan_start;
2143         else
2144             elapsed = ULONG_MAX - scan_start + now;
2145 
2146         timeout = duration - elapsed;
2147     } else {
2148         timeout = 0;
2149     }
2150 
2151     queue_delayed_work(hdev->req_workqueue,
2152                &hdev->le_scan_disable, timeout);
2153 
2154 unlock:
2155     hci_dev_unlock(hdev);
2156 }
2157 
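/* Queue whatever commands are needed to stop the current discovery:
 * inquiry cancel and/or LE scan disable while finding, plus a remote
 * name request cancel if a name resolution is in flight. Returns true
 * if at least one command was queued.
 */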
2158 bool hci_req_stop_discovery(struct hci_request *req)
2159 {
2160     struct hci_dev *hdev = req->hdev;
2161     struct discovery_state *d = &hdev->discovery;
2162     struct hci_cp_remote_name_req_cancel cp;
2163     struct inquiry_entry *e;
2164     bool ret = false;
2165 
2166     bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
2167 
2168     if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2169         if (test_bit(HCI_INQUIRY, &hdev->flags))
2170             hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2171 
2172         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2173             cancel_delayed_work(&hdev->le_scan_disable);
2174             cancel_delayed_work(&hdev->le_scan_restart);
2175             hci_req_add_le_scan_disable(req, false);
2176         }
2177 
2178         ret = true;
2179     } else {
2180         /* Passive scanning */
2181         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2182             hci_req_add_le_scan_disable(req, false);
2183             ret = true;
2184         }
2185     }
2186 
2187     /* No further actions needed for LE-only discovery */
2188     if (d->type == DISCOV_TYPE_LE)
2189         return ret;
2190 
2191     if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2192         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2193                              NAME_PENDING);
2194         if (!e)
2195             return ret;
2196 
2197         bacpy(&cp.bdaddr, &e->data.bdaddr);
2198         hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2199                 &cp);
2200         ret = true;
2201     }
2202 
2203     return ret;
2204 }
2205 
2206 static void config_data_path_complete(struct hci_dev *hdev, u8 status,
2207                       u16 opcode)
2208 {
2209     bt_dev_dbg(hdev, "status %u", status);
2210 }
2211 
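/* Configure an offloaded codec data path. The command is issued twice
 * because each direction must be configured separately; per the Core
 * spec, direction 0x00 is input (host to controller) and 0x01 is output
 * (controller to host).
 */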
2212 int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
2213 {
2214     struct hci_request req;
2215     int err;
2216     __u8 vnd_len, *vnd_data = NULL;
2217     struct hci_op_configure_data_path *cmd = NULL;
2218 
2219     hci_req_init(&req, hdev);
2220 
2221     err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
2222                       &vnd_data);
2223     if (err < 0)
2224         goto error;
2225 
2226     cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
2227     if (!cmd) {
2228         err = -ENOMEM;
2229         goto error;
2230     }
2231 
2232     err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
2233     if (err < 0)
2234         goto error;
2235 
2236     cmd->vnd_len = vnd_len;
2237     memcpy(cmd->vnd_data, vnd_data, vnd_len);
2238 
2239     cmd->direction = 0x00;
2240     hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2241 
2242     cmd->direction = 0x01;
2243     hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2244 
2245     err = hci_req_run(&req, config_data_path_complete);
2246 error:
2247 
2248     kfree(cmd);
2249     kfree(vnd_data);
2250     return err;
2251 }
2252 
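/* Initialize the delayed works owned by this file; their counterpart,
 * hci_request_cancel_all() below, cancels them again on shutdown.
 */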
2253 void hci_request_setup(struct hci_dev *hdev)
2254 {
2255     INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2256     INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2257     INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2258     INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
2259 }
2260 
2261 void hci_request_cancel_all(struct hci_dev *hdev)
2262 {
2263     __hci_cmd_sync_cancel(hdev, ENODEV);
2264 
2265     cancel_delayed_work_sync(&hdev->le_scan_disable);
2266     cancel_delayed_work_sync(&hdev->le_scan_restart);
2267 
2268     if (hdev->adv_instance_timeout) {
2269         cancel_delayed_work_sync(&hdev->adv_instance_expire);
2270         hdev->adv_instance_timeout = 0;
2271     }
2272 
2273     cancel_interleave_scan(hdev);
2274 }