0001 /*
0002    BlueZ - Bluetooth protocol stack for Linux
0003    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
0004 
0005    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
0006 
0007    This program is free software; you can redistribute it and/or modify
0008    it under the terms of the GNU General Public License version 2 as
0009    published by the Free Software Foundation;
0010 
0011    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
0012    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0013    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
0014    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
0015    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
0016    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
0017    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
0018    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
0019 
0020    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
0021    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
0022    SOFTWARE IS DISCLAIMED.
0023 */
0024 
0025 /* Bluetooth HCI event handling. */
0026 
0027 #include <asm/unaligned.h>
0028 
0029 #include <net/bluetooth/bluetooth.h>
0030 #include <net/bluetooth/hci_core.h>
0031 #include <net/bluetooth/mgmt.h>
0032 
0033 #include "hci_request.h"
0034 #include "hci_debugfs.h"
0035 #include "a2mp.h"
0036 #include "amp.h"
0037 #include "smp.h"
0038 #include "msft.h"
0039 #include "eir.h"
0040 
0041 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
0042          "\x00\x00\x00\x00\x00\x00\x00\x00"
0043 
0044 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
0045 
0046 /* Handle HCI Event packets */
0047 
0048 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
0049                  u8 ev, size_t len)
0050 {
0051     void *data;
0052 
0053     data = skb_pull_data(skb, len);
0054     if (!data)
0055         bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
0056 
0057     return data;
0058 }
0059 
0060 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
0061                  u16 op, size_t len)
0062 {
0063     void *data;
0064 
0065     data = skb_pull_data(skb, len);
0066     if (!data)
0067         bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
0068 
0069     return data;
0070 }
0071 
0072 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
0073                 u8 ev, size_t len)
0074 {
0075     void *data;
0076 
0077     data = skb_pull_data(skb, len);
0078     if (!data)
0079         bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
0080 
0081     return data;
0082 }
0083 
0084 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
0085                 struct sk_buff *skb)
0086 {
0087     struct hci_ev_status *rp = data;
0088 
0089     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0090 
0091     /* It is possible that we receive Inquiry Complete event right
0092      * before we receive Inquiry Cancel Command Complete event, in
0093      * which case the latter event should have status of Command
0094      * Disallowed (0x0c). This should not be treated as error, since
0095      * we actually achieve what Inquiry Cancel wants to achieve,
0096      * which is to end the last Inquiry session.
0097      */
0098     if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
0099         bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
0100         rp->status = 0x00;
0101     }
0102 
0103     if (rp->status)
0104         return rp->status;
0105 
0106     clear_bit(HCI_INQUIRY, &hdev->flags);
0107     smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
0108     wake_up_bit(&hdev->flags, HCI_INQUIRY);
0109 
0110     hci_dev_lock(hdev);
0111     /* Set discovery state to stopped if we're not doing LE active
0112      * scanning.
0113      */
0114     if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
0115         hdev->le_scan_type != LE_SCAN_ACTIVE)
0116         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
0117     hci_dev_unlock(hdev);
0118 
0119     hci_conn_check_pending(hdev);
0120 
0121     return rp->status;
0122 }
0123 
0124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
0125                   struct sk_buff *skb)
0126 {
0127     struct hci_ev_status *rp = data;
0128 
0129     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0130 
0131     if (rp->status)
0132         return rp->status;
0133 
0134     hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
0135 
0136     return rp->status;
0137 }
0138 
0139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
0140                    struct sk_buff *skb)
0141 {
0142     struct hci_ev_status *rp = data;
0143 
0144     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0145 
0146     if (rp->status)
0147         return rp->status;
0148 
0149     hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
0150 
0151     hci_conn_check_pending(hdev);
0152 
0153     return rp->status;
0154 }
0155 
0156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
0157                     struct sk_buff *skb)
0158 {
0159     struct hci_ev_status *rp = data;
0160 
0161     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0162 
0163     return rp->status;
0164 }
0165 
0166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
0167                 struct sk_buff *skb)
0168 {
0169     struct hci_rp_role_discovery *rp = data;
0170     struct hci_conn *conn;
0171 
0172     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0173 
0174     if (rp->status)
0175         return rp->status;
0176 
0177     hci_dev_lock(hdev);
0178 
0179     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
0180     if (conn)
0181         conn->role = rp->role;
0182 
0183     hci_dev_unlock(hdev);
0184 
0185     return rp->status;
0186 }
0187 
0188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
0189                   struct sk_buff *skb)
0190 {
0191     struct hci_rp_read_link_policy *rp = data;
0192     struct hci_conn *conn;
0193 
0194     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0195 
0196     if (rp->status)
0197         return rp->status;
0198 
0199     hci_dev_lock(hdev);
0200 
0201     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
0202     if (conn)
0203         conn->link_policy = __le16_to_cpu(rp->policy);
0204 
0205     hci_dev_unlock(hdev);
0206 
0207     return rp->status;
0208 }
0209 
0210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
0211                    struct sk_buff *skb)
0212 {
0213     struct hci_rp_write_link_policy *rp = data;
0214     struct hci_conn *conn;
0215     void *sent;
0216 
0217     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0218 
0219     if (rp->status)
0220         return rp->status;
0221 
0222     sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
0223     if (!sent)
0224         return rp->status;
0225 
0226     hci_dev_lock(hdev);
0227 
0228     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
0229     if (conn)
0230         conn->link_policy = get_unaligned_le16(sent + 2);
0231 
0232     hci_dev_unlock(hdev);
0233 
0234     return rp->status;
0235 }
0236 
0237 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
0238                       struct sk_buff *skb)
0239 {
0240     struct hci_rp_read_def_link_policy *rp = data;
0241 
0242     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0243 
0244     if (rp->status)
0245         return rp->status;
0246 
0247     hdev->link_policy = __le16_to_cpu(rp->policy);
0248 
0249     return rp->status;
0250 }
0251 
0252 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
0253                        struct sk_buff *skb)
0254 {
0255     struct hci_ev_status *rp = data;
0256     void *sent;
0257 
0258     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0259 
0260     if (rp->status)
0261         return rp->status;
0262 
0263     sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
0264     if (!sent)
0265         return rp->status;
0266 
0267     hdev->link_policy = get_unaligned_le16(sent);
0268 
0269     return rp->status;
0270 }
0271 
/* Handle Command Complete for HCI_Reset.
 *
 * The HCI_RESET flag is cleared unconditionally since the command has
 * completed either way. On success, all volatile device state is
 * dropped so the stack's view matches the freshly reset controller.
 */
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
    struct hci_ev_status *rp = data;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    clear_bit(HCI_RESET, &hdev->flags);

    if (rp->status)
        return rp->status;

    /* Reset all non-persistent flags */
    hci_dev_clear_volatile_flags(hdev);

    hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

    /* Cached TX power readings are no longer valid after a reset */
    hdev->inq_tx_power = HCI_TX_POWER_INVALID;
    hdev->adv_tx_power = HCI_TX_POWER_INVALID;

    /* Advertising and scan response data were wiped by the controller */
    memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
    hdev->adv_data_len = 0;

    memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
    hdev->scan_rsp_data_len = 0;

    hdev->le_scan_type = LE_SCAN_PASSIVE;

    hdev->ssp_debug_mode = 0;

    /* Controller-side LE accept and resolving lists are cleared by the
     * reset, so drop the host-side mirrors as well.
     */
    hci_bdaddr_list_clear(&hdev->le_accept_list);
    hci_bdaddr_list_clear(&hdev->le_resolv_list);

    return rp->status;
}
0306 
0307 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
0308                       struct sk_buff *skb)
0309 {
0310     struct hci_rp_read_stored_link_key *rp = data;
0311     struct hci_cp_read_stored_link_key *sent;
0312 
0313     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0314 
0315     sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
0316     if (!sent)
0317         return rp->status;
0318 
0319     if (!rp->status && sent->read_all == 0x01) {
0320         hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
0321         hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
0322     }
0323 
0324     return rp->status;
0325 }
0326 
0327 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
0328                     struct sk_buff *skb)
0329 {
0330     struct hci_rp_delete_stored_link_key *rp = data;
0331     u16 num_keys;
0332 
0333     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0334 
0335     if (rp->status)
0336         return rp->status;
0337 
0338     num_keys = le16_to_cpu(rp->num_keys);
0339 
0340     if (num_keys <= hdev->stored_num_keys)
0341         hdev->stored_num_keys -= num_keys;
0342     else
0343         hdev->stored_num_keys = 0;
0344 
0345     return rp->status;
0346 }
0347 
/* Handle Command Complete for HCI_Write_Local_Name.
 *
 * The new name is recovered from the sent command parameters. When the
 * management interface is in use, completion (success or failure) is
 * forwarded to mgmt so a pending Set Local Name command can be
 * answered; otherwise the name is cached locally on success only.
 */
static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
                  struct sk_buff *skb)
{
    struct hci_ev_status *rp = data;
    void *sent;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
    if (!sent)
        return rp->status;

    hci_dev_lock(hdev);

    if (hci_dev_test_flag(hdev, HCI_MGMT))
        mgmt_set_local_name_complete(hdev, sent, rp->status);
    else if (!rp->status)
        memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

    hci_dev_unlock(hdev);

    return rp->status;
}
0371 
0372 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
0373                  struct sk_buff *skb)
0374 {
0375     struct hci_rp_read_local_name *rp = data;
0376 
0377     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0378 
0379     if (rp->status)
0380         return rp->status;
0381 
0382     if (hci_dev_test_flag(hdev, HCI_SETUP) ||
0383         hci_dev_test_flag(hdev, HCI_CONFIG))
0384         memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
0385 
0386     return rp->status;
0387 }
0388 
/* Handle Command Complete for HCI_Write_Authentication_Enable.
 *
 * On success, mirrors the written parameter into the HCI_AUTH flag.
 * mgmt is always notified of completion (success or failure) so a
 * pending command can be answered.
 */
static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
                   struct sk_buff *skb)
{
    struct hci_ev_status *rp = data;
    void *sent;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    /* Recover the auth-enable parameter from the command we sent */
    sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
    if (!sent)
        return rp->status;

    hci_dev_lock(hdev);

    if (!rp->status) {
        __u8 param = *((__u8 *) sent);

        if (param == AUTH_ENABLED)
            set_bit(HCI_AUTH, &hdev->flags);
        else
            clear_bit(HCI_AUTH, &hdev->flags);
    }

    if (hci_dev_test_flag(hdev, HCI_MGMT))
        mgmt_auth_enable_complete(hdev, rp->status);

    hci_dev_unlock(hdev);

    return rp->status;
}
0419 
0420 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
0421                     struct sk_buff *skb)
0422 {
0423     struct hci_ev_status *rp = data;
0424     __u8 param;
0425     void *sent;
0426 
0427     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0428 
0429     if (rp->status)
0430         return rp->status;
0431 
0432     sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
0433     if (!sent)
0434         return rp->status;
0435 
0436     param = *((__u8 *) sent);
0437 
0438     if (param)
0439         set_bit(HCI_ENCRYPT, &hdev->flags);
0440     else
0441         clear_bit(HCI_ENCRYPT, &hdev->flags);
0442 
0443     return rp->status;
0444 }
0445 
/* Handle Command Complete for HCI_Write_Scan_Enable.
 *
 * On success, mirrors the written scan parameter into the HCI_ISCAN
 * (inquiry scan / discoverable) and HCI_PSCAN (page scan / connectable)
 * flags. On failure the discoverable timeout is reset, since the
 * requested scan mode never took effect.
 */
static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
                   struct sk_buff *skb)
{
    struct hci_ev_status *rp = data;
    __u8 param;
    void *sent;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    /* Recover the scan-enable parameter from the command we sent */
    sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
    if (!sent)
        return rp->status;

    param = *((__u8 *) sent);

    hci_dev_lock(hdev);

    if (rp->status) {
        hdev->discov_timeout = 0;
        goto done;
    }

    if (param & SCAN_INQUIRY)
        set_bit(HCI_ISCAN, &hdev->flags);
    else
        clear_bit(HCI_ISCAN, &hdev->flags);

    if (param & SCAN_PAGE)
        set_bit(HCI_PSCAN, &hdev->flags);
    else
        clear_bit(HCI_PSCAN, &hdev->flags);

done:
    hci_dev_unlock(hdev);

    return rp->status;
}
0483 
0484 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
0485                   struct sk_buff *skb)
0486 {
0487     struct hci_ev_status *rp = data;
0488     struct hci_cp_set_event_filter *cp;
0489     void *sent;
0490 
0491     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0492 
0493     if (rp->status)
0494         return rp->status;
0495 
0496     sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
0497     if (!sent)
0498         return rp->status;
0499 
0500     cp = (struct hci_cp_set_event_filter *)sent;
0501 
0502     if (cp->flt_type == HCI_FLT_CLEAR_ALL)
0503         hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
0504     else
0505         hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
0506 
0507     return rp->status;
0508 }
0509 
0510 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
0511                    struct sk_buff *skb)
0512 {
0513     struct hci_rp_read_class_of_dev *rp = data;
0514 
0515     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0516 
0517     if (rp->status)
0518         return rp->status;
0519 
0520     memcpy(hdev->dev_class, rp->dev_class, 3);
0521 
0522     bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
0523            hdev->dev_class[1], hdev->dev_class[0]);
0524 
0525     return rp->status;
0526 }
0527 
/* Handle Command Complete for HCI_Write_Class_of_Device.
 *
 * On success the written class is cached locally; mgmt is notified of
 * completion regardless of status so a pending Set Class of Device
 * command can be answered.
 */
static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
                    struct sk_buff *skb)
{
    struct hci_ev_status *rp = data;
    void *sent;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    /* Recover the 3-byte class from the command we sent */
    sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
    if (!sent)
        return rp->status;

    hci_dev_lock(hdev);

    if (!rp->status)
        memcpy(hdev->dev_class, sent, 3);

    if (hci_dev_test_flag(hdev, HCI_MGMT))
        mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

    hci_dev_unlock(hdev);

    return rp->status;
}
0552 
0553 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
0554                     struct sk_buff *skb)
0555 {
0556     struct hci_rp_read_voice_setting *rp = data;
0557     __u16 setting;
0558 
0559     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0560 
0561     if (rp->status)
0562         return rp->status;
0563 
0564     setting = __le16_to_cpu(rp->voice_setting);
0565 
0566     if (hdev->voice_setting == setting)
0567         return rp->status;
0568 
0569     hdev->voice_setting = setting;
0570 
0571     bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
0572 
0573     if (hdev->notify)
0574         hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
0575 
0576     return rp->status;
0577 }
0578 
0579 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
0580                      struct sk_buff *skb)
0581 {
0582     struct hci_ev_status *rp = data;
0583     __u16 setting;
0584     void *sent;
0585 
0586     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0587 
0588     if (rp->status)
0589         return rp->status;
0590 
0591     sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
0592     if (!sent)
0593         return rp->status;
0594 
0595     setting = get_unaligned_le16(sent);
0596 
0597     if (hdev->voice_setting == setting)
0598         return rp->status;
0599 
0600     hdev->voice_setting = setting;
0601 
0602     bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
0603 
0604     if (hdev->notify)
0605         hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
0606 
0607     return rp->status;
0608 }
0609 
0610 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
0611                     struct sk_buff *skb)
0612 {
0613     struct hci_rp_read_num_supported_iac *rp = data;
0614 
0615     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0616 
0617     if (rp->status)
0618         return rp->status;
0619 
0620     hdev->num_iac = rp->num_iac;
0621 
0622     bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
0623 
0624     return rp->status;
0625 }
0626 
0627 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
0628                 struct sk_buff *skb)
0629 {
0630     struct hci_ev_status *rp = data;
0631     struct hci_cp_write_ssp_mode *sent;
0632 
0633     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0634 
0635     sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
0636     if (!sent)
0637         return rp->status;
0638 
0639     hci_dev_lock(hdev);
0640 
0641     if (!rp->status) {
0642         if (sent->mode)
0643             hdev->features[1][0] |= LMP_HOST_SSP;
0644         else
0645             hdev->features[1][0] &= ~LMP_HOST_SSP;
0646     }
0647 
0648     if (!rp->status) {
0649         if (sent->mode)
0650             hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
0651         else
0652             hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
0653     }
0654 
0655     hci_dev_unlock(hdev);
0656 
0657     return rp->status;
0658 }
0659 
/* Handle Command Complete for HCI_Write_Secure_Connections_Host_Support.
 *
 * On success, mirrors the written support value into the host-feature
 * bit (LMP_HOST_SC). The HCI_SC_ENABLED flag is only updated here when
 * mgmt is NOT in use, since the management interface maintains that
 * flag itself.
 */
static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
                  struct sk_buff *skb)
{
    struct hci_ev_status *rp = data;
    struct hci_cp_write_sc_support *sent;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    /* Recover the support parameter from the command we sent */
    sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
    if (!sent)
        return rp->status;

    hci_dev_lock(hdev);

    if (!rp->status) {
        if (sent->support)
            hdev->features[1][0] |= LMP_HOST_SC;
        else
            hdev->features[1][0] &= ~LMP_HOST_SC;
    }

    /* mgmt owns HCI_SC_ENABLED when the management interface is active */
    if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
        if (sent->support)
            hci_dev_set_flag(hdev, HCI_SC_ENABLED);
        else
            hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
    }

    hci_dev_unlock(hdev);

    return rp->status;
}
0692 
0693 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
0694                     struct sk_buff *skb)
0695 {
0696     struct hci_rp_read_local_version *rp = data;
0697 
0698     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0699 
0700     if (rp->status)
0701         return rp->status;
0702 
0703     if (hci_dev_test_flag(hdev, HCI_SETUP) ||
0704         hci_dev_test_flag(hdev, HCI_CONFIG)) {
0705         hdev->hci_ver = rp->hci_ver;
0706         hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
0707         hdev->lmp_ver = rp->lmp_ver;
0708         hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
0709         hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
0710     }
0711 
0712     return rp->status;
0713 }
0714 
0715 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
0716                      struct sk_buff *skb)
0717 {
0718     struct hci_rp_read_local_commands *rp = data;
0719 
0720     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0721 
0722     if (rp->status)
0723         return rp->status;
0724 
0725     if (hci_dev_test_flag(hdev, HCI_SETUP) ||
0726         hci_dev_test_flag(hdev, HCI_CONFIG))
0727         memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
0728 
0729     return rp->status;
0730 }
0731 
0732 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
0733                        struct sk_buff *skb)
0734 {
0735     struct hci_rp_read_auth_payload_to *rp = data;
0736     struct hci_conn *conn;
0737 
0738     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0739 
0740     if (rp->status)
0741         return rp->status;
0742 
0743     hci_dev_lock(hdev);
0744 
0745     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
0746     if (conn)
0747         conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
0748 
0749     hci_dev_unlock(hdev);
0750 
0751     return rp->status;
0752 }
0753 
0754 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
0755                         struct sk_buff *skb)
0756 {
0757     struct hci_rp_write_auth_payload_to *rp = data;
0758     struct hci_conn *conn;
0759     void *sent;
0760 
0761     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0762 
0763     if (rp->status)
0764         return rp->status;
0765 
0766     sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
0767     if (!sent)
0768         return rp->status;
0769 
0770     hci_dev_lock(hdev);
0771 
0772     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
0773     if (conn)
0774         conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
0775 
0776     hci_dev_unlock(hdev);
0777 
0778     return rp->status;
0779 }
0780 
/* Handle Command Complete for HCI_Read_Local_Supported_Features.
 *
 * Caches the 8-byte page-0 feature mask and derives the default ACL
 * packet types and (e)SCO link types from the advertised LMP features.
 */
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
                     struct sk_buff *skb)
{
    struct hci_rp_read_local_features *rp = data;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    if (rp->status)
        return rp->status;

    memcpy(hdev->features, rp->features, 8);

    /* Adjust default settings according to features
     * supported by device. */

    /* Multi-slot ACL packet types */
    if (hdev->features[0][0] & LMP_3SLOT)
        hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

    if (hdev->features[0][0] & LMP_5SLOT)
        hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

    /* Legacy SCO HV packet types (also usable as eSCO types) */
    if (hdev->features[0][1] & LMP_HV2) {
        hdev->pkt_type  |= (HCI_HV2);
        hdev->esco_type |= (ESCO_HV2);
    }

    if (hdev->features[0][1] & LMP_HV3) {
        hdev->pkt_type  |= (HCI_HV3);
        hdev->esco_type |= (ESCO_HV3);
    }

    /* Basic-rate eSCO packet types */
    if (lmp_esco_capable(hdev))
        hdev->esco_type |= (ESCO_EV3);

    if (hdev->features[0][4] & LMP_EV4)
        hdev->esco_type |= (ESCO_EV4);

    if (hdev->features[0][4] & LMP_EV5)
        hdev->esco_type |= (ESCO_EV5);

    /* EDR eSCO packet types */
    if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
        hdev->esco_type |= (ESCO_2EV3);

    if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
        hdev->esco_type |= (ESCO_3EV3);

    if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
        hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

    return rp->status;
}
0832 
0833 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
0834                      struct sk_buff *skb)
0835 {
0836     struct hci_rp_read_local_ext_features *rp = data;
0837 
0838     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0839 
0840     if (rp->status)
0841         return rp->status;
0842 
0843     if (hdev->max_page < rp->max_page)
0844         hdev->max_page = rp->max_page;
0845 
0846     if (rp->page < HCI_MAX_PAGES)
0847         memcpy(hdev->features[rp->page], rp->features, 8);
0848 
0849     return rp->status;
0850 }
0851 
0852 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
0853                     struct sk_buff *skb)
0854 {
0855     struct hci_rp_read_flow_control_mode *rp = data;
0856 
0857     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0858 
0859     if (rp->status)
0860         return rp->status;
0861 
0862     hdev->flow_ctl_mode = rp->mode;
0863 
0864     return rp->status;
0865 }
0866 
/* Handle Command Complete for HCI_Read_Buffer_Size.
 *
 * Caches the controller's ACL/SCO MTUs and buffer counts and
 * initializes the in-flight packet counters from them.
 */
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
                  struct sk_buff *skb)
{
    struct hci_rp_read_buffer_size *rp = data;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    if (rp->status)
        return rp->status;

    hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
    hdev->sco_mtu  = rp->sco_mtu;
    hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
    hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

    /* Some controllers report bogus SCO buffer sizes; override with
     * known-safe values when the quirk is set.
     */
    if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
        hdev->sco_mtu  = 64;
        hdev->sco_pkts = 8;
    }

    /* All buffers start out available */
    hdev->acl_cnt = hdev->acl_pkts;
    hdev->sco_cnt = hdev->sco_pkts;

    BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
           hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

    return rp->status;
}
0895 
0896 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
0897                   struct sk_buff *skb)
0898 {
0899     struct hci_rp_read_bd_addr *rp = data;
0900 
0901     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0902 
0903     if (rp->status)
0904         return rp->status;
0905 
0906     if (test_bit(HCI_INIT, &hdev->flags))
0907         bacpy(&hdev->bdaddr, &rp->bdaddr);
0908 
0909     if (hci_dev_test_flag(hdev, HCI_SETUP))
0910         bacpy(&hdev->setup_addr, &rp->bdaddr);
0911 
0912     return rp->status;
0913 }
0914 
0915 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
0916                      struct sk_buff *skb)
0917 {
0918     struct hci_rp_read_local_pairing_opts *rp = data;
0919 
0920     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0921 
0922     if (rp->status)
0923         return rp->status;
0924 
0925     if (hci_dev_test_flag(hdev, HCI_SETUP) ||
0926         hci_dev_test_flag(hdev, HCI_CONFIG)) {
0927         hdev->pairing_opts = rp->pairing_opts;
0928         hdev->max_enc_key_size = rp->max_key_size;
0929     }
0930 
0931     return rp->status;
0932 }
0933 
0934 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
0935                      struct sk_buff *skb)
0936 {
0937     struct hci_rp_read_page_scan_activity *rp = data;
0938 
0939     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0940 
0941     if (rp->status)
0942         return rp->status;
0943 
0944     if (test_bit(HCI_INIT, &hdev->flags)) {
0945         hdev->page_scan_interval = __le16_to_cpu(rp->interval);
0946         hdev->page_scan_window = __le16_to_cpu(rp->window);
0947     }
0948 
0949     return rp->status;
0950 }
0951 
0952 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
0953                       struct sk_buff *skb)
0954 {
0955     struct hci_ev_status *rp = data;
0956     struct hci_cp_write_page_scan_activity *sent;
0957 
0958     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0959 
0960     if (rp->status)
0961         return rp->status;
0962 
0963     sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
0964     if (!sent)
0965         return rp->status;
0966 
0967     hdev->page_scan_interval = __le16_to_cpu(sent->interval);
0968     hdev->page_scan_window = __le16_to_cpu(sent->window);
0969 
0970     return rp->status;
0971 }
0972 
0973 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
0974                      struct sk_buff *skb)
0975 {
0976     struct hci_rp_read_page_scan_type *rp = data;
0977 
0978     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0979 
0980     if (rp->status)
0981         return rp->status;
0982 
0983     if (test_bit(HCI_INIT, &hdev->flags))
0984         hdev->page_scan_type = rp->type;
0985 
0986     return rp->status;
0987 }
0988 
0989 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
0990                       struct sk_buff *skb)
0991 {
0992     struct hci_ev_status *rp = data;
0993     u8 *type;
0994 
0995     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
0996 
0997     if (rp->status)
0998         return rp->status;
0999 
1000     type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1001     if (type)
1002         hdev->page_scan_type = *type;
1003 
1004     return rp->status;
1005 }
1006 
1007 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1008                       struct sk_buff *skb)
1009 {
1010     struct hci_rp_read_data_block_size *rp = data;
1011 
1012     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1013 
1014     if (rp->status)
1015         return rp->status;
1016 
1017     hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1018     hdev->block_len = __le16_to_cpu(rp->block_len);
1019     hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1020 
1021     hdev->block_cnt = hdev->num_blocks;
1022 
1023     BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1024            hdev->block_cnt, hdev->block_len);
1025 
1026     return rp->status;
1027 }
1028 
/* Handle Command Complete for HCI_Read_Clock.
 *
 * The sent command's "which" parameter selects the clock being read:
 * 0x00 is the local (native) clock, stored on the device; any other
 * value refers to the piconet clock of the connection identified by
 * the handle, stored on that connection along with its accuracy.
 */
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
                struct sk_buff *skb)
{
    struct hci_rp_read_clock *rp = data;
    struct hci_cp_read_clock *cp;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    if (rp->status)
        return rp->status;

    hci_dev_lock(hdev);

    cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
    if (!cp)
        goto unlock;

    if (cp->which == 0x00) {
        /* Local clock requested */
        hdev->clock = le32_to_cpu(rp->clock);
        goto unlock;
    }

    /* Piconet clock: attach to the matching connection, if any */
    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
    if (conn) {
        conn->clock = le32_to_cpu(rp->clock);
        conn->clock_accuracy = le16_to_cpu(rp->accuracy);
    }

unlock:
    hci_dev_unlock(hdev);
    return rp->status;
}
1062 
/* Handle Command Complete for HCI_Read_Local_AMP_Info.
 *
 * Caches the AMP controller capabilities (status, bandwidth limits,
 * latency, PDU size, type, PAL capabilities, association size and
 * flush timeouts) on the device for later use by the AMP/A2MP code.
 */
static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
                     struct sk_buff *skb)
{
    struct hci_rp_read_local_amp_info *rp = data;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    if (rp->status)
        return rp->status;

    hdev->amp_status = rp->amp_status;
    hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
    hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
    hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
    hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
    hdev->amp_type = rp->amp_type;
    hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
    hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
    hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
    hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

    return rp->status;
}
1086 
1087 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1088                        struct sk_buff *skb)
1089 {
1090     struct hci_rp_read_inq_rsp_tx_power *rp = data;
1091 
1092     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1093 
1094     if (rp->status)
1095         return rp->status;
1096 
1097     hdev->inq_tx_power = rp->tx_power;
1098 
1099     return rp->status;
1100 }
1101 
1102 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1103                          struct sk_buff *skb)
1104 {
1105     struct hci_rp_read_def_err_data_reporting *rp = data;
1106 
1107     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1108 
1109     if (rp->status)
1110         return rp->status;
1111 
1112     hdev->err_data_reporting = rp->err_data_reporting;
1113 
1114     return rp->status;
1115 }
1116 
/* Handle the Command Complete event for
 * HCI_OP_WRITE_DEF_ERR_DATA_REPORTING: on success, mirror the value we
 * sent into hdev (the reply carries only a status).
 */
static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Recover the written value from the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}
1136 
/* Handle the Command Complete event for HCI_OP_PIN_CODE_REPLY: notify
 * mgmt of the outcome and, on success, record the PIN length on the
 * matching ACL connection.
 */
static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	/* mgmt is notified regardless of status so userspace sees failures */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* The PIN length comes from the command we sent, not the reply */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1166 
/* Handle the Command Complete event for HCI_OP_PIN_CODE_NEG_REPLY:
 * forward the outcome to mgmt (regardless of status).
 */
static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1184 
1185 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1186                      struct sk_buff *skb)
1187 {
1188     struct hci_rp_le_read_buffer_size *rp = data;
1189 
1190     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1191 
1192     if (rp->status)
1193         return rp->status;
1194 
1195     hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1196     hdev->le_pkts = rp->le_max_pkt;
1197 
1198     hdev->le_cnt = hdev->le_pkts;
1199 
1200     BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1201 
1202     return rp->status;
1203 }
1204 
1205 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1206                     struct sk_buff *skb)
1207 {
1208     struct hci_rp_le_read_local_features *rp = data;
1209 
1210     BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1211 
1212     if (rp->status)
1213         return rp->status;
1214 
1215     memcpy(hdev->le_features, rp->features, 8);
1216 
1217     return rp->status;
1218 }
1219 
1220 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1221                       struct sk_buff *skb)
1222 {
1223     struct hci_rp_le_read_adv_tx_power *rp = data;
1224 
1225     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1226 
1227     if (rp->status)
1228         return rp->status;
1229 
1230     hdev->adv_tx_power = rp->tx_power;
1231 
1232     return rp->status;
1233 }
1234 
/* Handle the Command Complete event for HCI_OP_USER_CONFIRM_REPLY:
 * forward the outcome to mgmt (regardless of status).
 */
static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1252 
/* Handle the Command Complete event for HCI_OP_USER_CONFIRM_NEG_REPLY:
 * forward the outcome to mgmt (regardless of status).
 */
static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1270 
/* Handle the Command Complete event for HCI_OP_USER_PASSKEY_REPLY:
 * forward the outcome to mgmt (regardless of status).
 */
static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1288 
/* Handle the Command Complete event for HCI_OP_USER_PASSKEY_NEG_REPLY:
 * forward the outcome to mgmt (regardless of status).
 */
static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
1306 
/* Handle the Command Complete event for HCI_OP_READ_LOCAL_OOB_DATA.
 * Only the status is logged here; the reply payload is not consumed
 * by this handler.
 */
static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1316 
/* Handle the Command Complete event for HCI_OP_READ_LOCAL_OOB_EXT_DATA.
 * Only the status is logged here; the reply payload is not consumed
 * by this handler.
 */
static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1326 
/* Handle the Command Complete event for HCI_OP_LE_SET_RANDOM_ADDR:
 * record the random address we programmed and, if it is the current
 * RPA, re-arm the RPA expiry machinery.
 */
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The address is not in the reply; take it from the sent command */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	/* If the programmed address is the current RPA it is now valid
	 * again: clear the expired flag and restart the expiry timer.
	 */
	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1356 
/* Handle the Command Complete event for HCI_OP_LE_SET_DEFAULT_PHY: on
 * success, mirror the TX/RX PHY preferences we sent into hdev.
 */
static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The reply carries only a status; use the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}
1381 
/* Handle the Command Complete event for HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
 * record the random address on the advertising instance and re-arm its
 * RPA expiry if the address is the current RPA.
 */
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only adv instances with a non-zero handle, since handle
	 * 0x00 shall be using HCI_OP_LE_SET_RANDOM_ADDR instead, as that
	 * allows both extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		/* If this is the current RPA it is valid again: restart
		 * the per-instance expiry work.
		 */
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1419 
/* Handle the Command Complete event for HCI_OP_LE_REMOVE_ADV_SET: drop
 * the matching advertising instance and notify mgmt on success.
 */
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The instance handle comes from the command we sent */
	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		/* Attribute the mgmt event to the socket that issued the
		 * command so it can be skipped for that socket.
		 */
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}
1447 
/* Handle the Command Complete event for HCI_OP_LE_CLEAR_ADV_SETS: drop
 * every advertising instance and notify mgmt for each one removed.
 */
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Only act if this completion matches a command we sent */
	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	/* _safe iteration: hci_remove_adv_instance() unlinks the entry */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1478 
1479 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1480                     struct sk_buff *skb)
1481 {
1482     struct hci_rp_le_read_transmit_power *rp = data;
1483 
1484     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1485 
1486     if (rp->status)
1487         return rp->status;
1488 
1489     hdev->min_le_tx_power = rp->min_le_tx_power;
1490     hdev->max_le_tx_power = rp->max_le_tx_power;
1491 
1492     return rp->status;
1493 }
1494 
/* Handle the Command Complete event for HCI_OP_LE_SET_PRIVACY_MODE: on
 * success, mirror the privacy mode into the matching connection
 * parameters entry.
 */
static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The reply carries only a status; use the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		params->privacy_mode = cp->mode;

	hci_dev_unlock(hdev);

	return rp->status;
}
1521 
/* Handle the Command Complete event for HCI_OP_LE_SET_ADV_ENABLE: sync
 * the HCI_LE_ADV flag with the enable state the controller confirmed.
 */
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The enable value comes from the command we sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1560 
/* Handle the Command Complete event for HCI_OP_LE_SET_EXT_ADV_ENABLE:
 * sync HCI_LE_ADV and the per-instance enabled state with the
 * enable/disable the controller just confirmed.
 */
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	/* The per-set entries follow the fixed part of the command; only
	 * the first set is consulted here.
	 */
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		/* If we're initiating a connection as peripheral, arm a
		 * timeout in case something goes wrong (mirrors
		 * hci_cc_le_set_adv_enable()).
		 */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1625 
/* Handle the Command Complete event for HCI_OP_LE_SET_SCAN_PARAM: on
 * success, remember the scan type (active/passive) we configured.
 */
static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The reply carries only a status; use the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1649 
/* Handle the Command Complete event for HCI_OP_LE_SET_EXT_SCAN_PARAMS:
 * on success, remember the scan type from the first PHY parameter set.
 */
static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	/* Per-PHY parameters follow the fixed part of the command */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}
1676 
1677 static bool has_pending_adv_report(struct hci_dev *hdev)
1678 {
1679     struct discovery_state *d = &hdev->discovery;
1680 
1681     return bacmp(&d->last_adv_addr, BDADDR_ANY);
1682 }
1683 
1684 static void clear_pending_adv_report(struct hci_dev *hdev)
1685 {
1686     struct discovery_state *d = &hdev->discovery;
1687 
1688     bacpy(&d->last_adv_addr, BDADDR_ANY);
1689     d->last_adv_data_len = 0;
1690 }
1691 
/* Cache an advertising report in the discovery state so it can be
 * merged with a following scan response before being reported.
 * Reports longer than HCI_MAX_AD_LENGTH are silently dropped.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	/* Guard the fixed-size last_adv_data buffer */
	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1708 
/* Common completion handling for the legacy and extended LE scan
 * enable commands: update HCI_LE_SCAN and flush or report any cached
 * advertising data.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scanning merges adv reports with scan responses,
		 * so start with an empty pending-report cache.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1759 
/* Handle the Command Complete event for HCI_OP_LE_SET_SCAN_ENABLE:
 * delegate the state update to le_set_scan_enable_complete().
 */
static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The enable value comes from the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}
1779 
/* Handle the Command Complete event for HCI_OP_LE_SET_EXT_SCAN_ENABLE:
 * delegate the state update to le_set_scan_enable_complete().
 */
static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The enable value comes from the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}
1799 
1800 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1801                       struct sk_buff *skb)
1802 {
1803     struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1804 
1805     bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1806            rp->num_of_sets);
1807 
1808     if (rp->status)
1809         return rp->status;
1810 
1811     hdev->le_num_of_adv_sets = rp->num_of_sets;
1812 
1813     return rp->status;
1814 }
1815 
1816 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1817                       struct sk_buff *skb)
1818 {
1819     struct hci_rp_le_read_accept_list_size *rp = data;
1820 
1821     bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1822 
1823     if (rp->status)
1824         return rp->status;
1825 
1826     hdev->le_accept_list_size = rp->size;
1827 
1828     return rp->status;
1829 }
1830 
/* Handle the Command Complete event for HCI_OP_LE_CLEAR_ACCEPT_LIST:
 * drop the host's mirror of the controller's accept list.
 */
static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
1847 
/* Handle the Command Complete event for HCI_OP_LE_ADD_TO_ACCEPT_LIST:
 * mirror the added entry into the host's accept list copy.
 */
static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The address comes from the command we sent, not the reply */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1870 
/* Handle the Command Complete event for HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
 * remove the entry from the host's accept list copy.
 */
static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The address comes from the command we sent, not the reply */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1893 
1894 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1895                       struct sk_buff *skb)
1896 {
1897     struct hci_rp_le_read_supported_states *rp = data;
1898 
1899     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1900 
1901     if (rp->status)
1902         return rp->status;
1903 
1904     memcpy(hdev->le_states, rp->le_states, 8);
1905 
1906     return rp->status;
1907 }
1908 
1909 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1910                       struct sk_buff *skb)
1911 {
1912     struct hci_rp_le_read_def_data_len *rp = data;
1913 
1914     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1915 
1916     if (rp->status)
1917         return rp->status;
1918 
1919     hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1920     hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1921 
1922     return rp->status;
1923 }
1924 
/* Handle the Command Complete event for HCI_OP_LE_WRITE_DEF_DATA_LEN:
 * on success, mirror the values we wrote into hdev.
 */
static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The reply carries only a status; use the command we sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}
1945 
/* Handle the Command Complete event for HCI_OP_LE_ADD_TO_RESOLV_LIST:
 * mirror the added entry (with its IRKs) into the host's copy of the
 * resolving list.
 */
static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The entry data comes from the command we sent, not the reply */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
	hci_dev_unlock(hdev);

	return rp->status;
}
1969 
/* Handle the Command Complete event for HCI_OP_LE_DEL_FROM_RESOLV_LIST:
 * remove the entry from the host's copy of the resolving list.
 */
static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The address comes from the command we sent, not the reply */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
1992 
/* Handle the Command Complete event for HCI_OP_LE_CLEAR_RESOLV_LIST:
 * drop the host's copy of the resolving list.
 */
static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
2009 
2010 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2011                       struct sk_buff *skb)
2012 {
2013     struct hci_rp_le_read_resolv_list_size *rp = data;
2014 
2015     bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2016 
2017     if (rp->status)
2018         return rp->status;
2019 
2020     hdev->le_resolv_list_size = rp->size;
2021 
2022     return rp->status;
2023 }
2024 
/* Handle the Command Complete event for
 * HCI_OP_LE_SET_ADDR_RESOLV_ENABLE: sync the HCI_LL_RPA_RESOLUTION
 * flag with the enable state we configured.
 */
static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The enable value comes from the command we sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}
2051 
2052 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2053                       struct sk_buff *skb)
2054 {
2055     struct hci_rp_le_read_max_data_len *rp = data;
2056 
2057     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2058 
2059     if (rp->status)
2060         return rp->status;
2061 
2062     hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2063     hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2064     hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2065     hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2066 
2067     return rp->status;
2068 }
2069 
/* Handle the Command Complete event for HCI_OP_WRITE_LE_HOST_SUPPORTED:
 * sync the host feature bits and the HCI_LE_ENABLED/HCI_ADVERTISING
 * flags with the values we wrote.
 */
static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The reply carries only a status; use the command we sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* Disabling LE also invalidates any LE advertising state */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}
2105 
/* Handle the Command Complete event for HCI_OP_LE_SET_ADV_PARAM: on
 * success, remember the own-address type we configured.
 */
static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The reply carries only a status; use the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}
2127 
/* Handle the Command Complete event for HCI_OP_LE_SET_EXT_ADV_PARAMS:
 * record the own-address type and the TX power the controller selected
 * for the instance, then refresh the advertising data.
 */
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The instance handle comes from the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}
2161 
2162 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2163                struct sk_buff *skb)
2164 {
2165     struct hci_rp_read_rssi *rp = data;
2166     struct hci_conn *conn;
2167 
2168     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2169 
2170     if (rp->status)
2171         return rp->status;
2172 
2173     hci_dev_lock(hdev);
2174 
2175     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2176     if (conn)
2177         conn->rssi = rp->rssi;
2178 
2179     hci_dev_unlock(hdev);
2180 
2181     return rp->status;
2182 }
2183 
2184 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2185                    struct sk_buff *skb)
2186 {
2187     struct hci_cp_read_tx_power *sent;
2188     struct hci_rp_read_tx_power *rp = data;
2189     struct hci_conn *conn;
2190 
2191     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2192 
2193     if (rp->status)
2194         return rp->status;
2195 
2196     sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2197     if (!sent)
2198         return rp->status;
2199 
2200     hci_dev_lock(hdev);
2201 
2202     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2203     if (!conn)
2204         goto unlock;
2205 
2206     switch (sent->type) {
2207     case 0x00:
2208         conn->tx_power = rp->tx_power;
2209         break;
2210     case 0x01:
2211         conn->max_tx_power = rp->tx_power;
2212         break;
2213     }
2214 
2215 unlock:
2216     hci_dev_unlock(hdev);
2217     return rp->status;
2218 }
2219 
2220 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2221                       struct sk_buff *skb)
2222 {
2223     struct hci_ev_status *rp = data;
2224     u8 *mode;
2225 
2226     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2227 
2228     if (rp->status)
2229         return rp->status;
2230 
2231     mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2232     if (mode)
2233         hdev->ssp_debug_mode = *mode;
2234 
2235     return rp->status;
2236 }
2237 
2238 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2239 {
2240     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2241 
2242     if (status) {
2243         hci_conn_check_pending(hdev);
2244         return;
2245     }
2246 
2247     set_bit(HCI_INQUIRY, &hdev->flags);
2248 }
2249 
/* Command status for HCI_OP_CREATE_CONN (outgoing BR/EDR ACL).
 *
 * On failure the pending connection is either torn down or, for status
 * 0x0c with fewer than three attempts, parked in BT_CONNECT2 so it can
 * be retried. On success a connection object is created if none exists
 * yet (e.g. the command was sent outside the usual connect path).
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
    struct hci_cp_create_conn *cp;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", status);

    /* Parameters of the command that just completed */
    cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
    if (!cp)
        return;

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

    bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

    if (status) {
        if (conn && conn->state == BT_CONNECT) {
            /* Status 0x0c (Command Disallowed) is retried up to
             * two times; any other failure closes the connection.
             */
            if (status != 0x0c || conn->attempt > 2) {
                conn->state = BT_CLOSED;
                hci_connect_cfm(conn, status);
                hci_conn_del(conn);
            } else
                conn->state = BT_CONNECT2;
        }
    } else {
        if (!conn) {
            conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
                        HCI_ROLE_MASTER);
            if (!conn)
                bt_dev_err(hdev, "no memory for new connection");
        }
    }

    hci_dev_unlock(hdev);
}
2287 
2288 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2289 {
2290     struct hci_cp_add_sco *cp;
2291     struct hci_conn *acl, *sco;
2292     __u16 handle;
2293 
2294     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2295 
2296     if (!status)
2297         return;
2298 
2299     cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2300     if (!cp)
2301         return;
2302 
2303     handle = __le16_to_cpu(cp->handle);
2304 
2305     bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2306 
2307     hci_dev_lock(hdev);
2308 
2309     acl = hci_conn_hash_lookup_handle(hdev, handle);
2310     if (acl) {
2311         sco = acl->link;
2312         if (sco) {
2313             sco->state = BT_CLOSED;
2314 
2315             hci_connect_cfm(sco, status);
2316             hci_conn_del(sco);
2317         }
2318     }
2319 
2320     hci_dev_unlock(hdev);
2321 }
2322 
2323 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2324 {
2325     struct hci_cp_auth_requested *cp;
2326     struct hci_conn *conn;
2327 
2328     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2329 
2330     if (!status)
2331         return;
2332 
2333     cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2334     if (!cp)
2335         return;
2336 
2337     hci_dev_lock(hdev);
2338 
2339     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2340     if (conn) {
2341         if (conn->state == BT_CONFIG) {
2342             hci_connect_cfm(conn, status);
2343             hci_conn_drop(conn);
2344         }
2345     }
2346 
2347     hci_dev_unlock(hdev);
2348 }
2349 
2350 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2351 {
2352     struct hci_cp_set_conn_encrypt *cp;
2353     struct hci_conn *conn;
2354 
2355     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2356 
2357     if (!status)
2358         return;
2359 
2360     cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2361     if (!cp)
2362         return;
2363 
2364     hci_dev_lock(hdev);
2365 
2366     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2367     if (conn) {
2368         if (conn->state == BT_CONFIG) {
2369             hci_connect_cfm(conn, status);
2370             hci_conn_drop(conn);
2371         }
2372     }
2373 
2374     hci_dev_unlock(hdev);
2375 }
2376 
2377 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2378                     struct hci_conn *conn)
2379 {
2380     if (conn->state != BT_CONFIG || !conn->out)
2381         return 0;
2382 
2383     if (conn->pending_sec_level == BT_SECURITY_SDP)
2384         return 0;
2385 
2386     /* Only request authentication for SSP connections or non-SSP
2387      * devices with sec_level MEDIUM or HIGH or if MITM protection
2388      * is requested.
2389      */
2390     if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2391         conn->pending_sec_level != BT_SECURITY_FIPS &&
2392         conn->pending_sec_level != BT_SECURITY_HIGH &&
2393         conn->pending_sec_level != BT_SECURITY_MEDIUM)
2394         return 0;
2395 
2396     return 1;
2397 }
2398 
2399 static int hci_resolve_name(struct hci_dev *hdev,
2400                    struct inquiry_entry *e)
2401 {
2402     struct hci_cp_remote_name_req cp;
2403 
2404     memset(&cp, 0, sizeof(cp));
2405 
2406     bacpy(&cp.bdaddr, &e->data.bdaddr);
2407     cp.pscan_rep_mode = e->data.pscan_rep_mode;
2408     cp.pscan_mode = e->data.pscan_mode;
2409     cp.clock_offset = e->data.clock_offset;
2410 
2411     return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2412 }
2413 
2414 static bool hci_resolve_next_name(struct hci_dev *hdev)
2415 {
2416     struct discovery_state *discov = &hdev->discovery;
2417     struct inquiry_entry *e;
2418 
2419     if (list_empty(&discov->resolve))
2420         return false;
2421 
2422     /* We should stop if we already spent too much time resolving names. */
2423     if (time_after(jiffies, discov->name_resolve_timeout)) {
2424         bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2425         return false;
2426     }
2427 
2428     e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2429     if (!e)
2430         return false;
2431 
2432     if (hci_resolve_name(hdev, e) == 0) {
2433         e->name_state = NAME_PENDING;
2434         return true;
2435     }
2436 
2437     return false;
2438 }
2439 
/* Handle the outcome of a name request for @bdaddr (name == NULL means
 * the lookup failed): report it to mgmt, update the inquiry cache and,
 * while discovery is resolving, continue with the next pending name or
 * finish discovery.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
                   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
    struct discovery_state *discov = &hdev->discovery;
    struct inquiry_entry *e;

    /* Update the mgmt connected state if necessary. Be careful with
     * conn objects that exist but are not (yet) connected however.
     * Only those in BT_CONFIG or BT_CONNECTED states can be
     * considered connected.
     */
    if (conn &&
        (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
        !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
        mgmt_device_connected(hdev, conn, name, name_len);

    if (discov->state == DISCOVERY_STOPPED)
        return;

    /* A stop request is pending: finish discovery right away */
    if (discov->state == DISCOVERY_STOPPING)
        goto discov_complete;

    if (discov->state != DISCOVERY_RESOLVING)
        return;

    e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
    /* If the device was not found in a list of found devices names of
     * which are pending, there is no need to continue resolving a next
     * name as it will be done upon receiving another Remote Name Request
     * Complete Event.
     */
    if (!e)
        return;

    list_del(&e->list);

    e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
    mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
             name, name_len);

    /* More names pending: stay in DISCOVERY_RESOLVING */
    if (hci_resolve_next_name(hdev))
        return;

discov_complete:
    hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2485 
/* Command status for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are handled here: report the failed resolution to the
 * discovery machinery and, if the connection still needs it, start
 * authentication that was deferred behind the name request.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
    struct hci_cp_remote_name_req *cp;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", status);

    /* If successful wait for the name req complete event before
     * checking for the need to do authentication */
    if (!status)
        return;

    /* Parameters of the command that just completed */
    cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
    if (!cp)
        return;

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

    /* NULL name tells mgmt the resolution failed */
    if (hci_dev_test_flag(hdev, HCI_MGMT))
        hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

    if (!conn)
        goto unlock;

    if (!hci_outgoing_auth_needed(hdev, conn))
        goto unlock;

    if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
        struct hci_cp_auth_requested auth_cp;

        set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

        auth_cp.handle = __cpu_to_le16(conn->handle);
        hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
                 sizeof(auth_cp), &auth_cp);
    }

unlock:
    hci_dev_unlock(hdev);
}
2528 
2529 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2530 {
2531     struct hci_cp_read_remote_features *cp;
2532     struct hci_conn *conn;
2533 
2534     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2535 
2536     if (!status)
2537         return;
2538 
2539     cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2540     if (!cp)
2541         return;
2542 
2543     hci_dev_lock(hdev);
2544 
2545     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2546     if (conn) {
2547         if (conn->state == BT_CONFIG) {
2548             hci_connect_cfm(conn, status);
2549             hci_conn_drop(conn);
2550         }
2551     }
2552 
2553     hci_dev_unlock(hdev);
2554 }
2555 
2556 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2557 {
2558     struct hci_cp_read_remote_ext_features *cp;
2559     struct hci_conn *conn;
2560 
2561     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2562 
2563     if (!status)
2564         return;
2565 
2566     cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2567     if (!cp)
2568         return;
2569 
2570     hci_dev_lock(hdev);
2571 
2572     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2573     if (conn) {
2574         if (conn->state == BT_CONFIG) {
2575             hci_connect_cfm(conn, status);
2576             hci_conn_drop(conn);
2577         }
2578     }
2579 
2580     hci_dev_unlock(hdev);
2581 }
2582 
2583 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2584 {
2585     struct hci_cp_setup_sync_conn *cp;
2586     struct hci_conn *acl, *sco;
2587     __u16 handle;
2588 
2589     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2590 
2591     if (!status)
2592         return;
2593 
2594     cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2595     if (!cp)
2596         return;
2597 
2598     handle = __le16_to_cpu(cp->handle);
2599 
2600     bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2601 
2602     hci_dev_lock(hdev);
2603 
2604     acl = hci_conn_hash_lookup_handle(hdev, handle);
2605     if (acl) {
2606         sco = acl->link;
2607         if (sco) {
2608             sco->state = BT_CLOSED;
2609 
2610             hci_connect_cfm(sco, status);
2611             hci_conn_del(sco);
2612         }
2613     }
2614 
2615     hci_dev_unlock(hdev);
2616 }
2617 
2618 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2619 {
2620     struct hci_cp_enhanced_setup_sync_conn *cp;
2621     struct hci_conn *acl, *sco;
2622     __u16 handle;
2623 
2624     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2625 
2626     if (!status)
2627         return;
2628 
2629     cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2630     if (!cp)
2631         return;
2632 
2633     handle = __le16_to_cpu(cp->handle);
2634 
2635     bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2636 
2637     hci_dev_lock(hdev);
2638 
2639     acl = hci_conn_hash_lookup_handle(hdev, handle);
2640     if (acl) {
2641         sco = acl->link;
2642         if (sco) {
2643             sco->state = BT_CLOSED;
2644 
2645             hci_connect_cfm(sco, status);
2646             hci_conn_del(sco);
2647         }
2648     }
2649 
2650     hci_dev_unlock(hdev);
2651 }
2652 
2653 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2654 {
2655     struct hci_cp_sniff_mode *cp;
2656     struct hci_conn *conn;
2657 
2658     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2659 
2660     if (!status)
2661         return;
2662 
2663     cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2664     if (!cp)
2665         return;
2666 
2667     hci_dev_lock(hdev);
2668 
2669     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2670     if (conn) {
2671         clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2672 
2673         if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2674             hci_sco_setup(conn, status);
2675     }
2676 
2677     hci_dev_unlock(hdev);
2678 }
2679 
2680 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2681 {
2682     struct hci_cp_exit_sniff_mode *cp;
2683     struct hci_conn *conn;
2684 
2685     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2686 
2687     if (!status)
2688         return;
2689 
2690     cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2691     if (!cp)
2692         return;
2693 
2694     hci_dev_lock(hdev);
2695 
2696     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2697     if (conn) {
2698         clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2699 
2700         if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2701             hci_sco_setup(conn, status);
2702     }
2703 
2704     hci_dev_unlock(hdev);
2705 }
2706 
/* Command status for HCI_OP_DISCONNECT.
 *
 * Normally the cleanup happens on HCI_EV_DISCONN_COMPLETE; this path
 * only runs when the disconnect command failed or when the device is
 * suspended (where the complete event is not waited for). It reports
 * the result to mgmt, updates auto-connect bookkeeping and finally
 * deletes the connection object.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
    struct hci_cp_disconnect *cp;
    struct hci_conn_params *params;
    struct hci_conn *conn;
    bool mgmt_conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", status);

    /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
     * otherwise cleanup the connection immediately.
     */
    if (!status && !hdev->suspended)
        return;

    /* Parameters of the command that just completed */
    cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
    if (!cp)
        return;

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
    if (!conn)
        goto unlock;

    if (status) {
        mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
                       conn->dst_type, status);

        /* Re-enable advertising for a peripheral LE link that is
         * about to be deleted below.
         */
        if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
            hdev->cur_adv_instance = conn->adv_instance;
            hci_enable_advertising(hdev);
        }

        goto done;
    }

    mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

    if (conn->type == ACL_LINK) {
        if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
            hci_remove_link_key(hdev, &conn->dst);
    }

    /* Re-queue auto-connect entries so the device is reconnected */
    params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
    if (params) {
        switch (params->auto_connect) {
        case HCI_AUTO_CONN_LINK_LOSS:
            if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
                break;
            fallthrough;

        case HCI_AUTO_CONN_DIRECT:
        case HCI_AUTO_CONN_ALWAYS:
            list_del_init(&params->action);
            list_add(&params->action, &hdev->pend_le_conns);
            break;

        default:
            break;
        }
    }

    mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
                 cp->reason, mgmt_conn);

    hci_disconn_cfm(conn, cp->reason);

done:
    /* If the disconnection failed for any reason, the upper layer
     * does not retry to disconnect in current implementation.
     * Hence, we need to do some basic cleanup here and re-enable
     * advertising if necessary.
     */
    hci_conn_del(conn);
unlock:
    hci_dev_unlock(hdev);
}
2785 
2786 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2787 {
2788     /* When using controller based address resolution, then the new
2789      * address types 0x02 and 0x03 are used. These types need to be
2790      * converted back into either public address or random address type
2791      */
2792     switch (type) {
2793     case ADDR_LE_DEV_PUBLIC_RESOLVED:
2794         if (resolved)
2795             *resolved = true;
2796         return ADDR_LE_DEV_PUBLIC;
2797     case ADDR_LE_DEV_RANDOM_RESOLVED:
2798         if (resolved)
2799             *resolved = true;
2800         return ADDR_LE_DEV_RANDOM;
2801     }
2802 
2803     if (resolved)
2804         *resolved = false;
2805     return type;
2806 }
2807 
/* Common success handling for LE (extended) create connection command
 * status: record the initiator/responder addresses needed by SMP and
 * arm the LE connection timeout where no accept list is in use.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
                  u8 peer_addr_type, u8 own_address_type,
                  u8 filter_policy)
{
    struct hci_conn *conn;

    conn = hci_conn_hash_lookup_le(hdev, peer_addr,
                       peer_addr_type);
    if (!conn)
        return;

    /* Map resolved address types back to public/random */
    own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);

    /* Store the initiator and responder address information which
     * is needed for SMP. These values will not change during the
     * lifetime of the connection.
     */
    conn->init_addr_type = own_address_type;
    if (own_address_type == ADDR_LE_DEV_RANDOM)
        bacpy(&conn->init_addr, &hdev->random_addr);
    else
        bacpy(&conn->init_addr, &hdev->bdaddr);

    conn->resp_addr_type = peer_addr_type;
    bacpy(&conn->resp_addr, peer_addr);

    /* We don't want the connection attempt to stick around
     * indefinitely since LE doesn't have a page timeout concept
     * like BR/EDR. Set a timer for any connection that doesn't use
     * the accept list for connecting.
     */
    if (filter_policy == HCI_LE_USE_PEER_ADDR)
        queue_delayed_work(conn->hdev->workqueue,
                   &conn->le_conn_timeout,
                   conn->conn_timeout);
}
2844 
2845 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2846 {
2847     struct hci_cp_le_create_conn *cp;
2848 
2849     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2850 
2851     /* All connection failure handling is taken care of by the
2852      * hci_conn_failed function which is triggered by the HCI
2853      * request completion callbacks used for connecting.
2854      */
2855     if (status)
2856         return;
2857 
2858     cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2859     if (!cp)
2860         return;
2861 
2862     hci_dev_lock(hdev);
2863 
2864     cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2865               cp->own_address_type, cp->filter_policy);
2866 
2867     hci_dev_unlock(hdev);
2868 }
2869 
2870 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2871 {
2872     struct hci_cp_le_ext_create_conn *cp;
2873 
2874     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2875 
2876     /* All connection failure handling is taken care of by the
2877      * hci_conn_failed function which is triggered by the HCI
2878      * request completion callbacks used for connecting.
2879      */
2880     if (status)
2881         return;
2882 
2883     cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2884     if (!cp)
2885         return;
2886 
2887     hci_dev_lock(hdev);
2888 
2889     cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2890               cp->own_addr_type, cp->filter_policy);
2891 
2892     hci_dev_unlock(hdev);
2893 }
2894 
2895 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2896 {
2897     struct hci_cp_le_read_remote_features *cp;
2898     struct hci_conn *conn;
2899 
2900     bt_dev_dbg(hdev, "status 0x%2.2x", status);
2901 
2902     if (!status)
2903         return;
2904 
2905     cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2906     if (!cp)
2907         return;
2908 
2909     hci_dev_lock(hdev);
2910 
2911     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2912     if (conn) {
2913         if (conn->state == BT_CONFIG) {
2914             hci_connect_cfm(conn, status);
2915             hci_conn_drop(conn);
2916         }
2917     }
2918 
2919     hci_dev_unlock(hdev);
2920 }
2921 
/* Command status for HCI_OP_LE_START_ENC.
 *
 * If starting encryption failed on an established connection, the link
 * cannot be trusted anymore, so it is disconnected with an
 * authentication failure.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
    struct hci_cp_le_start_enc *cp;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", status);

    if (!status)
        return;

    hci_dev_lock(hdev);

    /* Parameters of the command that just completed */
    cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
    if (!cp)
        goto unlock;

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
    if (!conn)
        goto unlock;

    /* Only tear down links that completed connection setup */
    if (conn->state != BT_CONNECTED)
        goto unlock;

    hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
    hci_conn_drop(conn);

unlock:
    hci_dev_unlock(hdev);
}
2951 
2952 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2953 {
2954     struct hci_cp_switch_role *cp;
2955     struct hci_conn *conn;
2956 
2957     BT_DBG("%s status 0x%2.2x", hdev->name, status);
2958 
2959     if (!status)
2960         return;
2961 
2962     cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2963     if (!cp)
2964         return;
2965 
2966     hci_dev_lock(hdev);
2967 
2968     conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2969     if (conn)
2970         clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2971 
2972     hci_dev_unlock(hdev);
2973 }
2974 
/* Handle HCI_EV_INQUIRY_COMPLETE.
 *
 * Clears the HCI_INQUIRY flag, wakes any waiters and, when mgmt-driven
 * discovery is active, either starts resolving outstanding device
 * names or marks discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
                     struct sk_buff *skb)
{
    struct hci_ev_status *ev = data;
    struct discovery_state *discov = &hdev->discovery;
    struct inquiry_entry *e;

    bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

    hci_conn_check_pending(hdev);

    if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
        return;

    smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
    wake_up_bit(&hdev->flags, HCI_INQUIRY);

    if (!hci_dev_test_flag(hdev, HCI_MGMT))
        return;

    hci_dev_lock(hdev);

    if (discov->state != DISCOVERY_FINDING)
        goto unlock;

    if (list_empty(&discov->resolve)) {
        /* When BR/EDR inquiry is active and no LE scanning is in
         * progress, then change discovery state to indicate completion.
         *
         * When running LE scanning and BR/EDR inquiry simultaneously
         * and the LE scan already finished, then change the discovery
         * state to indicate completion.
         */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
            !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
            hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        goto unlock;
    }

    /* Names still pending: switch to resolving and start a budgeted
     * round of Remote Name Requests.
     */
    e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
    if (e && hci_resolve_name(hdev, e) == 0) {
        e->name_state = NAME_PENDING;
        hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
        discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
    } else {
        /* When BR/EDR inquiry is active and no LE scanning is in
         * progress, then change discovery state to indicate completion.
         *
         * When running LE scanning and BR/EDR inquiry simultaneously
         * and the LE scan already finished, then change the discovery
         * state to indicate completion.
         */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
            !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
            hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
    }

unlock:
    hci_dev_unlock(hdev);
}
3035 
/* Handle HCI_EV_INQUIRY_RESULT.
 *
 * Validates the flexible-array event payload, then feeds every
 * reported device into the inquiry cache and forwards it to mgmt as a
 * found device (RSSI is not available in this legacy event format).
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
                   struct sk_buff *skb)
{
    struct hci_ev_inquiry_result *ev = edata;
    struct inquiry_data data;
    int i;

    /* Make sure the skb really contains ev->num info entries */
    if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
                 flex_array_size(ev, info, ev->num)))
        return;

    bt_dev_dbg(hdev, "num %d", ev->num);

    if (!ev->num)
        return;

    /* Periodic inquiry results are not reported to mgmt */
    if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
        return;

    hci_dev_lock(hdev);

    for (i = 0; i < ev->num; i++) {
        struct inquiry_info *info = &ev->info[i];
        u32 flags;

        bacpy(&data.bdaddr, &info->bdaddr);
        data.pscan_rep_mode = info->pscan_rep_mode;
        data.pscan_period_mode  = info->pscan_period_mode;
        data.pscan_mode     = info->pscan_mode;
        memcpy(data.dev_class, info->dev_class, 3);
        data.clock_offset   = info->clock_offset;
        data.rssi       = HCI_RSSI_INVALID;
        data.ssp_mode       = 0x00;

        flags = hci_inquiry_cache_update(hdev, &data, false);

        mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                  info->dev_class, HCI_RSSI_INVALID,
                  flags, NULL, 0, NULL, 0);
    }

    hci_dev_unlock(hdev);
}
3079 
3080 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3081                   struct sk_buff *skb)
3082 {
3083     struct hci_ev_conn_complete *ev = data;
3084     struct hci_conn *conn;
3085     u8 status = ev->status;
3086 
3087     bt_dev_dbg(hdev, "status 0x%2.2x", status);
3088 
3089     hci_dev_lock(hdev);
3090 
3091     conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3092     if (!conn) {
3093         /* In case of error status and there is no connection pending
3094          * just unlock as there is nothing to cleanup.
3095          */
3096         if (ev->status)
3097             goto unlock;
3098 
3099         /* Connection may not exist if auto-connected. Check the bredr
3100          * allowlist to see if this device is allowed to auto connect.
3101          * If link is an ACL type, create a connection class
3102          * automatically.
3103          *
3104          * Auto-connect will only occur if the event filter is
3105          * programmed with a given address. Right now, event filter is
3106          * only used during suspend.
3107          */
3108         if (ev->link_type == ACL_LINK &&
3109             hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3110                               &ev->bdaddr,
3111                               BDADDR_BREDR)) {
3112             conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3113                         HCI_ROLE_SLAVE);
3114             if (!conn) {
3115                 bt_dev_err(hdev, "no memory for new conn");
3116                 goto unlock;
3117             }
3118         } else {
3119             if (ev->link_type != SCO_LINK)
3120                 goto unlock;
3121 
3122             conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3123                                &ev->bdaddr);
3124             if (!conn)
3125                 goto unlock;
3126 
3127             conn->type = SCO_LINK;
3128         }
3129     }
3130 
3131     /* The HCI_Connection_Complete event is only sent once per connection.
3132      * Processing it more than once per connection can corrupt kernel memory.
3133      *
3134      * As the connection handle is set here for the first time, it indicates
3135      * whether the connection is already set up.
3136      */
3137     if (conn->handle != HCI_CONN_HANDLE_UNSET) {
3138         bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3139         goto unlock;
3140     }
3141 
3142     if (!status) {
3143         conn->handle = __le16_to_cpu(ev->handle);
3144         if (conn->handle > HCI_CONN_HANDLE_MAX) {
3145             bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
3146                    conn->handle, HCI_CONN_HANDLE_MAX);
3147             status = HCI_ERROR_INVALID_PARAMETERS;
3148             goto done;
3149         }
3150 
3151         if (conn->type == ACL_LINK) {
3152             conn->state = BT_CONFIG;
3153             hci_conn_hold(conn);
3154 
3155             if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3156                 !hci_find_link_key(hdev, &ev->bdaddr))
3157                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3158             else
3159                 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3160         } else
3161             conn->state = BT_CONNECTED;
3162 
3163         hci_debugfs_create_conn(conn);
3164         hci_conn_add_sysfs(conn);
3165 
3166         if (test_bit(HCI_AUTH, &hdev->flags))
3167             set_bit(HCI_CONN_AUTH, &conn->flags);
3168 
3169         if (test_bit(HCI_ENCRYPT, &hdev->flags))
3170             set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3171 
3172         /* Get remote features */
3173         if (conn->type == ACL_LINK) {
3174             struct hci_cp_read_remote_features cp;
3175             cp.handle = ev->handle;
3176             hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3177                      sizeof(cp), &cp);
3178 
3179             hci_update_scan(hdev);
3180         }
3181 
3182         /* Set packet type for incoming connection */
3183         if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3184             struct hci_cp_change_conn_ptype cp;
3185             cp.handle = ev->handle;
3186             cp.pkt_type = cpu_to_le16(conn->pkt_type);
3187             hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3188                      &cp);
3189         }
3190     }
3191 
3192     if (conn->type == ACL_LINK)
3193         hci_sco_setup(conn, ev->status);
3194 
3195 done:
3196     if (status) {
3197         hci_conn_failed(conn, status);
3198     } else if (ev->link_type == SCO_LINK) {
3199         switch (conn->setting & SCO_AIRMODE_MASK) {
3200         case SCO_AIRMODE_CVSD:
3201             if (hdev->notify)
3202                 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3203             break;
3204         }
3205 
3206         hci_connect_cfm(conn, status);
3207     }
3208 
3209 unlock:
3210     hci_dev_unlock(hdev);
3211 
3212     hci_conn_check_pending(hdev);
3213 }
3214 
3215 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3216 {
3217     struct hci_cp_reject_conn_req cp;
3218 
3219     bacpy(&cp.bdaddr, bdaddr);
3220     cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3221     hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3222 }
3223 
/* HCI Connection Request event: an incoming BR/EDR ACL or (e)SCO
 * connection request from a remote device.
 *
 * The request is rejected unless the link mode (or a registered
 * protocol, via hci_proto_connect_ind()) allows accepting it and the
 * peer is not on the reject list. When accepted, a hci_conn is looked
 * up or created, and the request is then either accepted immediately
 * (ACL, or SCO on non-eSCO controllers), accepted as a synchronous
 * connection, or deferred to the upper layer via HCI_PROTO_DEFER.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
                 struct sk_buff *skb)
{
    struct hci_ev_conn_request *ev = data;
    int mask = hdev->link_mode;
    struct inquiry_entry *ie;
    struct hci_conn *conn;
    __u8 flags = 0;

    bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

    /* Let registered protocols (L2CAP/SCO) vote on accepting and
     * possibly request deferred setup via @flags.
     */
    mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
                      &flags);

    if (!(mask & HCI_LM_ACCEPT)) {
        hci_reject_conn(hdev, &ev->bdaddr);
        return;
    }

    hci_dev_lock(hdev);

    if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
                   BDADDR_BREDR)) {
        hci_reject_conn(hdev, &ev->bdaddr);
        goto unlock;
    }

    /* Require HCI_CONNECTABLE or an accept list entry to accept the
     * connection. These features are only touched through mgmt so
     * only do the checks if HCI_MGMT is set.
     */
    if (hci_dev_test_flag(hdev, HCI_MGMT) &&
        !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
        !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
                           BDADDR_BREDR)) {
        hci_reject_conn(hdev, &ev->bdaddr);
        goto unlock;
    }

    /* Connection accepted */

    /* Refresh the cached device class for this peer, if known */
    ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
    if (ie)
        memcpy(ie->data.dev_class, ev->dev_class, 3);

    /* Reuse an existing connection object for this peer/link type,
     * otherwise create one in the peripheral role.
     */
    conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
            &ev->bdaddr);
    if (!conn) {
        conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
                    HCI_ROLE_SLAVE);
        if (!conn) {
            bt_dev_err(hdev, "no memory for new connection");
            goto unlock;
        }
    }

    memcpy(conn->dev_class, ev->dev_class, 3);

    hci_dev_unlock(hdev);

    /* ACL requests are always accepted here; SCO requests are also
     * accepted here when the controller has no eSCO support (the
     * sync-accept command would not apply) and setup is not deferred.
     */
    if (ev->link_type == ACL_LINK ||
        (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
        struct hci_cp_accept_conn_req cp;
        conn->state = BT_CONNECT;

        bacpy(&cp.bdaddr, &ev->bdaddr);

        if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
            cp.role = 0x00; /* Become central */
        else
            cp.role = 0x01; /* Remain peripheral */

        hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
    } else if (!(flags & HCI_PROTO_DEFER)) {
        /* eSCO-capable controller: accept with synchronous
         * connection parameters (64 kbit/s, voice setting from hdev).
         */
        struct hci_cp_accept_sync_conn_req cp;
        conn->state = BT_CONNECT;

        bacpy(&cp.bdaddr, &ev->bdaddr);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
        cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
        cp.max_latency    = cpu_to_le16(0xffff);
        cp.content_format = cpu_to_le16(hdev->voice_setting);
        cp.retrans_effort = 0xff;

        hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
                 &cp);
    } else {
        /* Defer: let the upper layer decide whether to accept */
        conn->state = BT_CONNECT2;
        hci_connect_cfm(conn, 0);
    }

    return;
unlock:
    hci_dev_unlock(hdev);
}
3321 
3322 static u8 hci_to_mgmt_reason(u8 err)
3323 {
3324     switch (err) {
3325     case HCI_ERROR_CONNECTION_TIMEOUT:
3326         return MGMT_DEV_DISCONN_TIMEOUT;
3327     case HCI_ERROR_REMOTE_USER_TERM:
3328     case HCI_ERROR_REMOTE_LOW_RESOURCES:
3329     case HCI_ERROR_REMOTE_POWER_OFF:
3330         return MGMT_DEV_DISCONN_REMOTE;
3331     case HCI_ERROR_LOCAL_HOST_TERM:
3332         return MGMT_DEV_DISCONN_LOCAL_HOST;
3333     default:
3334         return MGMT_DEV_DISCONN_UNKNOWN;
3335     }
3336 }
3337 
/* HCI Disconnection Complete event: a link identified by handle has
 * been torn down.
 *
 * Marks the connection closed, reports the (translated) reason to
 * mgmt, re-arms LE auto-connection for peers configured for it,
 * notifies the upper layers, re-enables advertising when a peripheral
 * LE link went away, and finally deletes the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
                     struct sk_buff *skb)
{
    struct hci_ev_disconn_complete *ev = data;
    u8 reason;
    struct hci_conn_params *params;
    struct hci_conn *conn;
    bool mgmt_connected;

    bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    if (!conn)
        goto unlock;

    if (ev->status) {
        mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
                       conn->dst_type, ev->status);
        goto unlock;
    }

    conn->state = BT_CLOSED;

    mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

    /* A recorded authentication failure overrides the HCI reason */
    if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
        reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
    else
        reason = hci_to_mgmt_reason(ev->reason);

    mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
                reason, mgmt_connected);

    if (conn->type == ACL_LINK) {
        if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
            hci_remove_link_key(hdev, &conn->dst);

        hci_update_scan(hdev);
    }

    /* Re-queue the peer for a pending LE connection when its
     * auto-connect policy asks for reconnection (always, direct, or
     * link-loss only after an actual connection timeout).
     */
    params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
    if (params) {
        switch (params->auto_connect) {
        case HCI_AUTO_CONN_LINK_LOSS:
            if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
                break;
            fallthrough;

        case HCI_AUTO_CONN_DIRECT:
        case HCI_AUTO_CONN_ALWAYS:
            list_del_init(&params->action);
            list_add(&params->action, &hdev->pend_le_conns);
            hci_update_passive_scan(hdev);
            break;

        default:
            break;
        }
    }

    hci_disconn_cfm(conn, ev->reason);

    /* Re-enable advertising if necessary, since it might
     * have been disabled by the connection. From the
     * HCI_LE_Set_Advertise_Enable command description in
     * the core specification (v4.0):
     * "The Controller shall continue advertising until the Host
     * issues an LE_Set_Advertise_Enable command with
     * Advertising_Enable set to 0x00 (Advertising is disabled)
     * or until a connection is created or until the Advertising
     * is timed out due to Directed Advertising."
     */
    if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
        hdev->cur_adv_instance = conn->adv_instance;
        hci_enable_advertising(hdev);
    }

    hci_conn_del(conn);

unlock:
    hci_dev_unlock(hdev);
}
3422 
/* HCI Authentication Complete event.
 *
 * Updates the connection's authentication state/flags, continues
 * connection setup with encryption when the link is still in
 * BT_CONFIG and SSP is in use, and kicks off a pending encryption
 * request (HCI_CONN_ENCRYPT_PEND) if one was queued behind the
 * authentication.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
                  struct sk_buff *skb)
{
    struct hci_ev_auth_complete *ev = data;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    if (!conn)
        goto unlock;

    if (!ev->status) {
        clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

        /* Legacy (pre-SSP) devices cannot be re-authenticated; keep
         * the existing security level in that case.
         */
        if (!hci_conn_ssp_enabled(conn) &&
            test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
            bt_dev_info(hdev, "re-auth of legacy device is not possible.");
        } else {
            set_bit(HCI_CONN_AUTH, &conn->flags);
            conn->sec_level = conn->pending_sec_level;
        }
    } else {
        if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
            set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

        mgmt_auth_failed(conn, ev->status);
    }

    clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
    clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

    if (conn->state == BT_CONFIG) {
        /* During setup with SSP, follow successful authentication
         * with enabling encryption; otherwise finish setup here.
         */
        if (!ev->status && hci_conn_ssp_enabled(conn)) {
            struct hci_cp_set_conn_encrypt cp;
            cp.handle  = ev->handle;
            cp.encrypt = 0x01;
            hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
                     &cp);
        } else {
            conn->state = BT_CONNECTED;
            hci_connect_cfm(conn, ev->status);
            hci_conn_drop(conn);
        }
    } else {
        hci_auth_cfm(conn, ev->status);

        hci_conn_hold(conn);
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
        hci_conn_drop(conn);
    }

    /* An encryption request queued behind this authentication is
     * issued now on success, or failed through to the upper layers.
     */
    if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
        if (!ev->status) {
            struct hci_cp_set_conn_encrypt cp;
            cp.handle  = ev->handle;
            cp.encrypt = 0x01;
            hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
                     &cp);
        } else {
            clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
            hci_encrypt_cfm(conn, ev->status);
        }
    }

unlock:
    hci_dev_unlock(hdev);
}
3493 
/* HCI Remote Name Request Complete event.
 *
 * Forwards the resolved name (or the failure) to the mgmt layer when
 * mgmt is enabled, and — if the connection still requires outgoing
 * authentication — issues HCI_Authentication_Requested for it.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
                struct sk_buff *skb)
{
    struct hci_ev_remote_name *ev = data;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

    hci_conn_check_pending(hdev);

    hci_dev_lock(hdev);

    /* May be NULL; the mgmt name handling below copes with that */
    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

    if (!hci_dev_test_flag(hdev, HCI_MGMT))
        goto check_auth;

    if (ev->status == 0)
        hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
                       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
    else
        hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
    if (!conn)
        goto unlock;

    if (!hci_outgoing_auth_needed(hdev, conn))
        goto unlock;

    /* Start authentication exactly once per connection */
    if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
        struct hci_cp_auth_requested cp;

        set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

        cp.handle = __cpu_to_le16(conn->handle);
        hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
    }

unlock:
    hci_dev_unlock(hdev);
}
3536 
3537 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3538                        u16 opcode, struct sk_buff *skb)
3539 {
3540     const struct hci_rp_read_enc_key_size *rp;
3541     struct hci_conn *conn;
3542     u16 handle;
3543 
3544     BT_DBG("%s status 0x%02x", hdev->name, status);
3545 
3546     if (!skb || skb->len < sizeof(*rp)) {
3547         bt_dev_err(hdev, "invalid read key size response");
3548         return;
3549     }
3550 
3551     rp = (void *)skb->data;
3552     handle = le16_to_cpu(rp->handle);
3553 
3554     hci_dev_lock(hdev);
3555 
3556     conn = hci_conn_hash_lookup_handle(hdev, handle);
3557     if (!conn)
3558         goto unlock;
3559 
3560     /* While unexpected, the read_enc_key_size command may fail. The most
3561      * secure approach is to then assume the key size is 0 to force a
3562      * disconnection.
3563      */
3564     if (rp->status) {
3565         bt_dev_err(hdev, "failed to read key size for handle %u",
3566                handle);
3567         conn->enc_key_size = 0;
3568     } else {
3569         conn->enc_key_size = rp->key_size;
3570     }
3571 
3572     hci_encrypt_cfm(conn, 0);
3573 
3574 unlock:
3575     hci_dev_unlock(hdev);
3576 }
3577 
/* HCI Encryption Change event.
 *
 * Updates the connection's encryption-related flags, enforces link
 * security requirements, reads the encryption key size for encrypted
 * BR/EDR links (when the controller supports the command), optionally
 * programs the Authenticated Payload Timeout for AES-CCM links, and
 * notifies upper layers of the final encryption state.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
                   struct sk_buff *skb)
{
    struct hci_ev_encrypt_change *ev = data;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    if (!conn)
        goto unlock;

    if (!ev->status) {
        if (ev->encrypt) {
            /* Encryption implies authentication */
            set_bit(HCI_CONN_AUTH, &conn->flags);
            set_bit(HCI_CONN_ENCRYPT, &conn->flags);
            conn->sec_level = conn->pending_sec_level;

            /* P-256 authentication key implies FIPS */
            if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
                set_bit(HCI_CONN_FIPS, &conn->flags);

            /* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
             * always use AES-CCM.
             */
            if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
                conn->type == LE_LINK)
                set_bit(HCI_CONN_AES_CCM, &conn->flags);
        } else {
            clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
            clear_bit(HCI_CONN_AES_CCM, &conn->flags);
        }
    }

    /* We should disregard the current RPA and generate a new one
     * whenever the encryption procedure fails.
     */
    if (ev->status && conn->type == LE_LINK) {
        hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
        hci_adv_instances_set_rpa_expired(hdev, true);
    }

    clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

    /* Check link security requirements are met; overwriting
     * ev->status here routes the failure handling below.
     */
    if (!hci_conn_check_link_mode(conn))
        ev->status = HCI_ERROR_AUTH_FAILURE;

    if (ev->status && conn->state == BT_CONNECTED) {
        if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
            set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

        /* Notify upper layers so they can cleanup before
         * disconnecting.
         */
        hci_encrypt_cfm(conn, ev->status);
        hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
        hci_conn_drop(conn);
        goto unlock;
    }

    /* Try reading the encryption key size for encrypted ACL links;
     * the encrypt notification is then deferred to the command's
     * completion callback (read_enc_key_size_complete).
     */
    if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
        struct hci_cp_read_enc_key_size cp;
        struct hci_request req;

        /* Only send HCI_Read_Encryption_Key_Size if the
         * controller really supports it. If it doesn't, assume
         * the default size (16).
         */
        if (!(hdev->commands[20] & 0x10)) {
            conn->enc_key_size = HCI_LINK_KEY_SIZE;
            goto notify;
        }

        hci_req_init(&req, hdev);

        cp.handle = cpu_to_le16(conn->handle);
        hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

        if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
            bt_dev_err(hdev, "sending read key size failed");
            conn->enc_key_size = HCI_LINK_KEY_SIZE;
            goto notify;
        }

        goto unlock;
    }

    /* Set the default Authenticated Payload Timeout after
     * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
     * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
     * sent when the link is active and Encryption is enabled, the conn
     * type can be either LE or ACL and controller must support LMP Ping.
     * Ensure for AES-CCM encryption as well.
     */
    if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
        test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
        ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
         (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
        struct hci_cp_write_auth_payload_to cp;

        cp.handle = cpu_to_le16(conn->handle);
        cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
        hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
                 sizeof(cp), &cp);
    }

notify:
    hci_encrypt_cfm(conn, ev->status);

unlock:
    hci_dev_unlock(hdev);
}
3692 
3693 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3694                          struct sk_buff *skb)
3695 {
3696     struct hci_ev_change_link_key_complete *ev = data;
3697     struct hci_conn *conn;
3698 
3699     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3700 
3701     hci_dev_lock(hdev);
3702 
3703     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3704     if (conn) {
3705         if (!ev->status)
3706             set_bit(HCI_CONN_SECURE, &conn->flags);
3707 
3708         clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3709 
3710         hci_key_change_cfm(conn, ev->status);
3711     }
3712 
3713     hci_dev_unlock(hdev);
3714 }
3715 
/* HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote features and, while the connection is
 * still being configured (BT_CONFIG), continues setup: read the
 * extended features if both sides support them, otherwise request the
 * remote name (or report the device connected to mgmt), and finish
 * setup when no outgoing authentication is required.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
                    struct sk_buff *skb)
{
    struct hci_ev_remote_features *ev = data;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    if (!conn)
        goto unlock;

    if (!ev->status)
        memcpy(conn->features[0], ev->features, 8);

    if (conn->state != BT_CONFIG)
        goto unlock;

    /* Both sides support extended features: read page 1 next and
     * continue setup from that event instead.
     */
    if (!ev->status && lmp_ext_feat_capable(hdev) &&
        lmp_ext_feat_capable(conn)) {
        struct hci_cp_read_remote_ext_features cp;
        cp.handle = ev->handle;
        cp.page = 0x01;
        hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
                 sizeof(cp), &cp);
        goto unlock;
    }

    if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
        struct hci_cp_remote_name_req cp;
        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;
        hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
    } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
        mgmt_device_connected(hdev, conn, NULL, 0);

    if (!hci_outgoing_auth_needed(hdev, conn)) {
        conn->state = BT_CONNECTED;
        hci_connect_cfm(conn, ev->status);
        hci_conn_drop(conn);
    }

unlock:
    hci_dev_unlock(hdev);
}
3764 
/* Process the Num_HCI_Command_Packets value carried by an event.
 *
 * A non-zero @ncmd restores a single command credit and cancels the
 * ncmd timer; @ncmd == 0 means the controller currently accepts no
 * commands, so the ncmd timer is (re)armed — unless the command
 * workqueue is being drained — to catch credits never coming back.
 * Nothing is touched while an HCI reset is in progress.
 */
static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
{
    cancel_delayed_work(&hdev->cmd_timer);

    rcu_read_lock();
    if (!test_bit(HCI_RESET, &hdev->flags)) {
        if (ncmd) {
            cancel_delayed_work(&hdev->ncmd_timer);
            atomic_set(&hdev->cmd_cnt, 1);
        } else {
            if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
                queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
                           HCI_NCMD_TIMEOUT);
        }
    }
    rcu_read_unlock();
}
3782 
3783 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3784                     struct sk_buff *skb)
3785 {
3786     struct hci_rp_le_read_buffer_size_v2 *rp = data;
3787 
3788     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3789 
3790     if (rp->status)
3791         return rp->status;
3792 
3793     hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3794     hdev->le_pkts  = rp->acl_max_pkt;
3795     hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3796     hdev->iso_pkts = rp->iso_max_pkt;
3797 
3798     hdev->le_cnt  = hdev->le_pkts;
3799     hdev->iso_cnt = hdev->iso_pkts;
3800 
3801     BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3802            hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3803 
3804     return rp->status;
3805 }
3806 
/* Command Complete for HCI_LE_Set_CIG_Parameters.
 *
 * On failure, every pending connection of the CIG is closed and
 * deleted. On success, the returned CIS handles are assigned, in
 * order, to the not-yet-connected ISO links of that CIG, and CIS
 * creation is triggered for those whose underlying LE link is already
 * up.
 *
 * Returns the command status.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
                   struct sk_buff *skb)
{
    struct hci_rp_le_set_cig_params *rp = data;
    struct hci_conn *conn;
    int i = 0;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    hci_dev_lock(hdev);

    if (rp->status) {
        /* Tear down every connection belonging to this CIG */
        while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
            conn->state = BT_CLOSED;
            hci_connect_cfm(conn, rp->status);
            hci_conn_del(conn);
        }
        goto unlock;
    }

    rcu_read_lock();

    /* Hand out the returned handles to this CIG's pending ISO links */
    list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
        if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
            conn->state == BT_CONNECTED)
            continue;

        conn->handle = __le16_to_cpu(rp->handle[i++]);

        bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
               conn->handle, conn->link);

        /* Create CIS if LE is already connected */
        if (conn->link && conn->link->state == BT_CONNECTED)
            hci_le_create_cis(conn->link);

        if (i == rp->num_handles)
            break;
    }

    rcu_read_unlock();

unlock:
    hci_dev_unlock(hdev);

    return rp->status;
}
3854 
/* Command Complete for HCI_LE_Setup_ISO_Data_Path.
 *
 * Looks up the connection from the sent command's handle. On failure
 * the connection is confirmed with the error and deleted. On success
 * the connection is confirmed depending on the data path direction,
 * so that a bidirectional CIS is only confirmed once both paths are
 * set up.
 *
 * Returns the command status.
 */
static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
                   struct sk_buff *skb)
{
    struct hci_rp_le_setup_iso_path *rp = data;
    struct hci_cp_le_setup_iso_path *cp;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

    cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
    if (!cp)
        return rp->status;

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
    if (!conn)
        goto unlock;

    if (rp->status) {
        hci_connect_cfm(conn, rp->status);
        hci_conn_del(conn);
        goto unlock;
    }

    switch (cp->direction) {
    /* Input (Host to Controller) */
    case 0x00:
        /* Only confirm connection if output only */
        if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
            hci_connect_cfm(conn, rp->status);
        break;
    /* Output (Controller to Host) */
    case 0x01:
        /* Confirm connection since conn->iso_qos is always configured
         * last.
         */
        hci_connect_cfm(conn, rp->status);
        break;
    }

unlock:
    hci_dev_unlock(hdev);
    return rp->status;
}
3900 
/* Command Status for HCI_LE_Create_BIG: nothing to do here beyond
 * logging; the outcome is delivered via a separate meta event.
 */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
    bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
3905 
3906 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3907                    struct sk_buff *skb)
3908 {
3909     struct hci_ev_status *rp = data;
3910     struct hci_cp_le_set_per_adv_params *cp;
3911 
3912     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3913 
3914     if (rp->status)
3915         return rp->status;
3916 
3917     cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3918     if (!cp)
3919         return rp->status;
3920 
3921     /* TODO: set the conn state */
3922     return rp->status;
3923 }
3924 
3925 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3926                        struct sk_buff *skb)
3927 {
3928     struct hci_ev_status *rp = data;
3929     __u8 *sent;
3930 
3931     bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3932 
3933     if (rp->status)
3934         return rp->status;
3935 
3936     sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3937     if (!sent)
3938         return rp->status;
3939 
3940     hci_dev_lock(hdev);
3941 
3942     if (*sent)
3943         hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3944     else
3945         hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3946 
3947     hci_dev_unlock(hdev);
3948 
3949     return rp->status;
3950 }
3951 
/* Helpers for building entries of the command-complete dispatch table.
 *
 * HCI_CC_VL:     handler whose response payload length may vary
 *                between _min and _max bytes.
 * HCI_CC:        handler with a fixed-size response payload.
 * HCI_CC_STATUS: handler whose response is just a status byte.
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
    .op = _op, \
    .func = _func, \
    .min_len = _min, \
    .max_len = _max, \
}

#define HCI_CC(_op, _func, _len) \
    HCI_CC_VL(_op, _func, _len, _len)

#define HCI_CC_STATUS(_op, _func) \
    HCI_CC(_op, _func, sizeof(struct hci_ev_status))
3965 
3966 static const struct hci_cc {
3967     u16  op;
3968     u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
3969     u16  min_len;
3970     u16  max_len;
3971 } hci_cc_table[] = {
3972     HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
3973     HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
3974     HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
3975     HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
3976               hci_cc_remote_name_req_cancel),
3977     HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
3978            sizeof(struct hci_rp_role_discovery)),
3979     HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
3980            sizeof(struct hci_rp_read_link_policy)),
3981     HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
3982            sizeof(struct hci_rp_write_link_policy)),
3983     HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
3984            sizeof(struct hci_rp_read_def_link_policy)),
3985     HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
3986               hci_cc_write_def_link_policy),
3987     HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
3988     HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
3989            sizeof(struct hci_rp_read_stored_link_key)),
3990     HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
3991            sizeof(struct hci_rp_delete_stored_link_key)),
3992     HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
3993     HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
3994            sizeof(struct hci_rp_read_local_name)),
3995     HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
3996     HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
3997     HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
3998     HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
3999     HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4000            sizeof(struct hci_rp_read_class_of_dev)),
4001     HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4002     HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4003            sizeof(struct hci_rp_read_voice_setting)),
4004     HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4005     HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4006            sizeof(struct hci_rp_read_num_supported_iac)),
4007     HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4008     HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4009     HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4010            sizeof(struct hci_rp_read_auth_payload_to)),
4011     HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4012            sizeof(struct hci_rp_write_auth_payload_to)),
4013     HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4014            sizeof(struct hci_rp_read_local_version)),
4015     HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4016            sizeof(struct hci_rp_read_local_commands)),
4017     HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4018            sizeof(struct hci_rp_read_local_features)),
4019     HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4020            sizeof(struct hci_rp_read_local_ext_features)),
4021     HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4022            sizeof(struct hci_rp_read_buffer_size)),
4023     HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4024            sizeof(struct hci_rp_read_bd_addr)),
4025     HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4026            sizeof(struct hci_rp_read_local_pairing_opts)),
4027     HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4028            sizeof(struct hci_rp_read_page_scan_activity)),
4029     HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4030               hci_cc_write_page_scan_activity),
4031     HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4032            sizeof(struct hci_rp_read_page_scan_type)),
4033     HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4034     HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4035            sizeof(struct hci_rp_read_data_block_size)),
4036     HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4037            sizeof(struct hci_rp_read_flow_control_mode)),
4038     HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4039            sizeof(struct hci_rp_read_local_amp_info)),
4040     HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4041            sizeof(struct hci_rp_read_clock)),
4042     HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4043            sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4044     HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4045            hci_cc_read_def_err_data_reporting,
4046            sizeof(struct hci_rp_read_def_err_data_reporting)),
4047     HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4048               hci_cc_write_def_err_data_reporting),
4049     HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4050            sizeof(struct hci_rp_pin_code_reply)),
4051     HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4052            sizeof(struct hci_rp_pin_code_neg_reply)),
4053     HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4054            sizeof(struct hci_rp_read_local_oob_data)),
4055     HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4056            sizeof(struct hci_rp_read_local_oob_ext_data)),
4057     HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4058            sizeof(struct hci_rp_le_read_buffer_size)),
4059     HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4060            sizeof(struct hci_rp_le_read_local_features)),
4061     HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4062            sizeof(struct hci_rp_le_read_adv_tx_power)),
4063     HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4064            sizeof(struct hci_rp_user_confirm_reply)),
4065     HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4066            sizeof(struct hci_rp_user_confirm_reply)),
4067     HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4068            sizeof(struct hci_rp_user_confirm_reply)),
4069     HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4070            sizeof(struct hci_rp_user_confirm_reply)),
4071     HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4072     HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4073     HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4074     HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4075     HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4076            hci_cc_le_read_accept_list_size,
4077            sizeof(struct hci_rp_le_read_accept_list_size)),
4078     HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4079     HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4080               hci_cc_le_add_to_accept_list),
4081     HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4082               hci_cc_le_del_from_accept_list),
4083     HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4084            sizeof(struct hci_rp_le_read_supported_states)),
4085     HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4086            sizeof(struct hci_rp_le_read_def_data_len)),
4087     HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4088               hci_cc_le_write_def_data_len),
4089     HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4090               hci_cc_le_add_to_resolv_list),
4091     HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4092               hci_cc_le_del_from_resolv_list),
4093     HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4094               hci_cc_le_clear_resolv_list),
4095     HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4096            sizeof(struct hci_rp_le_read_resolv_list_size)),
4097     HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4098               hci_cc_le_set_addr_resolution_enable),
4099     HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4100            sizeof(struct hci_rp_le_read_max_data_len)),
4101     HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4102               hci_cc_write_le_host_supported),
4103     HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4104     HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4105            sizeof(struct hci_rp_read_rssi)),
4106     HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4107            sizeof(struct hci_rp_read_tx_power)),
4108     HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4109     HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4110               hci_cc_le_set_ext_scan_param),
4111     HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4112               hci_cc_le_set_ext_scan_enable),
4113     HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4114     HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4115            hci_cc_le_read_num_adv_sets,
4116            sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4117     HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4118            sizeof(struct hci_rp_le_set_ext_adv_params)),
4119     HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4120               hci_cc_le_set_ext_adv_enable),
4121     HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4122               hci_cc_le_set_adv_set_random_addr),
4123     HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4124     HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4125     HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4126     HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4127               hci_cc_le_set_per_adv_enable),
4128     HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4129            sizeof(struct hci_rp_le_read_transmit_power)),
4130     HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4131     HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4132            sizeof(struct hci_rp_le_read_buffer_size_v2)),
4133     HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4134           sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4135     HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4136            sizeof(struct hci_rp_le_setup_iso_path)),
4137 };
4138 
4139 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4140               struct sk_buff *skb)
4141 {
4142     void *data;
4143 
4144     if (skb->len < cc->min_len) {
4145         bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4146                cc->op, skb->len, cc->min_len);
4147         return HCI_ERROR_UNSPECIFIED;
4148     }
4149 
4150     /* Just warn if the length is over max_len size it still be possible to
4151      * partially parse the cc so leave to callback to decide if that is
4152      * acceptable.
4153      */
4154     if (skb->len > cc->max_len)
4155         bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4156                 cc->op, skb->len, cc->max_len);
4157 
4158     data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4159     if (!data)
4160         return HCI_ERROR_UNSPECIFIED;
4161 
4162     return cc->func(hdev, data, skb);
4163 }
4164 
4165 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4166                  struct sk_buff *skb, u16 *opcode, u8 *status,
4167                  hci_req_complete_t *req_complete,
4168                  hci_req_complete_skb_t *req_complete_skb)
4169 {
4170     struct hci_ev_cmd_complete *ev = data;
4171     int i;
4172 
4173     *opcode = __le16_to_cpu(ev->opcode);
4174 
4175     bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4176 
4177     for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4178         if (hci_cc_table[i].op == *opcode) {
4179             *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4180             break;
4181         }
4182     }
4183 
4184     if (i == ARRAY_SIZE(hci_cc_table)) {
4185         /* Unknown opcode, assume byte 0 contains the status, so
4186          * that e.g. __hci_cmd_sync() properly returns errors
4187          * for vendor specific commands send by HCI drivers.
4188          * If a vendor doesn't actually follow this convention we may
4189          * need to introduce a vendor CC table in order to properly set
4190          * the status.
4191          */
4192         *status = skb->data[0];
4193     }
4194 
4195     handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4196 
4197     hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4198                  req_complete_skb);
4199 
4200     if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4201         bt_dev_err(hdev,
4202                "unexpected event for opcode 0x%4.4x", *opcode);
4203         return;
4204     }
4205 
4206     if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4207         queue_work(hdev->workqueue, &hdev->cmd_work);
4208 }
4209 
/* Handle the Command Status for HCI_OP_LE_CREATE_CIS.  On failure, tear
 * down every connection that was named in the sent command, since the
 * controller will not be establishing any of those CISes.
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Nothing to clean up if the command was accepted. */
	if (!status)
		return;

	/* Recover the parameters of the command we sent; without them the
	 * affected CIS handles are unknown.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	/* Note: cp->num_cis is counted down in place while i walks the
	 * cis[] array, so each requested entry is visited exactly once.
	 */
	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}

	hci_dev_unlock(hdev);
}
4243 
/* Helper to build one Command Status dispatch entry: opcode -> handler. */
#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

/* Dispatch table for HCI Command Status events.  Each entry maps a command
 * opcode to the handler invoked with the status byte reported by the
 * controller (see hci_cmd_status_evt()).  Opcodes not listed here get no
 * per-command handling beyond the generic request completion logic.
 */
static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
4277 
/* Handle the HCI Command Status event: run the per-opcode handler from
 * hci_cs_table, refresh the command credit counter, and complete the
 * pending request when appropriate.
 *
 * @opcode and @status are out-parameters reported back to the caller;
 * @req_complete/@req_complete_skb receive the completion callbacks of the
 * finished request, if any.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb, u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		/* HCI_CMD_PENDING set here means another event is still
		 * expected for this command; do not kick the queue yet.
		 */
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
				   *opcode);
			return;
		}
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4319 
4320 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4321                    struct sk_buff *skb)
4322 {
4323     struct hci_ev_hardware_error *ev = data;
4324 
4325     bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4326 
4327     hdev->hw_error_code = ev->code;
4328 
4329     queue_work(hdev->req_workqueue, &hdev->error_reset);
4330 }
4331 
4332 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4333                 struct sk_buff *skb)
4334 {
4335     struct hci_ev_role_change *ev = data;
4336     struct hci_conn *conn;
4337 
4338     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4339 
4340     hci_dev_lock(hdev);
4341 
4342     conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4343     if (conn) {
4344         if (!ev->status)
4345             conn->role = ev->role;
4346 
4347         clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4348 
4349         hci_role_switch_cfm(conn, ev->status, ev->role);
4350     }
4351 
4352     hci_dev_unlock(hdev);
4353 }
4354 
4355 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4356                   struct sk_buff *skb)
4357 {
4358     struct hci_ev_num_comp_pkts *ev = data;
4359     int i;
4360 
4361     if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4362                  flex_array_size(ev, handles, ev->num)))
4363         return;
4364 
4365     if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4366         bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4367         return;
4368     }
4369 
4370     bt_dev_dbg(hdev, "num %d", ev->num);
4371 
4372     for (i = 0; i < ev->num; i++) {
4373         struct hci_comp_pkts_info *info = &ev->handles[i];
4374         struct hci_conn *conn;
4375         __u16  handle, count;
4376 
4377         handle = __le16_to_cpu(info->handle);
4378         count  = __le16_to_cpu(info->count);
4379 
4380         conn = hci_conn_hash_lookup_handle(hdev, handle);
4381         if (!conn)
4382             continue;
4383 
4384         conn->sent -= count;
4385 
4386         switch (conn->type) {
4387         case ACL_LINK:
4388             hdev->acl_cnt += count;
4389             if (hdev->acl_cnt > hdev->acl_pkts)
4390                 hdev->acl_cnt = hdev->acl_pkts;
4391             break;
4392 
4393         case LE_LINK:
4394             if (hdev->le_pkts) {
4395                 hdev->le_cnt += count;
4396                 if (hdev->le_cnt > hdev->le_pkts)
4397                     hdev->le_cnt = hdev->le_pkts;
4398             } else {
4399                 hdev->acl_cnt += count;
4400                 if (hdev->acl_cnt > hdev->acl_pkts)
4401                     hdev->acl_cnt = hdev->acl_pkts;
4402             }
4403             break;
4404 
4405         case SCO_LINK:
4406             hdev->sco_cnt += count;
4407             if (hdev->sco_cnt > hdev->sco_pkts)
4408                 hdev->sco_cnt = hdev->sco_pkts;
4409             break;
4410 
4411         case ISO_LINK:
4412             if (hdev->iso_pkts) {
4413                 hdev->iso_cnt += count;
4414                 if (hdev->iso_cnt > hdev->iso_pkts)
4415                     hdev->iso_cnt = hdev->iso_pkts;
4416             } else if (hdev->le_pkts) {
4417                 hdev->le_cnt += count;
4418                 if (hdev->le_cnt > hdev->le_pkts)
4419                     hdev->le_cnt = hdev->le_pkts;
4420             } else {
4421                 hdev->acl_cnt += count;
4422                 if (hdev->acl_cnt > hdev->acl_pkts)
4423                     hdev->acl_cnt = hdev->acl_pkts;
4424             }
4425             break;
4426 
4427         default:
4428             bt_dev_err(hdev, "unknown type %d conn %p",
4429                    conn->type, conn);
4430             break;
4431         }
4432     }
4433 
4434     queue_work(hdev->workqueue, &hdev->tx_work);
4435 }
4436 
4437 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4438                          __u16 handle)
4439 {
4440     struct hci_chan *chan;
4441 
4442     switch (hdev->dev_type) {
4443     case HCI_PRIMARY:
4444         return hci_conn_hash_lookup_handle(hdev, handle);
4445     case HCI_AMP:
4446         chan = hci_chan_lookup_handle(hdev, handle);
4447         if (chan)
4448             return chan->conn;
4449         break;
4450     default:
4451         bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4452         break;
4453     }
4454 
4455     return NULL;
4456 }
4457 
/* Handle the HCI Number of Completed Data Blocks event: return completed
 * data blocks to the shared block pool and kick the TX work.  Only valid
 * in block-based flow control mode (used by AMP controllers).
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = data;
	int i;

	/* Re-validate the skb against the flexible handles[] array length
	 * before touching any per-handle entry.
	 */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
			     flex_array_size(ev, handles, ev->num_hndl)))
		return;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d",
			   hdev->flow_ctl_mode);
		return;
	}

	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
		   ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may refer to either a connection or an AMP logical
		 * channel, depending on the device type.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp at the advertised pool size so a buggy
			 * controller cannot inflate our credit count.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4508 
4509 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4510                 struct sk_buff *skb)
4511 {
4512     struct hci_ev_mode_change *ev = data;
4513     struct hci_conn *conn;
4514 
4515     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4516 
4517     hci_dev_lock(hdev);
4518 
4519     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4520     if (conn) {
4521         conn->mode = ev->mode;
4522 
4523         if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4524                     &conn->flags)) {
4525             if (conn->mode == HCI_CM_ACTIVE)
4526                 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4527             else
4528                 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4529         }
4530 
4531         if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4532             hci_sco_setup(conn, ev->status);
4533     }
4534 
4535     hci_dev_unlock(hdev);
4536 }
4537 
/* Handle the HCI PIN Code Request event: either reject pairing outright
 * (device not bondable and we did not initiate authentication) or forward
 * the request to user space via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* hold + drop re-arms the disconnect timer with the (longer)
	 * pairing timeout, keeping the link up while the user enters a PIN.
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		/* Refuse pairing we neither allow nor initiated. */
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* High security requires a 16-digit (secure) PIN. */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
4576 
4577 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4578 {
4579     if (key_type == HCI_LK_CHANGED_COMBINATION)
4580         return;
4581 
4582     conn->pin_length = pin_len;
4583     conn->key_type = key_type;
4584 
4585     switch (key_type) {
4586     case HCI_LK_LOCAL_UNIT:
4587     case HCI_LK_REMOTE_UNIT:
4588     case HCI_LK_DEBUG_COMBINATION:
4589         return;
4590     case HCI_LK_COMBINATION:
4591         if (pin_len == 16)
4592             conn->pending_sec_level = BT_SECURITY_HIGH;
4593         else
4594             conn->pending_sec_level = BT_SECURITY_MEDIUM;
4595         break;
4596     case HCI_LK_UNAUTH_COMBINATION_P192:
4597     case HCI_LK_UNAUTH_COMBINATION_P256:
4598         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4599         break;
4600     case HCI_LK_AUTH_COMBINATION_P192:
4601         conn->pending_sec_level = BT_SECURITY_HIGH;
4602         break;
4603     case HCI_LK_AUTH_COMBINATION_P256:
4604         conn->pending_sec_level = BT_SECURITY_FIPS;
4605         break;
4606     }
4607 }
4608 
/* Handle the HCI Link Key Request event: look up a stored key for the
 * peer and reply with it, unless security policy forbids reusing it, in
 * which case send a negative reply so pairing is redone.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	bt_dev_dbg(hdev, "");

	/* Key storage is only maintained when mgmt controls the device. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
		goto not_found;
	}

	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

	/* The policy checks below depend on per-connection state, so they
	 * can only be applied when a connection object exists.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Do not reuse an unauthenticated key when the requested
		 * auth_type demands MITM protection (low bit set).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			bt_dev_dbg(hdev, "ignoring unauthenticated key");
			goto not_found;
		}

		/* A combination key from a short (<16 digit) PIN is not
		 * strong enough for high/FIPS security requests.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4666 
/* Handle the HCI Link Key Notification event: store the newly created
 * link key, notify user space, and decide whether the key persists
 * across disconnect/power-cycle.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;	/* this event carries no PIN length */

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* hold + drop re-arms the disconnect timer now that pairing is
	 * done, reverting to the normal (shorter) disconnect timeout.
	 */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Keys are only stored when mgmt controls the device. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the connection goes down. */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4727 
4728 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4729                  struct sk_buff *skb)
4730 {
4731     struct hci_ev_clock_offset *ev = data;
4732     struct hci_conn *conn;
4733 
4734     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4735 
4736     hci_dev_lock(hdev);
4737 
4738     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4739     if (conn && !ev->status) {
4740         struct inquiry_entry *ie;
4741 
4742         ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4743         if (ie) {
4744             ie->data.clock_offset = ev->clock_offset;
4745             ie->timestamp = jiffies;
4746         }
4747     }
4748 
4749     hci_dev_unlock(hdev);
4750 }
4751 
4752 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4753                     struct sk_buff *skb)
4754 {
4755     struct hci_ev_pkt_type_change *ev = data;
4756     struct hci_conn *conn;
4757 
4758     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4759 
4760     hci_dev_lock(hdev);
4761 
4762     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4763     if (conn && !ev->status)
4764         conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4765 
4766     hci_dev_unlock(hdev);
4767 }
4768 
4769 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4770                    struct sk_buff *skb)
4771 {
4772     struct hci_ev_pscan_rep_mode *ev = data;
4773     struct inquiry_entry *ie;
4774 
4775     bt_dev_dbg(hdev, "");
4776 
4777     hci_dev_lock(hdev);
4778 
4779     ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4780     if (ie) {
4781         ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4782         ie->timestamp = jiffies;
4783     }
4784 
4785     hci_dev_unlock(hdev);
4786 }
4787 
/* Handle the HCI Inquiry Result with RSSI event.  Controllers send one of
 * two wire formats (with or without a pscan_mode field); the total skb
 * length is used to tell them apart before iterating the entries.  Each
 * result updates the inquiry cache and is reported to mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
					     struct sk_buff *skb)
{
	struct hci_ev_inquiry_result_rssi *ev = edata;
	struct inquiry_data data;
	int i;

	bt_dev_dbg(hdev, "num_rsp %d", ev->num);

	if (!ev->num)
		return;

	/* Results from periodic inquiry are deliberately ignored. */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Variant with pscan_mode: identified by exact length match. */
	if (skb->len == array_size(ev->num,
				   sizeof(struct inquiry_info_rssi_pscan))) {
		struct inquiry_info_rssi_pscan *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			/* Pull each entry so malformed/short events cannot
			 * cause reads past the end of the skb.
			 */
			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else if (skb->len == array_size(ev->num,
					  sizeof(struct inquiry_info_rssi))) {
		/* Variant without pscan_mode: report pscan_mode as 0. */
		struct inquiry_info_rssi *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		/* Length matches neither known format: reject the event. */
		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
	}
unlock:
	hci_dev_unlock(hdev);
}
4874 
/* Handle HCI_EV_REMOTE_EXT_FEATURES: cache the reported remote features
 * page on the connection and, for page 0x01 (remote host features),
 * mirror the remote host's SSP and Secure Connections support into the
 * connection flags before continuing BR/EDR connection setup.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
                    struct sk_buff *skb)
{
    struct hci_ev_remote_ext_features *ev = data;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    if (!conn)
        goto unlock;

    /* Only store pages the per-connection feature array has room for */
    if (ev->page < HCI_MAX_PAGES)
        memcpy(conn->features[ev->page], ev->features, 8);

    if (!ev->status && ev->page == 0x01) {
        struct inquiry_entry *ie;

        /* Keep the inquiry cache in sync with the remote host's
         * SSP support.
         */
        ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
        if (ie)
            ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

        if (ev->features[0] & LMP_HOST_SSP) {
            set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
        } else {
            /* It is mandatory by the Bluetooth specification that
             * Extended Inquiry Results are only used when Secure
             * Simple Pairing is enabled, but some devices violate
             * this.
             *
             * To make these devices work, the internal SSP
             * enabled flag needs to be cleared if the remote host
             * features do not indicate SSP support */
            clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
        }

        if (ev->features[0] & LMP_HOST_SC)
            set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
    }

    /* The remaining setup steps only apply while still configuring */
    if (conn->state != BT_CONFIG)
        goto unlock;

    if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
        /* Resolve the remote name before the device is reported
         * as connected to the management interface.
         */
        struct hci_cp_remote_name_req cp;
        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;
        hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
    } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
        mgmt_device_connected(hdev, conn, NULL, 0);

    /* If no outgoing authentication is pending, setup is complete */
    if (!hci_outgoing_auth_needed(hdev, conn)) {
        conn->state = BT_CONNECTED;
        hci_connect_cfm(conn, ev->status);
        hci_conn_drop(conn);
    }

unlock:
    hci_dev_unlock(hdev);
}
4938 
4939 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4940                        struct sk_buff *skb)
4941 {
4942     struct hci_ev_sync_conn_complete *ev = data;
4943     struct hci_conn *conn;
4944     u8 status = ev->status;
4945 
4946     switch (ev->link_type) {
4947     case SCO_LINK:
4948     case ESCO_LINK:
4949         break;
4950     default:
4951         /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4952          * for HCI_Synchronous_Connection_Complete is limited to
4953          * either SCO or eSCO
4954          */
4955         bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4956         return;
4957     }
4958 
4959     bt_dev_dbg(hdev, "status 0x%2.2x", status);
4960 
4961     hci_dev_lock(hdev);
4962 
4963     conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4964     if (!conn) {
4965         if (ev->link_type == ESCO_LINK)
4966             goto unlock;
4967 
4968         /* When the link type in the event indicates SCO connection
4969          * and lookup of the connection object fails, then check
4970          * if an eSCO connection object exists.
4971          *
4972          * The core limits the synchronous connections to either
4973          * SCO or eSCO. The eSCO connection is preferred and tried
4974          * to be setup first and until successfully established,
4975          * the link type will be hinted as eSCO.
4976          */
4977         conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4978         if (!conn)
4979             goto unlock;
4980     }
4981 
4982     /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
4983      * Processing it more than once per connection can corrupt kernel memory.
4984      *
4985      * As the connection handle is set here for the first time, it indicates
4986      * whether the connection is already set up.
4987      */
4988     if (conn->handle != HCI_CONN_HANDLE_UNSET) {
4989         bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
4990         goto unlock;
4991     }
4992 
4993     switch (status) {
4994     case 0x00:
4995         conn->handle = __le16_to_cpu(ev->handle);
4996         if (conn->handle > HCI_CONN_HANDLE_MAX) {
4997             bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
4998                    conn->handle, HCI_CONN_HANDLE_MAX);
4999             status = HCI_ERROR_INVALID_PARAMETERS;
5000             conn->state = BT_CLOSED;
5001             break;
5002         }
5003 
5004         conn->state  = BT_CONNECTED;
5005         conn->type   = ev->link_type;
5006 
5007         hci_debugfs_create_conn(conn);
5008         hci_conn_add_sysfs(conn);
5009         break;
5010 
5011     case 0x10:  /* Connection Accept Timeout */
5012     case 0x0d:  /* Connection Rejected due to Limited Resources */
5013     case 0x11:  /* Unsupported Feature or Parameter Value */
5014     case 0x1c:  /* SCO interval rejected */
5015     case 0x1a:  /* Unsupported Remote Feature */
5016     case 0x1e:  /* Invalid LMP Parameters */
5017     case 0x1f:  /* Unspecified error */
5018     case 0x20:  /* Unsupported LMP Parameter value */
5019         if (conn->out) {
5020             conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5021                     (hdev->esco_type & EDR_ESCO_MASK);
5022             if (hci_setup_sync(conn, conn->link->handle))
5023                 goto unlock;
5024         }
5025         fallthrough;
5026 
5027     default:
5028         conn->state = BT_CLOSED;
5029         break;
5030     }
5031 
5032     bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5033     /* Notify only in case of SCO over HCI transport data path which
5034      * is zero and non-zero value shall be non-HCI transport data path
5035      */
5036     if (conn->codec.data_path == 0 && hdev->notify) {
5037         switch (ev->air_mode) {
5038         case 0x02:
5039             hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5040             break;
5041         case 0x03:
5042             hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5043             break;
5044         }
5045     }
5046 
5047     hci_connect_cfm(conn, status);
5048     if (status)
5049         hci_conn_del(conn);
5050 
5051 unlock:
5052     hci_dev_unlock(hdev);
5053 }
5054 
5055 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5056 {
5057     size_t parsed = 0;
5058 
5059     while (parsed < eir_len) {
5060         u8 field_len = eir[0];
5061 
5062         if (field_len == 0)
5063             return parsed;
5064 
5065         parsed += field_len + 1;
5066         eir += field_len + 1;
5067     }
5068 
5069     return eir_len;
5070 }
5071 
/* Handle HCI_EV_EXTENDED_INQUIRY_RESULT: inquiry results that carry an
 * RSSI plus an EIR payload per entry.  Each discovered device is merged
 * into the inquiry cache and reported to the management interface
 * together with its EIR data.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
                        struct sk_buff *skb)
{
    struct hci_ev_ext_inquiry_result *ev = edata;
    struct inquiry_data data;
    size_t eir_len;
    int i;

    /* Verify the skb actually holds ev->num result entries */
    if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
                 flex_array_size(ev, info, ev->num)))
        return;

    bt_dev_dbg(hdev, "num %d", ev->num);

    if (!ev->num)
        return;

    /* Results of an ongoing periodic inquiry are ignored */
    if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
        return;

    hci_dev_lock(hdev);

    for (i = 0; i < ev->num; i++) {
        struct extended_inquiry_info *info = &ev->info[i];
        u32 flags;
        bool name_known;

        bacpy(&data.bdaddr, &info->bdaddr);
        data.pscan_rep_mode = info->pscan_rep_mode;
        data.pscan_period_mode  = info->pscan_period_mode;
        data.pscan_mode     = 0x00;
        memcpy(data.dev_class, info->dev_class, 3);
        data.clock_offset   = info->clock_offset;
        data.rssi       = info->rssi;
        data.ssp_mode       = 0x01;

        /* With mgmt, the name only counts as known when the EIR
         * data already contains the complete name field.
         */
        if (hci_dev_test_flag(hdev, HCI_MGMT))
            name_known = eir_get_data(info->data,
                          sizeof(info->data),
                          EIR_NAME_COMPLETE, NULL);
        else
            name_known = true;

        flags = hci_inquiry_cache_update(hdev, &data, name_known);

        /* Strip the trailing unused portion of the EIR buffer
         * before reporting it.
         */
        eir_len = eir_get_length(info->data, sizeof(info->data));

        mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                  info->dev_class, info->rssi,
                  flags, info->data, eir_len, NULL, 0);
    }

    hci_dev_unlock(hdev);
}
5126 
/* Handle HCI_EV_KEY_REFRESH_COMPLETE: the encryption key of a link has
 * been refreshed.  Only LE links are processed here; on success the
 * pending security level takes effect, on failure an established
 * connection is disconnected with an authentication failure reason.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
                     struct sk_buff *skb)
{
    struct hci_ev_key_refresh_complete *ev = data;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
           __le16_to_cpu(ev->handle));

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    if (!conn)
        goto unlock;

    /* For BR/EDR the necessary steps are taken through the
     * auth_complete event.
     */
    if (conn->type != LE_LINK)
        goto unlock;

    if (!ev->status)
        conn->sec_level = conn->pending_sec_level;

    /* Encryption is no longer pending, regardless of the outcome */
    clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

    if (ev->status && conn->state == BT_CONNECTED) {
        hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
        hci_conn_drop(conn);
        goto unlock;
    }

    /* While configuring, confirm the connection attempt; otherwise
     * report the authentication result to waiting users.
     */
    if (conn->state == BT_CONFIG) {
        if (!ev->status)
            conn->state = BT_CONNECTED;

        hci_connect_cfm(conn, ev->status);
        hci_conn_drop(conn);
    } else {
        hci_auth_cfm(conn, ev->status);

        hci_conn_hold(conn);
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
        hci_conn_drop(conn);
    }

unlock:
    hci_dev_unlock(hdev);
}
5176 
5177 static u8 hci_get_auth_req(struct hci_conn *conn)
5178 {
5179     /* If remote requests no-bonding follow that lead */
5180     if (conn->remote_auth == HCI_AT_NO_BONDING ||
5181         conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5182         return conn->remote_auth | (conn->auth_type & 0x01);
5183 
5184     /* If both remote and local have enough IO capabilities, require
5185      * MITM protection
5186      */
5187     if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5188         conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5189         return conn->remote_auth | 0x01;
5190 
5191     /* No MITM protection possible so ignore remote requirement */
5192     return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5193 }
5194 
/* Return the OOB Data Present value to use in an IO Capability Reply
 * for this BR/EDR connection, derived from the OOB data stored for the
 * peer: 0x00 when no usable data exists, 0x01 for P-192 values, 0x02
 * for P-256 values, or the stored present value when Secure
 * Connections is enabled and SC Only mode is off.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
    struct hci_dev *hdev = conn->hdev;
    struct oob_data *data;

    data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
    if (!data)
        return 0x00;

    if (bredr_sc_enabled(hdev)) {
        /* When Secure Connections is enabled, then just
         * return the present value stored with the OOB
         * data. The stored value contains the right present
         * information. However it can only be trusted when
         * not in Secure Connection Only mode.
         */
        if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
            return data->present;

        /* When Secure Connections Only mode is enabled, then
         * the P-256 values are required. If they are not
         * available, then do not declare that OOB data is
         * present.
         */
        if (!memcmp(data->rand256, ZERO_KEY, 16) ||
            !memcmp(data->hash256, ZERO_KEY, 16))
            return 0x00;

        return 0x02;
    }

    /* When Secure Connections is not enabled or actually
     * not supported by the hardware, then check that if
     * P-192 data values are present.
     */
    if (!memcmp(data->rand192, ZERO_KEY, 16) ||
        !memcmp(data->hash192, ZERO_KEY, 16))
        return 0x00;

    return 0x01;
}
5236 
/* Handle HCI_EV_IO_CAPA_REQUEST: the controller asks for our IO
 * capabilities during Secure Simple Pairing.  Replies with an IO
 * Capability Reply when pairing is allowed, otherwise with a negative
 * reply rejecting the pairing attempt.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
                    struct sk_buff *skb)
{
    struct hci_ev_io_capa_request *ev = data;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "");

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
    if (!conn)
        goto unlock;

    /* Keep the connection alive for the duration of the pairing
     * exchange.
     */
    hci_conn_hold(conn);

    if (!hci_dev_test_flag(hdev, HCI_MGMT))
        goto unlock;

    /* Allow pairing if we're pairable, the initiators of the
     * pairing or if the remote is not requesting bonding.
     */
    if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
        test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
        (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
        struct hci_cp_io_capability_reply cp;

        bacpy(&cp.bdaddr, &ev->bdaddr);
        /* Change the IO capability from KeyboardDisplay
         * to DisplayYesNo as it is not supported by BT spec. */
        cp.capability = (conn->io_capability == 0x04) ?
                HCI_IO_DISPLAY_YESNO : conn->io_capability;

        /* If we are initiators, there is no remote information yet */
        if (conn->remote_auth == 0xff) {
            /* Request MITM protection if our IO caps allow it
             * except for the no-bonding case.
             */
            if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
                conn->auth_type != HCI_AT_NO_BONDING)
                conn->auth_type |= 0x01;
        } else {
            conn->auth_type = hci_get_auth_req(conn);
        }

        /* If we're not bondable, force one of the non-bondable
         * authentication requirement values.
         */
        if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
            conn->auth_type &= HCI_AT_NO_BONDING_MITM;

        cp.authentication = conn->auth_type;
        cp.oob_data = bredr_oob_data_present(conn);

        hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
                 sizeof(cp), &cp);
    } else {
        struct hci_cp_io_capability_neg_reply cp;

        bacpy(&cp.bdaddr, &ev->bdaddr);
        cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

        hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
                 sizeof(cp), &cp);
    }

unlock:
    hci_dev_unlock(hdev);
}
5306 
5307 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5308                   struct sk_buff *skb)
5309 {
5310     struct hci_ev_io_capa_reply *ev = data;
5311     struct hci_conn *conn;
5312 
5313     bt_dev_dbg(hdev, "");
5314 
5315     hci_dev_lock(hdev);
5316 
5317     conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5318     if (!conn)
5319         goto unlock;
5320 
5321     conn->remote_cap = ev->capability;
5322     conn->remote_auth = ev->authentication;
5323 
5324 unlock:
5325     hci_dev_unlock(hdev);
5326 }
5327 
/* Handle HCI_EV_USER_CONFIRM_REQUEST: decide whether the numeric
 * comparison can be auto-accepted in the kernel or must be forwarded to
 * user space via mgmt (optionally with confirm_hint set, telling user
 * space to ask for plain authorization instead of showing the passkey).
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
                     struct sk_buff *skb)
{
    struct hci_ev_user_confirm_req *ev = data;
    int loc_mitm, rem_mitm, confirm_hint = 0;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "");

    hci_dev_lock(hdev);

    if (!hci_dev_test_flag(hdev, HCI_MGMT))
        goto unlock;

    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
    if (!conn)
        goto unlock;

    /* Bit 0 of the authentication requirement encodes MITM */
    loc_mitm = (conn->auth_type & 0x01);
    rem_mitm = (conn->remote_auth & 0x01);

    /* If we require MITM but the remote device can't provide that
     * (it has NoInputNoOutput) then reject the confirmation
     * request. We check the security level here since it doesn't
     * necessarily match conn->auth_type.
     */
    if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
        conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
        bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
        hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
                 sizeof(ev->bdaddr), &ev->bdaddr);
        goto unlock;
    }

    /* If no side requires MITM protection; auto-accept */
    if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
        (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

        /* If we're not the initiators request authorization to
         * proceed from user space (mgmt_user_confirm with
         * confirm_hint set to 1). The exception is if neither
         * side had MITM or if the local IO capability is
         * NoInputNoOutput, in which case we do auto-accept
         */
        if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
            conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
            (loc_mitm || rem_mitm)) {
            bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
            confirm_hint = 1;
            goto confirm;
        }

        /* If there already exists link key in local host, leave the
         * decision to user space since the remote device could be
         * legitimate or malicious.
         */
        if (hci_find_link_key(hdev, &ev->bdaddr)) {
            bt_dev_dbg(hdev, "Local host already has link key");
            confirm_hint = 1;
            goto confirm;
        }

        BT_DBG("Auto-accept of user confirmation with %ums delay",
               hdev->auto_accept_delay);

        /* A configured delay defers the auto-accept to a delayed
         * work item; otherwise accept immediately.
         */
        if (hdev->auto_accept_delay > 0) {
            int delay = msecs_to_jiffies(hdev->auto_accept_delay);
            queue_delayed_work(conn->hdev->workqueue,
                       &conn->auto_accept_work, delay);
            goto unlock;
        }

        hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
                 sizeof(ev->bdaddr), &ev->bdaddr);
        goto unlock;
    }

confirm:
    mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
                  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
    hci_dev_unlock(hdev);
}
5412 
5413 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5414                      struct sk_buff *skb)
5415 {
5416     struct hci_ev_user_passkey_req *ev = data;
5417 
5418     bt_dev_dbg(hdev, "");
5419 
5420     if (hci_dev_test_flag(hdev, HCI_MGMT))
5421         mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5422 }
5423 
5424 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5425                     struct sk_buff *skb)
5426 {
5427     struct hci_ev_user_passkey_notify *ev = data;
5428     struct hci_conn *conn;
5429 
5430     bt_dev_dbg(hdev, "");
5431 
5432     conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5433     if (!conn)
5434         return;
5435 
5436     conn->passkey_notify = __le32_to_cpu(ev->passkey);
5437     conn->passkey_entered = 0;
5438 
5439     if (hci_dev_test_flag(hdev, HCI_MGMT))
5440         mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5441                      conn->dst_type, conn->passkey_notify,
5442                      conn->passkey_entered);
5443 }
5444 
5445 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5446                     struct sk_buff *skb)
5447 {
5448     struct hci_ev_keypress_notify *ev = data;
5449     struct hci_conn *conn;
5450 
5451     bt_dev_dbg(hdev, "");
5452 
5453     conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5454     if (!conn)
5455         return;
5456 
5457     switch (ev->type) {
5458     case HCI_KEYPRESS_STARTED:
5459         conn->passkey_entered = 0;
5460         return;
5461 
5462     case HCI_KEYPRESS_ENTERED:
5463         conn->passkey_entered++;
5464         break;
5465 
5466     case HCI_KEYPRESS_ERASED:
5467         conn->passkey_entered--;
5468         break;
5469 
5470     case HCI_KEYPRESS_CLEARED:
5471         conn->passkey_entered = 0;
5472         break;
5473 
5474     case HCI_KEYPRESS_COMPLETED:
5475         return;
5476     }
5477 
5478     if (hci_dev_test_flag(hdev, HCI_MGMT))
5479         mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5480                      conn->dst_type, conn->passkey_notify,
5481                      conn->passkey_entered);
5482 }
5483 
/* Handle HCI_EV_SIMPLE_PAIR_COMPLETE: Secure Simple Pairing finished.
 * Resets the cached remote authentication requirement and reports a
 * pairing failure to mgmt when we were not the initiator (the initiator
 * path is covered by the auth_complete event).
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
                     struct sk_buff *skb)
{
    struct hci_ev_simple_pair_complete *ev = data;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "");

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
    if (!conn)
        goto unlock;

    /* Reset the authentication requirement to unknown */
    conn->remote_auth = 0xff;

    /* To avoid duplicate auth_failed events to user space we check
     * the HCI_CONN_AUTH_PEND flag which will be set if we
     * initiated the authentication. A traditional auth_complete
     * event gets always produced as initiator and is also mapped to
     * the mgmt_auth_failed event */
    if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
        mgmt_auth_failed(conn, ev->status);

    /* Drops the hold taken when the IO capability request arrived */
    hci_conn_drop(conn);

unlock:
    hci_dev_unlock(hdev);
}
5514 
/* Handle HCI_EV_REMOTE_HOST_FEATURES: cache the remote host features on
 * page 1 of the connection's feature array (if a connection exists) and
 * mirror the remote host's SSP support into the inquiry cache entry.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
                     struct sk_buff *skb)
{
    struct hci_ev_remote_host_features *ev = data;
    struct inquiry_entry *ie;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "");

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
    if (conn)
        memcpy(conn->features[1], ev->features, 8);

    ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
    if (ie)
        ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

    hci_dev_unlock(hdev);
}
5536 
/* Handle HCI_EV_REMOTE_OOB_DATA_REQUEST: supply the locally stored OOB
 * hash/randomizer values for the peer, or send a negative reply when
 * none are stored.  With Secure Connections enabled the extended reply
 * carrying both P-192 and P-256 values is used; in SC Only mode the
 * P-192 values are zeroed out.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
                        struct sk_buff *skb)
{
    struct hci_ev_remote_oob_data_request *ev = edata;
    struct oob_data *data;

    bt_dev_dbg(hdev, "");

    hci_dev_lock(hdev);

    if (!hci_dev_test_flag(hdev, HCI_MGMT))
        goto unlock;

    data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
    if (!data) {
        struct hci_cp_remote_oob_data_neg_reply cp;

        bacpy(&cp.bdaddr, &ev->bdaddr);
        hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
                 sizeof(cp), &cp);
        goto unlock;
    }

    if (bredr_sc_enabled(hdev)) {
        struct hci_cp_remote_oob_ext_data_reply cp;

        bacpy(&cp.bdaddr, &ev->bdaddr);
        /* In SC Only mode only the P-256 values may be used */
        if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
            memset(cp.hash192, 0, sizeof(cp.hash192));
            memset(cp.rand192, 0, sizeof(cp.rand192));
        } else {
            memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
            memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
        }
        memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
        memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

        hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
                 sizeof(cp), &cp);
    } else {
        struct hci_cp_remote_oob_data_reply cp;

        bacpy(&cp.bdaddr, &ev->bdaddr);
        memcpy(cp.hash, data->hash192, sizeof(cp.hash));
        memcpy(cp.rand, data->rand192, sizeof(cp.rand));

        hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
                 sizeof(cp), &cp);
    }

unlock:
    hci_dev_unlock(hdev);
}
5590 
5591 #if IS_ENABLED(CONFIG_BT_HS)
5592 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5593                   struct sk_buff *skb)
5594 {
5595     struct hci_ev_channel_selected *ev = data;
5596     struct hci_conn *hcon;
5597 
5598     bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5599 
5600     hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5601     if (!hcon)
5602         return;
5603 
5604     amp_read_loc_assoc_final_data(hdev, hcon);
5605 }
5606 
/* Handle HCI_EV_PHY_LINK_COMPLETE (AMP): an AMP physical link attempt
 * finished.  On success the hcon is marked connected, tied to the
 * BR/EDR connection owning the AMP manager and confirmed towards the
 * A2MP layer; on failure the hcon is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
                      struct sk_buff *skb)
{
    struct hci_ev_phy_link_complete *ev = data;
    struct hci_conn *hcon, *bredr_hcon;

    bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
           ev->status);

    hci_dev_lock(hdev);

    hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
    if (!hcon)
        goto unlock;

    /* Without an AMP manager there is nothing to complete */
    if (!hcon->amp_mgr)
        goto unlock;

    if (ev->status) {
        hci_conn_del(hcon);
        goto unlock;
    }

    /* The BR/EDR connection carrying the A2MP signalling */
    bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

    hcon->state = BT_CONNECTED;
    bacpy(&hcon->dst, &bredr_hcon->dst);

    hci_conn_hold(hcon);
    hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
    hci_conn_drop(hcon);

    hci_debugfs_create_conn(hcon);
    hci_conn_add_sysfs(hcon);

    amp_physical_cfm(bredr_hcon, hcon);

unlock:
    hci_dev_unlock(hdev);
}
5647 
/* Handle HCI_EV_LOGICAL_LINK_COMPLETE (AMP): create an hci_chan for the
 * new logical link and, when an L2CAP channel is waiting on the AMP
 * manager, complete the logical link confirmation towards L2CAP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
                     struct sk_buff *skb)
{
    struct hci_ev_logical_link_complete *ev = data;
    struct hci_conn *hcon;
    struct hci_chan *hchan;
    struct amp_mgr *mgr;

    bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
           le16_to_cpu(ev->handle), ev->phy_handle, ev->status);

    hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
    if (!hcon)
        return;

    /* Create AMP hchan */
    hchan = hci_chan_create(hcon);
    if (!hchan)
        return;

    hchan->handle = le16_to_cpu(ev->handle);
    hchan->amp = true;

    BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

    mgr = hcon->amp_mgr;
    if (mgr && mgr->bredr_chan) {
        struct l2cap_chan *bredr_chan = mgr->bredr_chan;

        l2cap_chan_lock(bredr_chan);

        /* Switch the L2CAP MTU to the AMP controller's block MTU
         * and confirm the logical link to L2CAP.
         */
        bredr_chan->conn->mtu = hdev->block_mtu;
        l2cap_logical_cfm(bredr_chan, hchan, 0);
        hci_conn_hold(hcon);

        l2cap_chan_unlock(bredr_chan);
    }
}
5686 
5687 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5688                          struct sk_buff *skb)
5689 {
5690     struct hci_ev_disconn_logical_link_complete *ev = data;
5691     struct hci_chan *hchan;
5692 
5693     bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5694            le16_to_cpu(ev->handle), ev->status);
5695 
5696     if (ev->status)
5697         return;
5698 
5699     hci_dev_lock(hdev);
5700 
5701     hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5702     if (!hchan || !hchan->amp)
5703         goto unlock;
5704 
5705     amp_destroy_logical_link(hchan, ev->reason);
5706 
5707 unlock:
5708     hci_dev_unlock(hdev);
5709 }
5710 
5711 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5712                          struct sk_buff *skb)
5713 {
5714     struct hci_ev_disconn_phy_link_complete *ev = data;
5715     struct hci_conn *hcon;
5716 
5717     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5718 
5719     if (ev->status)
5720         return;
5721 
5722     hci_dev_lock(hdev);
5723 
5724     hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5725     if (hcon && hcon->type == AMP_LINK) {
5726         hcon->state = BT_CLOSED;
5727         hci_disconn_cfm(hcon, ev->reason);
5728         hci_conn_del(hcon);
5729     }
5730 
5731     hci_dev_unlock(hdev);
5732 }
5733 #endif
5734 
/* Record the initiator and responder addresses on a newly completed LE
 * connection.  For outgoing (central) connections the peer is the
 * responder; for incoming ones the peer is the initiator.  A non-zero
 * Local RPA reported by the controller takes precedence over the
 * address the host believes it used.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
                u8 bdaddr_type, bdaddr_t *local_rpa)
{
    if (conn->out) {
        conn->dst_type = bdaddr_type;
        conn->resp_addr_type = bdaddr_type;
        bacpy(&conn->resp_addr, bdaddr);

        /* Check if the controller has set a Local RPA then it must be
         * used instead of hdev->rpa.
         */
        if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
            conn->init_addr_type = ADDR_LE_DEV_RANDOM;
            bacpy(&conn->init_addr, local_rpa);
        } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
            conn->init_addr_type = ADDR_LE_DEV_RANDOM;
            bacpy(&conn->init_addr, &conn->hdev->rpa);
        } else {
            hci_copy_identity_address(conn->hdev, &conn->init_addr,
                          &conn->init_addr_type);
        }
    } else {
        conn->resp_addr_type = conn->hdev->adv_addr_type;
        /* Check if the controller has set a Local RPA then it must be
         * used instead of hdev->rpa.
         */
        if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
            conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
            bacpy(&conn->resp_addr, local_rpa);
        } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
            /* In case of ext adv, resp_addr will be updated in
             * Adv Terminated event.
             */
            if (!ext_adv_capable(conn->hdev))
                bacpy(&conn->resp_addr,
                      &conn->hdev->random_addr);
        } else {
            bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
        }

        conn->init_addr_type = bdaddr_type;
        bacpy(&conn->init_addr, bdaddr);

        /* For incoming connections, set the default minimum
         * and maximum connection interval. They will be used
         * to check if the parameters are in range and if not
         * trigger the connection update procedure.
         */
        conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
        conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
    }
}
5787 
/* Common handler for the legacy LE Connection Complete and the LE Enhanced
 * Connection Complete events.
 *
 * Creates (or looks up) the hci_conn for @bdaddr, resolves the peer identity
 * via its IRK, validates the connection handle and transitions the connection
 * into BT_CONFIG (or straight to BT_CONNECTED when no remote feature read is
 * issued).
 *
 * @local_rpa: Local RPA reported by the controller, or NULL for the legacy
 *             event which does not carry one.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
                 bdaddr_t *bdaddr, u8 bdaddr_type,
                 bdaddr_t *local_rpa, u8 role, u16 handle,
                 u16 interval, u16 latency,
                 u16 supervision_timeout)
{
    struct hci_conn_params *params;
    struct hci_conn *conn;
    struct smp_irk *irk;
    u8 addr_type;

    hci_dev_lock(hdev);

    /* All controllers implicitly stop advertising in the event of a
     * connection, so ensure that the state bit is cleared.
     */
    hci_dev_clear_flag(hdev, HCI_LE_ADV);

    conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
    if (!conn) {
        /* In case of error status and there is no connection pending
         * just unlock as there is nothing to cleanup.
         */
        if (status)
            goto unlock;

        conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
        if (!conn) {
            bt_dev_err(hdev, "no memory for new connection");
            goto unlock;
        }

        conn->dst_type = bdaddr_type;

        /* If we didn't have a hci_conn object previously
         * but we're in central role this must be something
         * initiated using an accept list. Since accept list based
         * connections are not "first class citizens" we don't
         * have full tracking of them. Therefore, we go ahead
         * with a "best effort" approach of determining the
         * initiator address based on the HCI_PRIVACY flag.
         */
        if (conn->out) {
            conn->resp_addr_type = bdaddr_type;
            bacpy(&conn->resp_addr, bdaddr);
            if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
                conn->init_addr_type = ADDR_LE_DEV_RANDOM;
                bacpy(&conn->init_addr, &hdev->rpa);
            } else {
                hci_copy_identity_address(hdev,
                              &conn->init_addr,
                              &conn->init_addr_type);
            }
        }
    } else {
        /* The connect attempt completed (for better or worse), so
         * the pending connection timeout no longer applies.
         */
        cancel_delayed_work(&conn->le_conn_timeout);
    }

    /* The HCI_LE_Connection_Complete event is only sent once per connection.
     * Processing it more than once per connection can corrupt kernel memory.
     *
     * As the connection handle is set here for the first time, it indicates
     * whether the connection is already set up.
     */
    if (conn->handle != HCI_CONN_HANDLE_UNSET) {
        bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
        goto unlock;
    }

    le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

    /* Lookup the identity address from the stored connection
     * address and address type.
     *
     * When establishing connections to an identity address, the
     * connection procedure will store the resolvable random
     * address first. Now if it can be converted back into the
     * identity address, start using the identity address from
     * now on.
     */
    irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
    if (irk) {
        bacpy(&conn->dst, &irk->bdaddr);
        conn->dst_type = irk->addr_type;
    }

    conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

    /* Out-of-range handles are treated as a controller error; the
     * failure path below then tears the attempt down.
     */
    if (handle > HCI_CONN_HANDLE_MAX) {
        bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
               HCI_CONN_HANDLE_MAX);
        status = HCI_ERROR_INVALID_PARAMETERS;
    }

    /* All connection failure handling is taken care of by the
     * hci_conn_failed function which is triggered by the HCI
     * request completion callbacks used for connecting.
     */
    if (status)
        goto unlock;

    if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
        addr_type = BDADDR_LE_PUBLIC;
    else
        addr_type = BDADDR_LE_RANDOM;

    /* Drop the connection if the device is blocked */
    if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
        hci_conn_drop(conn);
        goto unlock;
    }

    if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
        mgmt_device_connected(hdev, conn, NULL, 0);

    conn->sec_level = BT_SECURITY_LOW;
    conn->handle = handle;
    conn->state = BT_CONFIG;

    /* Store current advertising instance as connection advertising instance
     * when software rotation is in use so it can be re-enabled when
     * disconnected.
     */
    if (!ext_adv_capable(hdev))
        conn->adv_instance = hdev->cur_adv_instance;

    conn->le_conn_interval = interval;
    conn->le_conn_latency = latency;
    conn->le_supv_timeout = supervision_timeout;

    hci_debugfs_create_conn(conn);
    hci_conn_add_sysfs(conn);

    /* The remote features procedure is defined for central
     * role only. So only in case of an initiated connection
     * request the remote features.
     *
     * If the local controller supports peripheral-initiated features
     * exchange, then requesting the remote features in peripheral
     * role is possible. Otherwise just transition into the
     * connected state without requesting the remote features.
     */
    if (conn->out ||
        (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
        struct hci_cp_le_read_remote_features cp;

        cp.handle = __cpu_to_le16(conn->handle);

        hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
                 sizeof(cp), &cp);

        hci_conn_hold(conn);
    } else {
        conn->state = BT_CONNECTED;
        hci_connect_cfm(conn, status);
    }

    /* The connection attempt this params entry triggered has now
     * completed, so drop the action and the reference the params
     * held on the connection object.
     */
    params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
                       conn->dst_type);
    if (params) {
        list_del_init(&params->action);
        if (params->conn) {
            hci_conn_drop(params->conn);
            hci_conn_put(params->conn);
            params->conn = NULL;
        }
    }

unlock:
    /* Re-evaluate passive scanning regardless of outcome */
    hci_update_passive_scan(hdev);
    hci_dev_unlock(hdev);
}
5960 
5961 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5962                      struct sk_buff *skb)
5963 {
5964     struct hci_ev_le_conn_complete *ev = data;
5965 
5966     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5967 
5968     le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5969                  NULL, ev->role, le16_to_cpu(ev->handle),
5970                  le16_to_cpu(ev->interval),
5971                  le16_to_cpu(ev->latency),
5972                  le16_to_cpu(ev->supervision_timeout));
5973 }
5974 
5975 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5976                      struct sk_buff *skb)
5977 {
5978     struct hci_ev_le_enh_conn_complete *ev = data;
5979 
5980     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5981 
5982     le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5983                  &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5984                  le16_to_cpu(ev->interval),
5985                  le16_to_cpu(ev->latency),
5986                  le16_to_cpu(ev->supervision_timeout));
5987 }
5988 
/* Handle the LE Advertising Set Terminated event.
 *
 * On error status the advertising instance is removed and, if no instance
 * remains enabled, HCI_LE_ADV is cleared. On success (set terminated due to
 * a connection) the instance is marked disabled and the new connection's
 * resp_addr/adv_instance are fixed up from the terminated set.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
                    struct sk_buff *skb)
{
    struct hci_evt_le_ext_adv_set_term *ev = data;
    struct hci_conn *conn;
    struct adv_info *adv, *n;

    bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

    /* The Bluetooth Core 5.3 specification clearly states that this event
     * shall not be sent when the Host disables the advertising set. So in
     * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
     *
     * When the Host disables an advertising set, all cleanup is done via
     * its command callback and not needed to be duplicated here.
     */
    if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
        bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
        return;
    }

    hci_dev_lock(hdev);

    adv = hci_find_adv_instance(hdev, ev->handle);

    if (ev->status) {
        if (!adv)
            goto unlock;

        /* Remove advertising as it has been terminated */
        hci_remove_adv_instance(hdev, ev->handle);
        mgmt_advertising_removed(NULL, hdev, ev->handle);

        /* If any other instance is still enabled, advertising as a
         * whole is still active; keep HCI_LE_ADV set.
         */
        list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
            if (adv->enabled)
                goto unlock;
        }

        /* We are no longer advertising, clear HCI_LE_ADV */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);
        goto unlock;
    }

    if (adv)
        adv->enabled = false;

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
    if (conn) {
        /* Store handle in the connection so the correct advertising
         * instance can be re-enabled when disconnected.
         */
        conn->adv_instance = ev->handle;

        /* Only fix up resp_addr when a random address is in use and
         * it has not been set already.
         */
        if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
            bacmp(&conn->resp_addr, BDADDR_ANY))
            goto unlock;

        /* Instance 0 uses the device-wide random address; other
         * instances use their per-set random address.
         */
        if (!ev->handle) {
            bacpy(&conn->resp_addr, &hdev->random_addr);
            goto unlock;
        }

        if (adv)
            bacpy(&conn->resp_addr, &adv->random_addr);
    }

unlock:
    hci_dev_unlock(hdev);
}
6058 
6059 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6060                         struct sk_buff *skb)
6061 {
6062     struct hci_ev_le_conn_update_complete *ev = data;
6063     struct hci_conn *conn;
6064 
6065     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6066 
6067     if (ev->status)
6068         return;
6069 
6070     hci_dev_lock(hdev);
6071 
6072     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6073     if (conn) {
6074         conn->le_conn_interval = le16_to_cpu(ev->interval);
6075         conn->le_conn_latency = le16_to_cpu(ev->latency);
6076         conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6077     }
6078 
6079     hci_dev_unlock(hdev);
6080 }
6081 
/* This function requires the caller holds hdev->lock */
/* Decide whether an advertising report should trigger an outgoing LE
 * connection and, if so, initiate it.
 *
 * Returns the resulting hci_conn when a connection attempt was started
 * (or is already in progress for explicit connects), NULL otherwise.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
                          bdaddr_t *addr,
                          u8 addr_type, bool addr_resolved,
                          u8 adv_type)
{
    struct hci_conn *conn;
    struct hci_conn_params *params;

    /* If the event is not connectable don't proceed further */
    if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
        return NULL;

    /* Ignore if the device is blocked or hdev is suspended */
    if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
        hdev->suspended)
        return NULL;

    /* Most controller will fail if we try to create new connections
     * while we have an existing one in peripheral role.
     */
    if (hdev->conn_hash.le_num_peripheral > 0 &&
        (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
         !(hdev->le_states[3] & 0x10)))
        return NULL;

    /* If we're not connectable only connect devices that we have in
     * our pend_le_conns list.
     */
    params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
                       addr_type);
    if (!params)
        return NULL;

    if (!params->explicit_connect) {
        switch (params->auto_connect) {
        case HCI_AUTO_CONN_DIRECT:
            /* Only devices advertising with ADV_DIRECT_IND are
             * triggering a connection attempt. This is allowing
             * incoming connections from peripheral devices.
             */
            if (adv_type != LE_ADV_DIRECT_IND)
                return NULL;
            break;
        case HCI_AUTO_CONN_ALWAYS:
            /* Devices advertising with ADV_IND or ADV_DIRECT_IND
             * are triggering a connection attempt. This means
             * that incoming connections from peripheral device are
             * accepted and also outgoing connections to peripheral
             * devices are established when found.
             */
            break;
        default:
            return NULL;
        }
    }

    conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
                  BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
                  HCI_ROLE_MASTER);
    if (!IS_ERR(conn)) {
        /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
         * by higher layer that tried to connect, if no then
         * store the pointer since we don't really have any
         * other owner of the object besides the params that
         * triggered it. This way we can abort the connection if
         * the parameters get removed and keep the reference
         * count consistent once the connection is established.
         */

        if (!params->explicit_connect)
            params->conn = hci_conn_get(conn);

        return conn;
    }

    switch (PTR_ERR(conn)) {
    case -EBUSY:
        /* If hci_connect() returns -EBUSY it means there is already
         * an LE connection attempt going on. Since controllers don't
         * support more than one connection attempt at the time, we
         * don't consider this an error case.
         */
        break;
    default:
        BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
        return NULL;
    }

    return NULL;
}
6173 
/* Core handler for a single advertising report (legacy, extended or
 * directed).
 *
 * Validates the report, resolves the advertiser's identity address,
 * optionally kicks off a pending connection, and either emits a device
 * found event immediately or stores/merges the report so that a later
 * SCAN_RSP can be combined with its ADV_IND/ADV_SCAN_IND.
 *
 * @direct_addr: destination address from an LE Direct Advertising Report,
 *               or NULL for regular reports.
 * @ext_adv:     true when the report came in via the extended advertising
 *               event (no 31-byte legacy limit, no report merging).
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
                   u8 bdaddr_type, bdaddr_t *direct_addr,
                   u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
                   bool ext_adv)
{
    struct discovery_state *d = &hdev->discovery;
    struct smp_irk *irk;
    struct hci_conn *conn;
    bool match, bdaddr_resolved;
    u32 flags;
    u8 *ptr;

    /* Only the known legacy PDU types are accepted */
    switch (type) {
    case LE_ADV_IND:
    case LE_ADV_DIRECT_IND:
    case LE_ADV_SCAN_IND:
    case LE_ADV_NONCONN_IND:
    case LE_ADV_SCAN_RSP:
        break;
    default:
        bt_dev_err_ratelimited(hdev, "unknown advertising packet "
                       "type: 0x%02x", type);
        return;
    }

    if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
        bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
        return;
    }

    /* Find the end of the data in case the report contains padded zero
     * bytes at the end causing an invalid length value.
     *
     * When data is NULL, len is 0 so there is no need for extra ptr
     * check as 'ptr < data + 0' is already false in such case.
     */
    for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
        if (ptr + 1 + *ptr > data + len)
            break;
    }

    /* Adjust for actual length. This handles the case when remote
     * device is advertising with incorrect data length.
     */
    len = ptr - data;

    /* If the direct address is present, then this report is from
     * a LE Direct Advertising Report event. In that case it is
     * important to see if the address is matching the local
     * controller address.
     */
    if (direct_addr) {
        direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
                          &bdaddr_resolved);

        /* Only resolvable random addresses are valid for these
         * kind of reports and others can be ignored.
         */
        if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
            return;

        /* If the controller is not using resolvable random
         * addresses, then this report can be ignored.
         */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
            return;

        /* If the local IRK of the controller does not match
         * with the resolvable random address provided, then
         * this report can be ignored.
         */
        if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
            return;
    }

    /* Check if we need to convert to identity address */
    irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
    if (irk) {
        bdaddr = &irk->bdaddr;
        bdaddr_type = irk->addr_type;
    }

    bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

    /* Check if we have been requested to connect to this device.
     *
     * direct_addr is set only for directed advertising reports (it is NULL
     * for advertising reports) and is already verified to be RPA above.
     */
    conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
                     type);
    if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
        /* Store report for later inclusion by
         * mgmt_device_connected
         */
        memcpy(conn->le_adv_data, data, len);
        conn->le_adv_data_len = len;
    }

    /* Passive scanning shouldn't trigger any device found events,
     * except for devices marked as CONN_REPORT for which we do send
     * device found events, or advertisement monitoring requested.
     */
    if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
        if (type == LE_ADV_DIRECT_IND)
            return;

        if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
                           bdaddr, bdaddr_type) &&
            idr_is_empty(&hdev->adv_monitors_idr))
            return;

        if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
            flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
        else
            flags = 0;
        mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
                  rssi, flags, data, len, NULL, 0);
        return;
    }

    /* When receiving non-connectable or scannable undirected
     * advertising reports, this means that the remote device is
     * not connectable and then clearly indicate this in the
     * device found event.
     *
     * When receiving a scan response, then there is no way to
     * know if the remote device is connectable or not. However
     * since scan responses are merged with a previously seen
     * advertising report, the flags field from that report
     * will be used.
     *
     * In the really unlikely case that a controller get confused
     * and just sends a scan response event, then it is marked as
     * not connectable as well.
     */
    if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
        type == LE_ADV_SCAN_RSP)
        flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
    else
        flags = 0;

    /* If there's nothing pending either store the data from this
     * event or send an immediate device found event if the data
     * should not be stored for later.
     */
    if (!ext_adv && !has_pending_adv_report(hdev)) {
        /* If the report will trigger a SCAN_REQ store it for
         * later merging.
         */
        if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
            store_pending_adv_report(hdev, bdaddr, bdaddr_type,
                         rssi, flags, data, len);
            return;
        }

        mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
                  rssi, flags, data, len, NULL, 0);
        return;
    }

    /* Check if the pending report is for the same device as the new one */
    match = (!bacmp(bdaddr, &d->last_adv_addr) &&
         bdaddr_type == d->last_adv_addr_type);

    /* If the pending data doesn't match this report or this isn't a
     * scan response (e.g. we got a duplicate ADV_IND) then force
     * sending of the pending data.
     */
    if (type != LE_ADV_SCAN_RSP || !match) {
        /* Send out whatever is in the cache, but skip duplicates */
        if (!match)
            mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
                      d->last_adv_addr_type, NULL,
                      d->last_adv_rssi, d->last_adv_flags,
                      d->last_adv_data,
                      d->last_adv_data_len, NULL, 0);

        /* If the new report will trigger a SCAN_REQ store it for
         * later merging.
         */
        if (!ext_adv && (type == LE_ADV_IND ||
                 type == LE_ADV_SCAN_IND)) {
            store_pending_adv_report(hdev, bdaddr, bdaddr_type,
                         rssi, flags, data, len);
            return;
        }

        /* The advertising reports cannot be merged, so clear
         * the pending report and send out a device found event.
         */
        clear_pending_adv_report(hdev);
        mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
                  rssi, flags, data, len, NULL, 0);
        return;
    }

    /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
     * the new event is a SCAN_RSP. We can therefore proceed with
     * sending a merged device found event.
     */
    mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
              d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
              d->last_adv_data, d->last_adv_data_len, data, len);
    clear_pending_adv_report(hdev);
}
6380 
6381 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6382                   struct sk_buff *skb)
6383 {
6384     struct hci_ev_le_advertising_report *ev = data;
6385 
6386     if (!ev->num)
6387         return;
6388 
6389     hci_dev_lock(hdev);
6390 
6391     while (ev->num--) {
6392         struct hci_ev_le_advertising_info *info;
6393         s8 rssi;
6394 
6395         info = hci_le_ev_skb_pull(hdev, skb,
6396                       HCI_EV_LE_ADVERTISING_REPORT,
6397                       sizeof(*info));
6398         if (!info)
6399             break;
6400 
6401         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6402                     info->length + 1))
6403             break;
6404 
6405         if (info->length <= HCI_MAX_AD_LENGTH) {
6406             rssi = info->data[info->length];
6407             process_adv_report(hdev, info->type, &info->bdaddr,
6408                        info->bdaddr_type, NULL, 0, rssi,
6409                        info->data, info->length, false);
6410         } else {
6411             bt_dev_err(hdev, "Dropping invalid advertising data");
6412         }
6413     }
6414 
6415     hci_dev_unlock(hdev);
6416 }
6417 
6418 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6419 {
6420     if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6421         switch (evt_type) {
6422         case LE_LEGACY_ADV_IND:
6423             return LE_ADV_IND;
6424         case LE_LEGACY_ADV_DIRECT_IND:
6425             return LE_ADV_DIRECT_IND;
6426         case LE_LEGACY_ADV_SCAN_IND:
6427             return LE_ADV_SCAN_IND;
6428         case LE_LEGACY_NONCONN_IND:
6429             return LE_ADV_NONCONN_IND;
6430         case LE_LEGACY_SCAN_RSP_ADV:
6431         case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6432             return LE_ADV_SCAN_RSP;
6433         }
6434 
6435         goto invalid;
6436     }
6437 
6438     if (evt_type & LE_EXT_ADV_CONN_IND) {
6439         if (evt_type & LE_EXT_ADV_DIRECT_IND)
6440             return LE_ADV_DIRECT_IND;
6441 
6442         return LE_ADV_IND;
6443     }
6444 
6445     if (evt_type & LE_EXT_ADV_SCAN_RSP)
6446         return LE_ADV_SCAN_RSP;
6447 
6448     if (evt_type & LE_EXT_ADV_SCAN_IND)
6449         return LE_ADV_SCAN_IND;
6450 
6451     if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6452         evt_type & LE_EXT_ADV_DIRECT_IND)
6453         return LE_ADV_NONCONN_IND;
6454 
6455 invalid:
6456     bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6457                    evt_type);
6458 
6459     return LE_ADV_INVALID;
6460 }
6461 
6462 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6463                       struct sk_buff *skb)
6464 {
6465     struct hci_ev_le_ext_adv_report *ev = data;
6466 
6467     if (!ev->num)
6468         return;
6469 
6470     hci_dev_lock(hdev);
6471 
6472     while (ev->num--) {
6473         struct hci_ev_le_ext_adv_info *info;
6474         u8 legacy_evt_type;
6475         u16 evt_type;
6476 
6477         info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6478                       sizeof(*info));
6479         if (!info)
6480             break;
6481 
6482         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6483                     info->length))
6484             break;
6485 
6486         evt_type = __le16_to_cpu(info->type);
6487         legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6488         if (legacy_evt_type != LE_ADV_INVALID) {
6489             process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6490                        info->bdaddr_type, NULL, 0,
6491                        info->rssi, info->data, info->length,
6492                        !(evt_type & LE_EXT_ADV_LEGACY_PDU));
6493         }
6494     }
6495 
6496     hci_dev_unlock(hdev);
6497 }
6498 
6499 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6500 {
6501     struct hci_cp_le_pa_term_sync cp;
6502 
6503     memset(&cp, 0, sizeof(cp));
6504     cp.handle = handle;
6505 
6506     return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6507 }
6508 
6509 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6510                         struct sk_buff *skb)
6511 {
6512     struct hci_ev_le_pa_sync_established *ev = data;
6513     int mask = hdev->link_mode;
6514     __u8 flags = 0;
6515 
6516     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6517 
6518     if (ev->status)
6519         return;
6520 
6521     hci_dev_lock(hdev);
6522 
6523     hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6524 
6525     mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6526     if (!(mask & HCI_LM_ACCEPT))
6527         hci_le_pa_term_sync(hdev, ev->handle);
6528 
6529     hci_dev_unlock(hdev);
6530 }
6531 
/* Handle the LE Read Remote Features Complete event: cache the remote
 * feature page and, if the connection was waiting in BT_CONFIG, finish
 * the connection setup.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
                        struct sk_buff *skb)
{
    struct hci_ev_le_remote_feat_complete *ev = data;
    struct hci_conn *conn;

    bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    if (conn) {
        if (!ev->status)
            memcpy(conn->features[0], ev->features, 8);

        if (conn->state == BT_CONFIG) {
            __u8 status;

            /* If the local controller supports peripheral-initiated
             * features exchange, but the remote controller does
             * not, then it is possible that the error code 0x1a
             * for unsupported remote feature gets returned.
             *
             * In this specific case, allow the connection to
             * transition into connected state and mark it as
             * successful.
             */
            if (!conn->out && ev->status == 0x1a &&
                (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
                status = 0x00;
            else
                status = ev->status;

            /* Drop the reference taken when the feature read was
             * issued at connection complete time.
             */
            conn->state = BT_CONNECTED;
            hci_connect_cfm(conn, status);
            hci_conn_drop(conn);
        }
    }

    hci_dev_unlock(hdev);
}
6573 
/* Handle the LE Long Term Key Request event.
 *
 * Looks up an LTK matching the connection and the event's EDiv/Rand and
 * replies with it; if no usable key is found, a negative reply is sent so
 * the controller aborts the encryption attempt.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
                   struct sk_buff *skb)
{
    struct hci_ev_le_ltk_req *ev = data;
    struct hci_cp_le_ltk_reply cp;
    struct hci_cp_le_ltk_neg_reply neg;
    struct hci_conn *conn;
    struct smp_ltk *ltk;

    bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    if (conn == NULL)
        goto not_found;

    ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
    if (!ltk)
        goto not_found;

    if (smp_ltk_is_sc(ltk)) {
        /* With SC both EDiv and Rand are set to zero */
        if (ev->ediv || ev->rand)
            goto not_found;
    } else {
        /* For non-SC keys check that EDiv and Rand match */
        if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
            goto not_found;
    }

    /* Copy the key and zero-pad the reply up to the full LTK size */
    memcpy(cp.ltk, ltk->val, ltk->enc_size);
    memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
    cp.handle = cpu_to_le16(conn->handle);

    conn->pending_sec_level = smp_ltk_sec_level(ltk);

    conn->enc_key_size = ltk->enc_size;

    hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

    /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
     * temporary key used to encrypt a connection following
     * pairing. It is used during the Encrypted Session Setup to
     * distribute the keys. Later, security can be re-established
     * using a distributed LTK.
     */
    if (ltk->type == SMP_STK) {
        set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
        list_del_rcu(&ltk->list);
        kfree_rcu(ltk, rcu);
    } else {
        clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
    }

    hci_dev_unlock(hdev);

    return;

not_found:
    neg.handle = ev->handle;
    hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
    hci_dev_unlock(hdev);
}
6638 
6639 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6640                       u8 reason)
6641 {
6642     struct hci_cp_le_conn_param_req_neg_reply cp;
6643 
6644     cp.handle = cpu_to_le16(handle);
6645     cp.reason = reason;
6646 
6647     hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6648              &cp);
6649 }
6650 
/* Handle the LE Remote Connection Parameter Request event.
 *
 * Rejects the request when the connection is unknown or the proposed
 * parameters are invalid; otherwise records the parameters (and notifies
 * mgmt when in central role) and accepts them with a positive reply.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
                         struct sk_buff *skb)
{
    struct hci_ev_le_remote_conn_param_req *ev = data;
    struct hci_cp_le_conn_param_req_reply cp;
    struct hci_conn *hcon;
    u16 handle, min, max, latency, timeout;

    bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

    handle = le16_to_cpu(ev->handle);
    min = le16_to_cpu(ev->interval_min);
    max = le16_to_cpu(ev->interval_max);
    latency = le16_to_cpu(ev->latency);
    timeout = le16_to_cpu(ev->timeout);

    hcon = hci_conn_hash_lookup_handle(hdev, handle);
    if (!hcon || hcon->state != BT_CONNECTED)
        return send_conn_param_neg_reply(hdev, handle,
                         HCI_ERROR_UNKNOWN_CONN_ID);

    if (hci_check_conn_params(min, max, latency, timeout))
        return send_conn_param_neg_reply(hdev, handle,
                         HCI_ERROR_INVALID_LL_PARAMS);

    if (hcon->role == HCI_ROLE_MASTER) {
        struct hci_conn_params *params;
        u8 store_hint;

        hci_dev_lock(hdev);

        /* Update stored parameters for this peer so they survive
         * reconnection; store_hint tells userspace whether they
         * were persisted.
         */
        params = hci_conn_params_lookup(hdev, &hcon->dst,
                        hcon->dst_type);
        if (params) {
            params->conn_min_interval = min;
            params->conn_max_interval = max;
            params->conn_latency = latency;
            params->supervision_timeout = timeout;
            store_hint = 0x01;
        } else {
            store_hint = 0x00;
        }

        hci_dev_unlock(hdev);

        mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
                    store_hint, min, max, latency, timeout);
    }

    /* Accept the request verbatim; the fields are already in
     * little-endian wire format, so no conversion is needed.
     */
    cp.handle = ev->handle;
    cp.interval_min = ev->interval_min;
    cp.interval_max = ev->interval_max;
    cp.latency = ev->latency;
    cp.timeout = ev->timeout;
    cp.min_ce_len = 0;
    cp.max_ce_len = 0;

    hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
6710 
/* HCI LE Directed Advertising Report event handler.
 *
 * Validates that the packet actually carries ev->num report entries,
 * then feeds each report (including the direct/target address) into the
 * common advertising processing path.  Directed reports carry no AD
 * data, hence the NULL/0 data arguments.
 */
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
                     struct sk_buff *skb)
{
    struct hci_ev_le_direct_adv_report *ev = data;
    int i;

    /* Make sure the skb really contains all advertised entries. */
    if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
                flex_array_size(ev, info, ev->num)))
        return;

    if (!ev->num)
        return;

    hci_dev_lock(hdev);

    for (i = 0; i < ev->num; i++) {
        struct hci_ev_le_direct_adv_info *info = &ev->info[i];

        process_adv_report(hdev, info->type, &info->bdaddr,
                   info->bdaddr_type, &info->direct_addr,
                   info->direct_addr_type, info->rssi, NULL, 0,
                   false);
    }

    hci_dev_unlock(hdev);
}
6737 
6738 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6739                   struct sk_buff *skb)
6740 {
6741     struct hci_ev_le_phy_update_complete *ev = data;
6742     struct hci_conn *conn;
6743 
6744     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6745 
6746     if (ev->status)
6747         return;
6748 
6749     hci_dev_lock(hdev);
6750 
6751     conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6752     if (!conn)
6753         goto unlock;
6754 
6755     conn->le_tx_phy = ev->tx_phy;
6756     conn->le_rx_phy = ev->rx_phy;
6757 
6758 unlock:
6759     hci_dev_unlock(hdev);
6760 }
6761 
6762 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6763                     struct sk_buff *skb)
6764 {
6765     struct hci_evt_le_cis_established *ev = data;
6766     struct hci_conn *conn;
6767     u16 handle = __le16_to_cpu(ev->handle);
6768 
6769     bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6770 
6771     hci_dev_lock(hdev);
6772 
6773     conn = hci_conn_hash_lookup_handle(hdev, handle);
6774     if (!conn) {
6775         bt_dev_err(hdev,
6776                "Unable to find connection with handle 0x%4.4x",
6777                handle);
6778         goto unlock;
6779     }
6780 
6781     if (conn->role == HCI_ROLE_SLAVE) {
6782         __le32 interval;
6783 
6784         memset(&interval, 0, sizeof(interval));
6785 
6786         memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
6787         conn->iso_qos.in.interval = le32_to_cpu(interval);
6788         memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
6789         conn->iso_qos.out.interval = le32_to_cpu(interval);
6790         conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
6791         conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
6792         conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
6793         conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
6794         conn->iso_qos.in.phy = ev->c_phy;
6795         conn->iso_qos.out.phy = ev->p_phy;
6796     }
6797 
6798     if (!ev->status) {
6799         conn->state = BT_CONNECTED;
6800         hci_debugfs_create_conn(conn);
6801         hci_conn_add_sysfs(conn);
6802         hci_iso_setup_path(conn);
6803         goto unlock;
6804     }
6805 
6806     hci_connect_cfm(conn, ev->status);
6807     hci_conn_del(conn);
6808 
6809 unlock:
6810     hci_dev_unlock(hdev);
6811 }
6812 
6813 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6814 {
6815     struct hci_cp_le_reject_cis cp;
6816 
6817     memset(&cp, 0, sizeof(cp));
6818     cp.handle = handle;
6819     cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6820     hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6821 }
6822 
6823 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6824 {
6825     struct hci_cp_le_accept_cis cp;
6826 
6827     memset(&cp, 0, sizeof(cp));
6828     cp.handle = handle;
6829     hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6830 }
6831 
/* HCI LE CIS Request event handler.
 *
 * A central asked to establish a CIS on top of an existing ACL.  The
 * request is rejected if the ACL is unknown, if no ISO listener accepts
 * it, or if a connection object cannot be created; otherwise it is
 * accepted immediately or deferred to userspace (HCI_PROTO_DEFER).
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
                   struct sk_buff *skb)
{
    struct hci_evt_le_cis_req *ev = data;
    u16 acl_handle, cis_handle;
    struct hci_conn *acl, *cis;
    int mask;
    __u8 flags = 0;

    acl_handle = __le16_to_cpu(ev->acl_handle);
    cis_handle = __le16_to_cpu(ev->cis_handle);

    bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
           acl_handle, cis_handle, ev->cig_id, ev->cis_id);

    hci_dev_lock(hdev);

    /* The CIS must hang off a known ACL connection. */
    acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
    if (!acl)
        goto unlock;

    /* Ask registered ISO listeners whether to accept; flags may be
     * updated to request deferred setup.
     */
    mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
    if (!(mask & HCI_LM_ACCEPT)) {
        hci_le_reject_cis(hdev, ev->cis_handle);
        goto unlock;
    }

    /* Reuse an existing connection for this handle, or create a new
     * slave-role ISO connection for the peer address.
     */
    cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
    if (!cis) {
        cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
        if (!cis) {
            hci_le_reject_cis(hdev, ev->cis_handle);
            goto unlock;
        }
        cis->handle = cis_handle;
    }

    cis->iso_qos.cig = ev->cig_id;
    cis->iso_qos.cis = ev->cis_id;

    if (!(flags & HCI_PROTO_DEFER)) {
        hci_le_accept_cis(hdev, ev->cis_handle);
    } else {
        /* Deferred: leave in BT_CONNECT2 and let the socket layer
         * decide whether to accept later.
         */
        cis->state = BT_CONNECT2;
        hci_connect_cfm(cis, 0);
    }

unlock:
    hci_dev_unlock(hdev);
}
6882 
6883 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6884                        struct sk_buff *skb)
6885 {
6886     struct hci_evt_le_create_big_complete *ev = data;
6887     struct hci_conn *conn;
6888 
6889     BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6890 
6891     if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6892                 flex_array_size(ev, bis_handle, ev->num_bis)))
6893         return;
6894 
6895     hci_dev_lock(hdev);
6896 
6897     conn = hci_conn_hash_lookup_big(hdev, ev->handle);
6898     if (!conn)
6899         goto unlock;
6900 
6901     if (ev->num_bis)
6902         conn->handle = __le16_to_cpu(ev->bis_handle[0]);
6903 
6904     if (!ev->status) {
6905         conn->state = BT_CONNECTED;
6906         hci_debugfs_create_conn(conn);
6907         hci_conn_add_sysfs(conn);
6908         hci_iso_setup_path(conn);
6909         goto unlock;
6910     }
6911 
6912     hci_connect_cfm(conn, ev->status);
6913     hci_conn_del(conn);
6914 
6915 unlock:
6916     hci_dev_unlock(hdev);
6917 }
6918 
/* HCI LE BIG Sync Established event handler.
 *
 * We synchronized to a remote Broadcast Isochronous Group.  For each
 * reported BIS handle, look up or create a slave-role ISO connection,
 * fill in the broadcast QoS parameters and confirm the connection.
 */
static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
                        struct sk_buff *skb)
{
    struct hci_evt_le_big_sync_estabilished *ev = data;
    struct hci_conn *bis;
    int i;

    bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

    /* Make sure the skb really contains num_bis handle entries. */
    if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
                flex_array_size(ev, bis, ev->num_bis)))
        return;

    if (ev->status)
        return;

    hci_dev_lock(hdev);

    for (i = 0; i < ev->num_bis; i++) {
        u16 handle = le16_to_cpu(ev->bis[i]);
        __le32 interval;

        /* Broadcast source address is unknown here, hence
         * BDADDR_ANY for newly created connections.
         */
        bis = hci_conn_hash_lookup_handle(hdev, handle);
        if (!bis) {
            bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
                       HCI_ROLE_SLAVE);
            if (!bis)
                continue;
            bis->handle = handle;
        }

        bis->iso_qos.big = ev->handle;
        /* ev->latency is a 24-bit little-endian field; widen it
         * into a zeroed __le32 before converting.
         */
        memset(&interval, 0, sizeof(interval));
        memcpy(&interval, ev->latency, sizeof(ev->latency));
        bis->iso_qos.in.interval = le32_to_cpu(interval);
        /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
        bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
        bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);

        hci_connect_cfm(bis, ev->status);
    }

    hci_dev_unlock(hdev);
}
6963 
/* HCI LE BIGInfo Advertising Report event handler.
 *
 * A synced periodic advertising train carries BIG info.  If no ISO
 * listener (nor the device link policy) accepts broadcast data, drop
 * the periodic advertising sync to stop receiving the reports.
 */
static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
                       struct sk_buff *skb)
{
    struct hci_evt_le_big_info_adv_report *ev = data;
    int mask = hdev->link_mode;
    __u8 flags = 0;

    bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

    hci_dev_lock(hdev);

    mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
    if (!(mask & HCI_LM_ACCEPT))
        hci_le_pa_term_sync(hdev, ev->sync_handle);

    hci_dev_unlock(hdev);
}
6981 
/* Declare an hci_le_ev_table entry at index _op for a subevent whose
 * payload length may vary between _min_len and _max_len.
 */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
    .func = _func, \
    .min_len = _min_len, \
    .max_len = _max_len, \
}

/* Fixed-length subevent: min and max length are the same. */
#define HCI_LE_EV(_op, _func, _len) \
    HCI_LE_EV_VL(_op, _func, _len, _len)

/* Subevent whose payload is just a status byte. */
#define HCI_LE_EV_STATUS(_op, _func) \
    HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
6994 
/* Entries in this table shall have their position according to the subevent
 * opcode they handle, so the use of the macros above is recommended since
 * they initialize at the proper index using Designated Initializers; that
 * way subevents without a callback function can be omitted.
 */
static const struct hci_le_ev {
    void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
    u16  min_len;
    u16  max_len;
} hci_le_ev_table[U8_MAX + 1] = {
    /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
    HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
          sizeof(struct hci_ev_le_conn_complete)),
    /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
    HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
             sizeof(struct hci_ev_le_advertising_report),
             HCI_MAX_EVENT_SIZE),
    /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
    HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
          hci_le_conn_update_complete_evt,
          sizeof(struct hci_ev_le_conn_update_complete)),
    /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
    HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
          hci_le_remote_feat_complete_evt,
          sizeof(struct hci_ev_le_remote_feat_complete)),
    /* [0x05 = HCI_EV_LE_LTK_REQ] */
    HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
          sizeof(struct hci_ev_le_ltk_req)),
    /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
    HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
          hci_le_remote_conn_param_req_evt,
          sizeof(struct hci_ev_le_remote_conn_param_req)),
    /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
    HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
          hci_le_enh_conn_complete_evt,
          sizeof(struct hci_ev_le_enh_conn_complete)),
    /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
    HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
             sizeof(struct hci_ev_le_direct_adv_report),
             HCI_MAX_EVENT_SIZE),
    /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
    HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
          sizeof(struct hci_ev_le_phy_update_complete)),
    /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
    HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
             sizeof(struct hci_ev_le_ext_adv_report),
             HCI_MAX_EVENT_SIZE),
    /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
    HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
          hci_le_pa_sync_estabilished_evt,
          sizeof(struct hci_ev_le_pa_sync_established)),
    /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
    HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
          sizeof(struct hci_evt_le_ext_adv_set_term)),
    /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
    HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
          sizeof(struct hci_evt_le_cis_established)),
    /* [0x1a = HCI_EVT_LE_CIS_REQ] */
    HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
          sizeof(struct hci_evt_le_cis_req)),
    /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
    HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
             hci_le_create_big_complete_evt,
             sizeof(struct hci_evt_le_create_big_complete),
             HCI_MAX_EVENT_SIZE),
    /* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
    HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
             hci_le_big_sync_established_evt,
             sizeof(struct hci_evt_le_big_sync_estabilished),
             HCI_MAX_EVENT_SIZE),
    /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
    HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
             hci_le_big_info_adv_report_evt,
             sizeof(struct hci_evt_le_big_info_adv_report),
             HCI_MAX_EVENT_SIZE),
};
7071 
/* HCI LE Meta event dispatcher.
 *
 * Completes a pending LE command that was waiting for this subevent
 * (only when the sent command's OGF is 0x08, i.e. LE Controller), then
 * length-checks the payload against hci_le_ev_table and invokes the
 * registered subevent handler.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
                struct sk_buff *skb, u16 *opcode, u8 *status,
                hci_req_complete_t *req_complete,
                hci_req_complete_skb_t *req_complete_skb)
{
    struct hci_ev_le_meta *ev = data;
    const struct hci_le_ev *subev;

    bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

    /* Only match event if command OGF is for LE */
    if (hdev->sent_cmd &&
        hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
        hci_skb_event(hdev->sent_cmd) == ev->subevent) {
        *opcode = hci_skb_opcode(hdev->sent_cmd);
        hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
                     req_complete_skb);
    }

    /* Table is fully populated (U8_MAX + 1 entries), so indexing by
     * subevent is always in bounds; unregistered subevents have a
     * NULL func.
     */
    subev = &hci_le_ev_table[ev->subevent];
    if (!subev->func)
        return;

    if (skb->len < subev->min_len) {
        bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
               ev->subevent, skb->len, subev->min_len);
        return;
    }

    /* Just warn if the length is over max_len; it may still be
     * possible to partially parse the event, so leave it to the
     * callback to decide if that is acceptable.
     */
    if (skb->len > subev->max_len)
        bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
                ev->subevent, skb->len, subev->max_len);
    data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
    if (!data)
        return;

    subev->func(hdev, data, skb);
}
7114 
/* Check whether @skb (a pristine copy of the last received event) can
 * complete the request that sent @opcode.
 *
 * If @event is non-zero the request asked for a specific event, so only
 * that event type matches.  Otherwise only a Command Complete for the
 * same opcode matches; a Command Status carries no return parameters
 * and therefore never does.  Returns true when the skb matches (with
 * its headers pulled off), false otherwise.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                 u8 event, struct sk_buff *skb)
{
    struct hci_ev_cmd_complete *ev;
    struct hci_event_hdr *hdr;

    if (!skb)
        return false;

    hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
    if (!hdr)
        return false;

    if (event) {
        if (hdr->evt != event)
            return false;
        return true;
    }

    /* Check if request ended in Command Status - no way to retrieve
     * any extra parameters in this case.
     */
    if (hdr->evt == HCI_EV_CMD_STATUS)
        return false;

    if (hdr->evt != HCI_EV_CMD_COMPLETE) {
        bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
               hdr->evt);
        return false;
    }

    ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
    if (!ev)
        return false;

    if (opcode != __le16_to_cpu(ev->opcode)) {
        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));
        return false;
    }

    return true;
}
7158 
7159 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7160                   struct sk_buff *skb)
7161 {
7162     struct hci_ev_le_advertising_info *adv;
7163     struct hci_ev_le_direct_adv_info *direct_adv;
7164     struct hci_ev_le_ext_adv_info *ext_adv;
7165     const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7166     const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7167 
7168     hci_dev_lock(hdev);
7169 
7170     /* If we are currently suspended and this is the first BT event seen,
7171      * save the wake reason associated with the event.
7172      */
7173     if (!hdev->suspended || hdev->wake_reason)
7174         goto unlock;
7175 
7176     /* Default to remote wake. Values for wake_reason are documented in the
7177      * Bluez mgmt api docs.
7178      */
7179     hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7180 
7181     /* Once configured for remote wakeup, we should only wake up for
7182      * reconnections. It's useful to see which device is waking us up so
7183      * keep track of the bdaddr of the connection event that woke us up.
7184      */
7185     if (event == HCI_EV_CONN_REQUEST) {
7186         bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7187         hdev->wake_addr_type = BDADDR_BREDR;
7188     } else if (event == HCI_EV_CONN_COMPLETE) {
7189         bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7190         hdev->wake_addr_type = BDADDR_BREDR;
7191     } else if (event == HCI_EV_LE_META) {
7192         struct hci_ev_le_meta *le_ev = (void *)skb->data;
7193         u8 subevent = le_ev->subevent;
7194         u8 *ptr = &skb->data[sizeof(*le_ev)];
7195         u8 num_reports = *ptr;
7196 
7197         if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7198              subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7199              subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7200             num_reports) {
7201             adv = (void *)(ptr + 1);
7202             direct_adv = (void *)(ptr + 1);
7203             ext_adv = (void *)(ptr + 1);
7204 
7205             switch (subevent) {
7206             case HCI_EV_LE_ADVERTISING_REPORT:
7207                 bacpy(&hdev->wake_addr, &adv->bdaddr);
7208                 hdev->wake_addr_type = adv->bdaddr_type;
7209                 break;
7210             case HCI_EV_LE_DIRECT_ADV_REPORT:
7211                 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7212                 hdev->wake_addr_type = direct_adv->bdaddr_type;
7213                 break;
7214             case HCI_EV_LE_EXT_ADV_REPORT:
7215                 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7216                 hdev->wake_addr_type = ext_adv->bdaddr_type;
7217                 break;
7218             }
7219         }
7220     } else {
7221         hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7222     }
7223 
7224 unlock:
7225     hci_dev_unlock(hdev);
7226 }
7227 
/* Declare an hci_ev_table entry at index _op for an event whose payload
 * length may vary between _min_len and _max_len.
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
    .req = false, \
    .func = _func, \
    .min_len = _min_len, \
    .max_len = _max_len, \
}

/* Fixed-length event: min and max length are the same. */
#define HCI_EV(_op, _func, _len) \
    HCI_EV_VL(_op, _func, _len, _len)

/* Event whose payload is just a status byte. */
#define HCI_EV_STATUS(_op, _func) \
    HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Request-completing event: handler takes the extra opcode/status/
 * req_complete parameters (func_req in the union).
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
    .req = true, \
    .func_req = _func, \
    .min_len = _min_len, \
    .max_len = _max_len, \
}

/* Fixed-length request-completing event. */
#define HCI_EV_REQ(_op, _func, _len) \
    HCI_EV_REQ_VL(_op, _func, _len, _len)
7252 
/* Entries in this table shall have their position according to the event
 * opcode they handle, so the use of the macros above is recommended since
 * they initialize at the proper index using Designated Initializers; that
 * way events without a callback function can be omitted.
 */
static const struct hci_ev {
    bool req;
    union {
        void (*func)(struct hci_dev *hdev, void *data,
                 struct sk_buff *skb);
        void (*func_req)(struct hci_dev *hdev, void *data,
                 struct sk_buff *skb, u16 *opcode, u8 *status,
                 hci_req_complete_t *req_complete,
                 hci_req_complete_skb_t *req_complete_skb);
    };
    u16  min_len;
    u16  max_len;
} hci_ev_table[U8_MAX + 1] = {
    /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
    HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
    /* [0x02 = HCI_EV_INQUIRY_RESULT] */
    HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
          sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
    /* [0x03 = HCI_EV_CONN_COMPLETE] */
    HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
           sizeof(struct hci_ev_conn_complete)),
    /* [0x04 = HCI_EV_CONN_REQUEST] */
    HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
           sizeof(struct hci_ev_conn_request)),
    /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
    HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
           sizeof(struct hci_ev_disconn_complete)),
    /* [0x06 = HCI_EV_AUTH_COMPLETE] */
    HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
           sizeof(struct hci_ev_auth_complete)),
    /* [0x07 = HCI_EV_REMOTE_NAME] */
    HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
           sizeof(struct hci_ev_remote_name)),
    /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
    HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
           sizeof(struct hci_ev_encrypt_change)),
    /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
    HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
           hci_change_link_key_complete_evt,
           sizeof(struct hci_ev_change_link_key_complete)),
    /* [0x0b = HCI_EV_REMOTE_FEATURES] */
    HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
           sizeof(struct hci_ev_remote_features)),
    /* [0x0e = HCI_EV_CMD_COMPLETE] */
    HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
              sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
    /* [0x0f = HCI_EV_CMD_STATUS] */
    HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
           sizeof(struct hci_ev_cmd_status)),
    /* [0x10 = HCI_EV_HARDWARE_ERROR] */
    HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
           sizeof(struct hci_ev_hardware_error)),
    /* [0x12 = HCI_EV_ROLE_CHANGE] */
    HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
           sizeof(struct hci_ev_role_change)),
    /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
    HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
          sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
    /* [0x14 = HCI_EV_MODE_CHANGE] */
    HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
           sizeof(struct hci_ev_mode_change)),
    /* [0x16 = HCI_EV_PIN_CODE_REQ] */
    HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
           sizeof(struct hci_ev_pin_code_req)),
    /* [0x17 = HCI_EV_LINK_KEY_REQ] */
    HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
           sizeof(struct hci_ev_link_key_req)),
    /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
    HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
           sizeof(struct hci_ev_link_key_notify)),
    /* [0x1c = HCI_EV_CLOCK_OFFSET] */
    HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
           sizeof(struct hci_ev_clock_offset)),
    /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
    HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
           sizeof(struct hci_ev_pkt_type_change)),
    /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
    HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
           sizeof(struct hci_ev_pscan_rep_mode)),
    /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
    HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
          hci_inquiry_result_with_rssi_evt,
          sizeof(struct hci_ev_inquiry_result_rssi),
          HCI_MAX_EVENT_SIZE),
    /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
    HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
           sizeof(struct hci_ev_remote_ext_features)),
    /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
    HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
           sizeof(struct hci_ev_sync_conn_complete)),
    /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
    HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
          hci_extended_inquiry_result_evt,
          sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
    /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
    HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
           sizeof(struct hci_ev_key_refresh_complete)),
    /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
    HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
           sizeof(struct hci_ev_io_capa_request)),
    /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
    HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
           sizeof(struct hci_ev_io_capa_reply)),
    /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
    HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
           sizeof(struct hci_ev_user_confirm_req)),
    /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
    HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
           sizeof(struct hci_ev_user_passkey_req)),
    /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
    HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
           sizeof(struct hci_ev_remote_oob_data_request)),
    /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
    HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
           sizeof(struct hci_ev_simple_pair_complete)),
    /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
    HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
           sizeof(struct hci_ev_user_passkey_notify)),
    /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
    HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
           sizeof(struct hci_ev_keypress_notify)),
    /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
    HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
           sizeof(struct hci_ev_remote_host_features)),
    /* [0x3e = HCI_EV_LE_META] */
    HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
              sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
    /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
    HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
           sizeof(struct hci_ev_phy_link_complete)),
    /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
    HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
           sizeof(struct hci_ev_channel_selected)),
    /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
    HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
           hci_disconn_loglink_complete_evt,
           sizeof(struct hci_ev_disconn_logical_link_complete)),
    /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
    HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
           sizeof(struct hci_ev_logical_link_complete)),
    /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
    HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
           hci_disconn_phylink_complete_evt,
           sizeof(struct hci_ev_disconn_phy_link_complete)),
#endif
    /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
    HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
           sizeof(struct hci_ev_num_comp_blocks)),
    /* [0xff = HCI_EV_VENDOR] */
    HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};
7410 
/* Dispatch a single HCI event to its handler from hci_ev_table.
 *
 * Validates the payload length against the table entry, pulls the
 * fixed-size header, then calls either the plain handler or the
 * request-completing variant (which may fill in opcode/status and the
 * completion callbacks for the caller).
 */
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
               u16 *opcode, u8 *status,
               hci_req_complete_t *req_complete,
               hci_req_complete_skb_t *req_complete_skb)
{
    const struct hci_ev *ev = &hci_ev_table[event];
    void *data;

    /* Unregistered events have a NULL func (union, so this also
     * covers func_req).
     */
    if (!ev->func)
        return;

    if (skb->len < ev->min_len) {
        bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
               event, skb->len, ev->min_len);
        return;
    }

    /* Just warn if the length is over max_len; it may still be
     * possible to partially parse the event, so leave it to the
     * callback to decide if that is acceptable.
     */
    if (skb->len > ev->max_len)
        bt_dev_warn_ratelimited(hdev,
                    "unexpected event 0x%2.2x length: %u > %u",
                    event, skb->len, ev->max_len);

    data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
    if (!data)
        return;

    if (ev->req)
        ev->func_req(hdev, data, skb, opcode, status, req_complete,
                 req_complete_skb);
    else
        ev->func(hdev, data, skb);
}
7447 
/* Entry point for a complete HCI event packet received from the
 * controller.  Validates the header, matches the event against any
 * pending command, dispatches it through hci_event_func() and then
 * runs the request-completion callback, if one was set up.
 *
 * Consumes @skb: it is always freed before returning.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
    struct hci_event_hdr *hdr = (void *) skb->data;
    hci_req_complete_t req_complete = NULL;
    hci_req_complete_skb_t req_complete_skb = NULL;
    struct sk_buff *orig_skb = NULL;
    u8 status = 0, event, req_evt = 0;
    u16 opcode = HCI_OP_NOP;

    /* Packet must at least carry the event header (code + plen) */
    if (skb->len < sizeof(*hdr)) {
        bt_dev_err(hdev, "Malformed HCI Event");
        goto done;
    }

    /* Keep a copy of the most recently received event; a failed clone
     * simply leaves hdev->recv_event NULL, which consumers of
     * recv_event are expected to tolerate.
     */
    kfree_skb(hdev->recv_event);
    hdev->recv_event = skb_clone(skb, GFP_KERNEL);

    /* Event code 0x00 is not a valid HCI event */
    event = hdr->evt;
    if (!event) {
        bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
                event);
        goto done;
    }

    /* Only match event if command OGF is not for LE */
    if (hdev->sent_cmd &&
        hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
        hci_skb_event(hdev->sent_cmd) == event) {
        hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
                     status, &req_complete, &req_complete_skb);
        req_evt = event;
    }

    /* If it looks like we might end up having to call
     * req_complete_skb, store a pristine copy of the skb since the
     * various handlers may modify the original one through
     * skb_pull() calls, etc.
     */
    if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
        event == HCI_EV_CMD_COMPLETE)
        orig_skb = skb_clone(skb, GFP_KERNEL);

    /* Strip the header so handlers see just the event parameters */
    skb_pull(skb, HCI_EVENT_HDR_SIZE);

    /* Store wake reason if we're suspended */
    hci_store_wake_reason(hdev, event, skb);

    bt_dev_dbg(hdev, "event 0x%2.2x", event);

    /* Dispatch to the per-event handler; this may also fill in
     * opcode/status and the completion callbacks for CMD_STATUS and
     * CMD_COMPLETE events.
     */
    hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
               &req_complete_skb);

    if (req_complete) {
        req_complete(hdev, status, opcode);
    } else if (req_complete_skb) {
        /* If the pristine copy does not actually hold the awaited
         * command-complete data (or the clone above failed), pass
         * NULL rather than an unrelated skb.  Ownership of orig_skb
         * transfers to the callback; NULL it so the cleanup below
         * does not double-free.
         */
        if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
            kfree_skb(orig_skb);
            orig_skb = NULL;
        }
        req_complete_skb(hdev, status, opcode, orig_skb);
    }

done:
    /* kfree_skb(NULL) is a no-op, so all paths are safe here */
    kfree_skb(orig_skb);
    kfree_skb(skb);
    hdev->stat.evt_rx++;
}