0001 /*
0002    BlueZ - Bluetooth protocol stack for Linux
0003    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
0004 
0005    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
0006 
0007    This program is free software; you can redistribute it and/or modify
0008    it under the terms of the GNU General Public License version 2 as
0009    published by the Free Software Foundation;
0010 
0011    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
0012    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0013    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
0014    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
0015    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
0016    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
0017    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
0018    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
0019 
0020    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
0021    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
0022    SOFTWARE IS DISCLAIMED.
0023 */
0024 
0025 /* Bluetooth HCI connection handling. */
0026 
0027 #include <linux/export.h>
0028 #include <linux/debugfs.h>
0029 
0030 #include <net/bluetooth/bluetooth.h>
0031 #include <net/bluetooth/hci_core.h>
0032 #include <net/bluetooth/l2cap.h>
0033 #include <net/bluetooth/iso.h>
0034 #include <net/bluetooth/mgmt.h>
0035 
0036 #include "hci_request.h"
0037 #include "smp.h"
0038 #include "a2mp.h"
0039 #include "eir.h"
0040 
0041 struct sco_param {
0042     u16 pkt_type;
0043     u16 max_latency;
0044     u8  retrans_effort;
0045 };
0046 
0047 static const struct sco_param esco_param_cvsd[] = {
0048     { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
0049     { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
0050     { EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
0051     { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
0052     { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
0053 };
0054 
0055 static const struct sco_param sco_param_cvsd[] = {
0056     { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
0057     { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
0058 };
0059 
0060 static const struct sco_param esco_param_msbc[] = {
0061     { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
0062     { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
0063 };
0064 
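/* The tables above mirror the S1-S3/D0-D1 (CVSD) and T1-T2 (mSBC)
 * parameter sets recommended by the Hands-Free Profile. They are walked
 * in order, one entry per connection attempt, falling back to a more
 * conservative set each time setup fails. max_latency is in
 * milliseconds (0xffff means "don't care") and retrans_effort uses the
 * HCI encoding (0x01 = retransmit, optimize for power, 0x02 =
 * retransmit, optimize for link quality, 0xff = don't care). The EDR
 * bits in the HCI packet-type field are "may not be used" flags, hence
 * the EDR_ESCO_MASK arithmetic.
 */
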
0065 /* This function requires the caller holds hdev->lock */
0066 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
0067 {
0068     struct hci_conn_params *params;
0069     struct hci_dev *hdev = conn->hdev;
0070     struct smp_irk *irk;
0071     bdaddr_t *bdaddr;
0072     u8 bdaddr_type;
0073 
0074     bdaddr = &conn->dst;
0075     bdaddr_type = conn->dst_type;
0076 
0077     /* Check if we need to convert to identity address */
0078     irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
0079     if (irk) {
0080         bdaddr = &irk->bdaddr;
0081         bdaddr_type = irk->addr_type;
0082     }
0083 
0084     params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
0085                        bdaddr_type);
0086     if (!params || !params->explicit_connect)
0087         return;
0088 
0089     /* The connection attempt was doing a scan for a new RPA and is in
0090      * the scan phase. If the params are not associated with any other
0091      * autoconnect action, remove them completely. If they are, just unmark
0092      * them as waiting for connection by clearing the explicit_connect field.
0093      */
0094     params->explicit_connect = false;
0095 
0096     list_del_init(&params->action);
0097 
0098     switch (params->auto_connect) {
0099     case HCI_AUTO_CONN_EXPLICIT:
0100         hci_conn_params_del(hdev, bdaddr, bdaddr_type);
0101         /* return instead of break to avoid duplicate scan update */
0102         return;
0103     case HCI_AUTO_CONN_DIRECT:
0104     case HCI_AUTO_CONN_ALWAYS:
0105         list_add(&params->action, &hdev->pend_le_conns);
0106         break;
0107     case HCI_AUTO_CONN_REPORT:
0108         list_add(&params->action, &hdev->pend_le_reports);
0109         break;
0110     default:
0111         break;
0112     }
0113 
0114     hci_update_passive_scan(hdev);
0115 }
0116 
0117 static void hci_conn_cleanup(struct hci_conn *conn)
0118 {
0119     struct hci_dev *hdev = conn->hdev;
0120 
0121     if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
0122         hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
0123 
0124     if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
0125         hci_remove_link_key(hdev, &conn->dst);
0126 
0127     hci_chan_list_flush(conn);
0128 
0129     hci_conn_hash_del(hdev, conn);
0130 
0131     if (conn->cleanup)
0132         conn->cleanup(conn);
0133 
0134     if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
0135         switch (conn->setting & SCO_AIRMODE_MASK) {
0136         case SCO_AIRMODE_CVSD:
0137         case SCO_AIRMODE_TRANSP:
0138             if (hdev->notify)
0139                 hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
0140             break;
0141         }
0142     } else {
0143         if (hdev->notify)
0144             hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
0145     }
0146 
0147     hci_conn_del_sysfs(conn);
0148 
0149     debugfs_remove_recursive(conn->debugfs);
0150 
0151     hci_dev_put(hdev);
0152 
0153     hci_conn_put(conn);
0154 }
0155 
0156 static void le_scan_cleanup(struct work_struct *work)
0157 {
0158     struct hci_conn *conn = container_of(work, struct hci_conn,
0159                          le_scan_cleanup);
0160     struct hci_dev *hdev = conn->hdev;
0161     struct hci_conn *c = NULL;
0162 
0163     BT_DBG("%s hcon %p", hdev->name, conn);
0164 
0165     hci_dev_lock(hdev);
0166 
0167     /* Check that the hci_conn is still around */
0168     rcu_read_lock();
0169     list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
0170         if (c == conn)
0171             break;
0172     }
0173     rcu_read_unlock();
0174 
0175     if (c == conn) {
0176         hci_connect_le_scan_cleanup(conn);
0177         hci_conn_cleanup(conn);
0178     }
0179 
0180     hci_dev_unlock(hdev);
0181     hci_dev_put(hdev);
0182     hci_conn_put(conn);
0183 }
0184 
0185 static void hci_connect_le_scan_remove(struct hci_conn *conn)
0186 {
0187     BT_DBG("%s hcon %p", conn->hdev->name, conn);
0188 
0189     /* We can't call hci_conn_del/hci_conn_cleanup here since that
0190      * could deadlock with another hci_conn_del() call that's holding
0191      * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
0192      * Instead, grab temporary extra references to the hci_dev and
0193      * hci_conn and perform the necessary cleanup in a separate work
0194      * callback.
0195      */
0196 
0197     hci_dev_hold(conn->hdev);
0198     hci_conn_get(conn);
0199 
0200     /* Even though we hold a reference to the hdev, many other
0201      * things might get cleaned up meanwhile, including the hdev's
0202      * own workqueue, so we can't use that for scheduling.
0203      */
0204     schedule_work(&conn->le_scan_cleanup);
0205 }
0206 
0207 static void hci_acl_create_connection(struct hci_conn *conn)
0208 {
0209     struct hci_dev *hdev = conn->hdev;
0210     struct inquiry_entry *ie;
0211     struct hci_cp_create_conn cp;
0212 
0213     BT_DBG("hcon %p", conn);
0214 
0215     /* Many controllers disallow HCI Create Connection while they are
0216      * doing HCI Inquiry, so we cancel the Inquiry first before issuing
0217      * HCI Create Connection. This may cause the MGMT discovering state to
0218      * become false without user space's request, but that is okay since
0219      * the MGMT Discovery APIs do not promise that discovery will run
0220      * forever. Instead, user space monitors the MGMT discovering state
0221      * and may request discovery again when this flag becomes false.
0222      */
0223     if (test_bit(HCI_INQUIRY, &hdev->flags)) {
0224         /* Put this connection to "pending" state so that it will be
0225          * executed after the inquiry cancel command complete event.
0226          */
0227         conn->state = BT_CONNECT2;
0228         hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
0229         return;
0230     }
0231 
0232     conn->state = BT_CONNECT;
0233     conn->out = true;
0234     conn->role = HCI_ROLE_MASTER;
0235 
0236     conn->attempt++;
0237 
0238     conn->link_policy = hdev->link_policy;
0239 
0240     memset(&cp, 0, sizeof(cp));
0241     bacpy(&cp.bdaddr, &conn->dst);
0242     cp.pscan_rep_mode = 0x02;
0243 
0244     ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
0245     if (ie) {
0246         if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
0247             cp.pscan_rep_mode = ie->data.pscan_rep_mode;
0248             cp.pscan_mode     = ie->data.pscan_mode;
0249             cp.clock_offset   = ie->data.clock_offset |
0250                         cpu_to_le16(0x8000);
0251         }
0252 
0253         memcpy(conn->dev_class, ie->data.dev_class, 3);
0254     }
0255 
0256     cp.pkt_type = cpu_to_le16(conn->pkt_type);
0257     if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
0258         cp.role_switch = 0x01;
0259     else
0260         cp.role_switch = 0x00;
0261 
0262     hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
0263 }
0264 
0265 int hci_disconnect(struct hci_conn *conn, __u8 reason)
0266 {
0267     BT_DBG("hcon %p", conn);
0268 
0269     /* When we are the central of an established connection and it enters
0270      * the disconnect timeout, then go ahead and try to read the
0271      * current clock offset.  Processing of the result is done
0272      * within the event handling and hci_clock_offset_evt function.
0273      */
0274     if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
0275         (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
0276         struct hci_dev *hdev = conn->hdev;
0277         struct hci_cp_read_clock_offset clkoff_cp;
0278 
0279         clkoff_cp.handle = cpu_to_le16(conn->handle);
0280         hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
0281                  &clkoff_cp);
0282     }
0283 
0284     return hci_abort_conn(conn, reason);
0285 }
0286 
0287 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
0288 {
0289     struct hci_dev *hdev = conn->hdev;
0290     struct hci_cp_add_sco cp;
0291 
0292     BT_DBG("hcon %p", conn);
0293 
0294     conn->state = BT_CONNECT;
0295     conn->out = true;
0296 
0297     conn->attempt++;
0298 
0299     cp.handle   = cpu_to_le16(handle);
0300     cp.pkt_type = cpu_to_le16(conn->pkt_type);
0301 
0302     hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
0303 }
0304 
0305 static bool find_next_esco_param(struct hci_conn *conn,
0306                  const struct sco_param *esco_param, int size)
0307 {
0308     for (; conn->attempt <= size; conn->attempt++) {
0309         if (lmp_esco_2m_capable(conn->link) ||
0310             (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
0311             break;
0312         BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
0313                conn, conn->attempt);
0314     }
0315 
0316     return conn->attempt <= size;
0317 }
0318 
0319 static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)
0320 {
0321     struct hci_dev *hdev = conn->hdev;
0322     struct hci_cp_enhanced_setup_sync_conn cp;
0323     const struct sco_param *param;
0324 
0325     bt_dev_dbg(hdev, "hcon %p", conn);
0326 
0327     /* For the offload use case, the codec needs to be configured before opening SCO */
0328     if (conn->codec.data_path)
0329         hci_req_configure_datapath(hdev, &conn->codec);
0330 
0331     conn->state = BT_CONNECT;
0332     conn->out = true;
0333 
0334     conn->attempt++;
0335 
0336     memset(&cp, 0x00, sizeof(cp));
0337 
0338     cp.handle   = cpu_to_le16(handle);
0339 
0340     cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
0341     cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
0342 
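    /* The coding-format IDs used below follow the Bluetooth Assigned
     * Numbers: 0x02 = CVSD, 0x03 = Transparent, 0x04 = Linear PCM and
     * 0x05 = mSBC. The tx/rx fields describe the air interface coding,
     * while the in/out fields describe the host-facing side of the
     * (possibly offloaded) data path.
     */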
0343     switch (conn->codec.id) {
0344     case BT_CODEC_MSBC:
0345         if (!find_next_esco_param(conn, esco_param_msbc,
0346                       ARRAY_SIZE(esco_param_msbc)))
0347             return false;
0348 
0349         param = &esco_param_msbc[conn->attempt - 1];
0350         cp.tx_coding_format.id = 0x05;
0351         cp.rx_coding_format.id = 0x05;
0352         cp.tx_codec_frame_size = __cpu_to_le16(60);
0353         cp.rx_codec_frame_size = __cpu_to_le16(60);
0354         cp.in_bandwidth = __cpu_to_le32(32000);
0355         cp.out_bandwidth = __cpu_to_le32(32000);
0356         cp.in_coding_format.id = 0x04;
0357         cp.out_coding_format.id = 0x04;
0358         cp.in_coded_data_size = __cpu_to_le16(16);
0359         cp.out_coded_data_size = __cpu_to_le16(16);
0360         cp.in_pcm_data_format = 2;
0361         cp.out_pcm_data_format = 2;
0362         cp.in_pcm_sample_payload_msb_pos = 0;
0363         cp.out_pcm_sample_payload_msb_pos = 0;
0364         cp.in_data_path = conn->codec.data_path;
0365         cp.out_data_path = conn->codec.data_path;
0366         cp.in_transport_unit_size = 1;
0367         cp.out_transport_unit_size = 1;
0368         break;
0369 
0370     case BT_CODEC_TRANSPARENT:
0371         if (!find_next_esco_param(conn, esco_param_msbc,
0372                       ARRAY_SIZE(esco_param_msbc)))
0373             return false;
0374         param = &esco_param_msbc[conn->attempt - 1];
0375         cp.tx_coding_format.id = 0x03;
0376         cp.rx_coding_format.id = 0x03;
0377         cp.tx_codec_frame_size = __cpu_to_le16(60);
0378         cp.rx_codec_frame_size = __cpu_to_le16(60);
0379         cp.in_bandwidth = __cpu_to_le32(0x1f40);
0380         cp.out_bandwidth = __cpu_to_le32(0x1f40);
0381         cp.in_coding_format.id = 0x03;
0382         cp.out_coding_format.id = 0x03;
0383         cp.in_coded_data_size = __cpu_to_le16(16);
0384         cp.out_coded_data_size = __cpu_to_le16(16);
0385         cp.in_pcm_data_format = 2;
0386         cp.out_pcm_data_format = 2;
0387         cp.in_pcm_sample_payload_msb_pos = 0;
0388         cp.out_pcm_sample_payload_msb_pos = 0;
0389         cp.in_data_path = conn->codec.data_path;
0390         cp.out_data_path = conn->codec.data_path;
0391         cp.in_transport_unit_size = 1;
0392         cp.out_transport_unit_size = 1;
0393         break;
0394 
0395     case BT_CODEC_CVSD:
0396         if (lmp_esco_capable(conn->link)) {
0397             if (!find_next_esco_param(conn, esco_param_cvsd,
0398                           ARRAY_SIZE(esco_param_cvsd)))
0399                 return false;
0400             param = &esco_param_cvsd[conn->attempt - 1];
0401         } else {
0402             if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
0403                 return false;
0404             param = &sco_param_cvsd[conn->attempt - 1];
0405         }
0406         cp.tx_coding_format.id = 2;
0407         cp.rx_coding_format.id = 2;
0408         cp.tx_codec_frame_size = __cpu_to_le16(60);
0409         cp.rx_codec_frame_size = __cpu_to_le16(60);
0410         cp.in_bandwidth = __cpu_to_le32(16000);
0411         cp.out_bandwidth = __cpu_to_le32(16000);
0412         cp.in_coding_format.id = 4;
0413         cp.out_coding_format.id = 4;
0414         cp.in_coded_data_size = __cpu_to_le16(16);
0415         cp.out_coded_data_size = __cpu_to_le16(16);
0416         cp.in_pcm_data_format = 2;
0417         cp.out_pcm_data_format = 2;
0418         cp.in_pcm_sample_payload_msb_pos = 0;
0419         cp.out_pcm_sample_payload_msb_pos = 0;
0420         cp.in_data_path = conn->codec.data_path;
0421         cp.out_data_path = conn->codec.data_path;
0422         cp.in_transport_unit_size = 16;
0423         cp.out_transport_unit_size = 16;
0424         break;
0425     default:
0426         return false;
0427     }
0428 
0429     cp.retrans_effort = param->retrans_effort;
0430     cp.pkt_type = __cpu_to_le16(param->pkt_type);
0431     cp.max_latency = __cpu_to_le16(param->max_latency);
0432 
0433     if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
0434         return false;
0435 
0436     return true;
0437 }
0438 
0439 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
0440 {
0441     struct hci_dev *hdev = conn->hdev;
0442     struct hci_cp_setup_sync_conn cp;
0443     const struct sco_param *param;
0444 
0445     bt_dev_dbg(hdev, "hcon %p", conn);
0446 
0447     conn->state = BT_CONNECT;
0448     conn->out = true;
0449 
0450     conn->attempt++;
0451 
0452     cp.handle   = cpu_to_le16(handle);
0453 
0454     cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
0455     cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
0456     cp.voice_setting  = cpu_to_le16(conn->setting);
0457 
0458     switch (conn->setting & SCO_AIRMODE_MASK) {
0459     case SCO_AIRMODE_TRANSP:
0460         if (!find_next_esco_param(conn, esco_param_msbc,
0461                       ARRAY_SIZE(esco_param_msbc)))
0462             return false;
0463         param = &esco_param_msbc[conn->attempt - 1];
0464         break;
0465     case SCO_AIRMODE_CVSD:
0466         if (lmp_esco_capable(conn->link)) {
0467             if (!find_next_esco_param(conn, esco_param_cvsd,
0468                           ARRAY_SIZE(esco_param_cvsd)))
0469                 return false;
0470             param = &esco_param_cvsd[conn->attempt - 1];
0471         } else {
0472             if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
0473                 return false;
0474             param = &sco_param_cvsd[conn->attempt - 1];
0475         }
0476         break;
0477     default:
0478         return false;
0479     }
0480 
0481     cp.retrans_effort = param->retrans_effort;
0482     cp.pkt_type = __cpu_to_le16(param->pkt_type);
0483     cp.max_latency = __cpu_to_le16(param->max_latency);
0484 
0485     if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
0486         return false;
0487 
0488     return true;
0489 }
0490 
0491 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
0492 {
0493     if (enhanced_sync_conn_capable(conn->hdev))
0494         return hci_enhanced_setup_sync_conn(conn, handle);
0495 
0496     return hci_setup_sync_conn(conn, handle);
0497 }
0498 
0499 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
0500               u16 to_multiplier)
0501 {
0502     struct hci_dev *hdev = conn->hdev;
0503     struct hci_conn_params *params;
0504     struct hci_cp_le_conn_update cp;
0505 
0506     hci_dev_lock(hdev);
0507 
0508     params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
0509     if (params) {
0510         params->conn_min_interval = min;
0511         params->conn_max_interval = max;
0512         params->conn_latency = latency;
0513         params->supervision_timeout = to_multiplier;
0514     }
0515 
0516     hci_dev_unlock(hdev);
0517 
0518     memset(&cp, 0, sizeof(cp));
0519     cp.handle       = cpu_to_le16(conn->handle);
0520     cp.conn_interval_min    = cpu_to_le16(min);
0521     cp.conn_interval_max    = cpu_to_le16(max);
0522     cp.conn_latency     = cpu_to_le16(latency);
0523     cp.supervision_timeout  = cpu_to_le16(to_multiplier);
0524     cp.min_ce_len       = cpu_to_le16(0x0000);
0525     cp.max_ce_len       = cpu_to_le16(0x0000);
0526 
0527     hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
0528 
0529     if (params)
0530         return 0x01;
0531 
0532     return 0x00;
0533 }
0534 
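/* Illustrative use of hci_le_conn_update() (hypothetical caller, e.g.
 * an L2CAP connection parameter update): interval values are in units
 * of 1.25 ms, latency is in connection events and the supervision
 * timeout is in units of 10 ms, so the call below requests a 30-50 ms
 * interval, no peripheral latency and a 4.2 s timeout.
 *
 *	hci_le_conn_update(conn, 0x0018, 0x0028, 0x0000, 0x01a4);
 */
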
0535 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
0536               __u8 ltk[16], __u8 key_size)
0537 {
0538     struct hci_dev *hdev = conn->hdev;
0539     struct hci_cp_le_start_enc cp;
0540 
0541     BT_DBG("hcon %p", conn);
0542 
0543     memset(&cp, 0, sizeof(cp));
0544 
0545     cp.handle = cpu_to_le16(conn->handle);
0546     cp.rand = rand;
0547     cp.ediv = ediv;
0548     memcpy(cp.ltk, ltk, key_size);
0549 
0550     hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
0551 }
0552 
0553 /* Device _must_ be locked */
0554 void hci_sco_setup(struct hci_conn *conn, __u8 status)
0555 {
0556     struct hci_conn *sco = conn->link;
0557 
0558     if (!sco)
0559         return;
0560 
0561     BT_DBG("hcon %p", conn);
0562 
0563     if (!status) {
0564         if (lmp_esco_capable(conn->hdev))
0565             hci_setup_sync(sco, conn->handle);
0566         else
0567             hci_add_sco(sco, conn->handle);
0568     } else {
0569         hci_connect_cfm(sco, status);
0570         hci_conn_del(sco);
0571     }
0572 }
0573 
0574 static void hci_conn_timeout(struct work_struct *work)
0575 {
0576     struct hci_conn *conn = container_of(work, struct hci_conn,
0577                          disc_work.work);
0578     int refcnt = atomic_read(&conn->refcnt);
0579 
0580     BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
0581 
0582     WARN_ON(refcnt < 0);
0583 
0584     /* FIXME: It was observed that in a failed pairing scenario, refcnt
0585      * drops below 0. Probably this is because l2cap_conn_del calls
0586      * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
0587      * dropped. After that loop hci_chan_del is called which also drops
0588      * conn. For now make sure that the ACL is alive if refcnt is higher
0589      * than 0, otherwise drop it.
0590      */
0591     if (refcnt > 0)
0592         return;
0593 
0594     /* LE connections in scanning state need special handling */
0595     if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
0596         test_bit(HCI_CONN_SCANNING, &conn->flags)) {
0597         hci_connect_le_scan_remove(conn);
0598         return;
0599     }
0600 
0601     hci_abort_conn(conn, hci_proto_disconn_ind(conn));
0602 }
0603 
0604 /* Enter sniff mode */
0605 static void hci_conn_idle(struct work_struct *work)
0606 {
0607     struct hci_conn *conn = container_of(work, struct hci_conn,
0608                          idle_work.work);
0609     struct hci_dev *hdev = conn->hdev;
0610 
0611     BT_DBG("hcon %p mode %d", conn, conn->mode);
0612 
0613     if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
0614         return;
0615 
0616     if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
0617         return;
0618 
0619     if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
0620         struct hci_cp_sniff_subrate cp;
0621         cp.handle             = cpu_to_le16(conn->handle);
0622         cp.max_latency        = cpu_to_le16(0);
0623         cp.min_remote_timeout = cpu_to_le16(0);
0624         cp.min_local_timeout  = cpu_to_le16(0);
0625         hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
0626     }
0627 
0628     if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
0629         struct hci_cp_sniff_mode cp;
0630         cp.handle       = cpu_to_le16(conn->handle);
0631         cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
0632         cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
0633         cp.attempt      = cpu_to_le16(4);
0634         cp.timeout      = cpu_to_le16(1);
0635         hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
0636     }
0637 }
0638 
0639 static void hci_conn_auto_accept(struct work_struct *work)
0640 {
0641     struct hci_conn *conn = container_of(work, struct hci_conn,
0642                          auto_accept_work.work);
0643 
0644     hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
0645              &conn->dst);
0646 }
0647 
0648 static void le_disable_advertising(struct hci_dev *hdev)
0649 {
0650     if (ext_adv_capable(hdev)) {
0651         struct hci_cp_le_set_ext_adv_enable cp;
0652 
0653         cp.enable = 0x00;
0654         cp.num_of_sets = 0x00;
0655 
0656         hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
0657                  &cp);
0658     } else {
0659         u8 enable = 0x00;
0660         hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
0661                  &enable);
0662     }
0663 }
0664 
0665 static void le_conn_timeout(struct work_struct *work)
0666 {
0667     struct hci_conn *conn = container_of(work, struct hci_conn,
0668                          le_conn_timeout.work);
0669     struct hci_dev *hdev = conn->hdev;
0670 
0671     BT_DBG("");
0672 
0673     /* We could end up here due to having done directed advertising,
0674      * so clean up the state if necessary. This should however only
0675      * happen with broken hardware or if low duty cycle was used
0676      * (which doesn't have a timeout of its own).
0677      */
0678     if (conn->role == HCI_ROLE_SLAVE) {
0679         /* Disable LE Advertising */
0680         le_disable_advertising(hdev);
0681         hci_dev_lock(hdev);
0682         hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
0683         hci_dev_unlock(hdev);
0684         return;
0685     }
0686 
0687     hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
0688 }
0689 
0690 struct iso_list_data {
0691     union {
0692         u8  cig;
0693         u8  big;
0694     };
0695     union {
0696         u8  cis;
0697         u8  bis;
0698         u16 sync_handle;
0699     };
0700     int count;
0701     struct {
0702         struct hci_cp_le_set_cig_params cp;
0703         struct hci_cis_params cis[0x11];
0704     } pdu;
0705 };
0706 
0707 static void bis_list(struct hci_conn *conn, void *data)
0708 {
0709     struct iso_list_data *d = data;
0710 
0711     /* Skip if not broadcast/ANY address */
0712     if (bacmp(&conn->dst, BDADDR_ANY))
0713         return;
0714 
0715     if (d->big != conn->iso_qos.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
0716         d->bis != conn->iso_qos.bis)
0717         return;
0718 
0719     d->count++;
0720 }
0721 
0722 static void find_bis(struct hci_conn *conn, void *data)
0723 {
0724     struct iso_list_data *d = data;
0725 
0726     /* Ignore unicast */
0727     if (bacmp(&conn->dst, BDADDR_ANY))
0728         return;
0729 
0730     d->count++;
0731 }
0732 
0733 static int terminate_big_sync(struct hci_dev *hdev, void *data)
0734 {
0735     struct iso_list_data *d = data;
0736 
0737     bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
0738 
0739     hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
0740 
0741     /* Check if ISO connection is a BIS and terminate BIG if there are
0742      * no other connections using it.
0743      */
0744     hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
0745     if (d->count)
0746         return 0;
0747 
0748     return hci_le_terminate_big_sync(hdev, d->big,
0749                      HCI_ERROR_LOCAL_HOST_TERM);
0750 }
0751 
0752 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
0753 {
0754     kfree(data);
0755 }
0756 
0757 static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
0758 {
0759     struct iso_list_data *d;
0760 
0761     bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
0762 
0763     d = kmalloc(sizeof(*d), GFP_KERNEL);
0764     if (!d)
0765         return -ENOMEM;
0766 
0767     memset(d, 0, sizeof(*d));
0768     d->big = big;
0769     d->bis = bis;
0770 
0771     return hci_cmd_sync_queue(hdev, terminate_big_sync, d,
0772                   terminate_big_destroy);
0773 }
0774 
0775 static int big_terminate_sync(struct hci_dev *hdev, void *data)
0776 {
0777     struct iso_list_data *d = data;
0778 
0779     bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
0780            d->sync_handle);
0781 
0782     /* Check if ISO connection is a BIS and terminate BIG if there are
0783      * no other connections using it.
0784      */
0785     hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
0786     if (d->count)
0787         return 0;
0788 
0789     hci_le_big_terminate_sync(hdev, d->big);
0790 
0791     return hci_le_pa_terminate_sync(hdev, d->sync_handle);
0792 }
0793 
0794 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
0795 {
0796     struct iso_list_data *d;
0797 
0798     bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
0799 
0800     d = kmalloc(sizeof(*d), GFP_KERNEL);
0801     if (!d)
0802         return -ENOMEM;
0803 
0804     memset(d, 0, sizeof(*d));
0805     d->big = big;
0806     d->sync_handle = sync_handle;
0807 
0808     return hci_cmd_sync_queue(hdev, big_terminate_sync, d,
0809                   terminate_big_destroy);
0810 }
0811 
0812 /* Cleanup BIS connection
0813  *
0814  * Detects if there are any BIS left connected in a BIG.
0815  * broadcaster: remove the advertising instance and terminate the BIG.
0816  * broadcast receiver: terminate BIG sync and terminate PA sync.
0817  */
0818 static void bis_cleanup(struct hci_conn *conn)
0819 {
0820     struct hci_dev *hdev = conn->hdev;
0821 
0822     bt_dev_dbg(hdev, "conn %p", conn);
0823 
0824     if (conn->role == HCI_ROLE_MASTER) {
0825         if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
0826             return;
0827 
0828         hci_le_terminate_big(hdev, conn->iso_qos.big,
0829                      conn->iso_qos.bis);
0830     } else {
0831         hci_le_big_terminate(hdev, conn->iso_qos.big,
0832                      conn->sync_handle);
0833     }
0834 }
0835 
0836 static int remove_cig_sync(struct hci_dev *hdev, void *data)
0837 {
0838     u8 handle = PTR_ERR(data);
0839 
0840     return hci_le_remove_cig_sync(hdev, handle);
0841 }
0842 
0843 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
0844 {
0845     bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
0846 
0847     return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
0848 }
0849 
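/* Note: hci_le_remove_cig() passes the one-byte CIG handle through the
 * void *data argument by encoding it with ERR_PTR() and decoding it
 * again with PTR_ERR() in remove_cig_sync(), avoiding a heap
 * allocation for a single value.
 */
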
0850 static void find_cis(struct hci_conn *conn, void *data)
0851 {
0852     struct iso_list_data *d = data;
0853 
0854     /* Ignore broadcast */
0855     if (!bacmp(&conn->dst, BDADDR_ANY))
0856         return;
0857 
0858     d->count++;
0859 }
0860 
0861 /* Cleanup CIS connection:
0862  *
0863  * Detects if there are any CIS left connected in a CIG and removes the CIG if none remain.
0864  */
0865 static void cis_cleanup(struct hci_conn *conn)
0866 {
0867     struct hci_dev *hdev = conn->hdev;
0868     struct iso_list_data d;
0869 
0870     memset(&d, 0, sizeof(d));
0871     d.cig = conn->iso_qos.cig;
0872 
0873     /* Check if ISO connection is a CIS and remove CIG if there are
0874      * no other connections using it.
0875      */
0876     hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
0877     if (d.count)
0878         return;
0879 
0880     hci_le_remove_cig(hdev, conn->iso_qos.cig);
0881 }
0882 
0883 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
0884                   u8 role)
0885 {
0886     struct hci_conn *conn;
0887 
0888     BT_DBG("%s dst %pMR", hdev->name, dst);
0889 
0890     conn = kzalloc(sizeof(*conn), GFP_KERNEL);
0891     if (!conn)
0892         return NULL;
0893 
0894     bacpy(&conn->dst, dst);
0895     bacpy(&conn->src, &hdev->bdaddr);
0896     conn->handle = HCI_CONN_HANDLE_UNSET;
0897     conn->hdev  = hdev;
0898     conn->type  = type;
0899     conn->role  = role;
0900     conn->mode  = HCI_CM_ACTIVE;
0901     conn->state = BT_OPEN;
0902     conn->auth_type = HCI_AT_GENERAL_BONDING;
0903     conn->io_capability = hdev->io_capability;
0904     conn->remote_auth = 0xff;
0905     conn->key_type = 0xff;
0906     conn->rssi = HCI_RSSI_INVALID;
0907     conn->tx_power = HCI_TX_POWER_INVALID;
0908     conn->max_tx_power = HCI_TX_POWER_INVALID;
0909 
0910     set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
0911     conn->disc_timeout = HCI_DISCONN_TIMEOUT;
0912 
0913     /* Set Default Authenticated payload timeout to 30s */
0914     conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
0915 
0916     if (conn->role == HCI_ROLE_MASTER)
0917         conn->out = true;
0918 
0919     switch (type) {
0920     case ACL_LINK:
0921         conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
0922         break;
0923     case LE_LINK:
0924         /* conn->src should reflect the local identity address */
0925         hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
0926         break;
0927     case ISO_LINK:
0928         /* conn->src should reflect the local identity address */
0929         hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
0930 
0931         /* set proper cleanup function */
0932         if (!bacmp(dst, BDADDR_ANY))
0933             conn->cleanup = bis_cleanup;
0934         else if (conn->role == HCI_ROLE_MASTER)
0935             conn->cleanup = cis_cleanup;
0936 
0937         break;
0938     case SCO_LINK:
0939         if (lmp_esco_capable(hdev))
0940             conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
0941                     (hdev->esco_type & EDR_ESCO_MASK);
0942         else
0943             conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
0944         break;
0945     case ESCO_LINK:
0946         conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
0947         break;
0948     }
0949 
0950     skb_queue_head_init(&conn->data_q);
0951 
0952     INIT_LIST_HEAD(&conn->chan_list);
0953 
0954     INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
0955     INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
0956     INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
0957     INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
0958     INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
0959 
0960     atomic_set(&conn->refcnt, 0);
0961 
0962     hci_dev_hold(hdev);
0963 
0964     hci_conn_hash_add(hdev, conn);
0965 
0966     /* The SCO and eSCO connections will only be notified when their
0967      * setup has been completed. This is different to ACL links which
0968      * can be notified right away.
0969      */
0970     if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
0971         if (hdev->notify)
0972             hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
0973     }
0974 
0975     hci_conn_init_sysfs(conn);
0976 
0977     return conn;
0978 }
0979 
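/* Illustrative use of hci_conn_add() (hypothetical caller): it only
 * allocates and initialises the connection object, its work items and
 * its sysfs entry; no HCI traffic is generated at this point.
 *
 *	struct hci_conn *conn;
 *
 *	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
 *	if (!conn)
 *		return -ENOMEM;
 */
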
0980 int hci_conn_del(struct hci_conn *conn)
0981 {
0982     struct hci_dev *hdev = conn->hdev;
0983 
0984     BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
0985 
0986     cancel_delayed_work_sync(&conn->disc_work);
0987     cancel_delayed_work_sync(&conn->auto_accept_work);
0988     cancel_delayed_work_sync(&conn->idle_work);
0989 
0990     if (conn->type == ACL_LINK) {
0991         struct hci_conn *sco = conn->link;
0992         if (sco)
0993             sco->link = NULL;
0994 
0995         /* Unacked frames */
0996         hdev->acl_cnt += conn->sent;
0997     } else if (conn->type == LE_LINK) {
0998         cancel_delayed_work(&conn->le_conn_timeout);
0999 
1000         if (hdev->le_pkts)
1001             hdev->le_cnt += conn->sent;
1002         else
1003             hdev->acl_cnt += conn->sent;
1004     } else {
1005         struct hci_conn *acl = conn->link;
1006         if (acl) {
1007             acl->link = NULL;
1008             hci_conn_drop(acl);
1009         }
1010     }
1011 
1012     if (conn->amp_mgr)
1013         amp_mgr_put(conn->amp_mgr);
1014 
1015     skb_queue_purge(&conn->data_q);
1016 
1017     /* Remove the connection from the list and cleanup its remaining
1018      * state. This is a separate function since for some cases like
1019      * BT_CONNECT_SCAN we *only* want the cleanup part without the
1020      * rest of hci_conn_del.
1021      */
1022     hci_conn_cleanup(conn);
1023 
1024     return 0;
1025 }
1026 
1027 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1028 {
1029     int use_src = bacmp(src, BDADDR_ANY);
1030     struct hci_dev *hdev = NULL, *d;
1031 
1032     BT_DBG("%pMR -> %pMR", src, dst);
1033 
1034     read_lock(&hci_dev_list_lock);
1035 
1036     list_for_each_entry(d, &hci_dev_list, list) {
1037         if (!test_bit(HCI_UP, &d->flags) ||
1038             hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
1039             d->dev_type != HCI_PRIMARY)
1040             continue;
1041 
1042         /* Simple routing:
1043          *   No source address - find interface with bdaddr != dst
1044          *   Source address    - find interface with bdaddr == src
1045          */
1046 
1047         if (use_src) {
1048             bdaddr_t id_addr;
1049             u8 id_addr_type;
1050 
1051             if (src_type == BDADDR_BREDR) {
1052                 if (!lmp_bredr_capable(d))
1053                     continue;
1054                 bacpy(&id_addr, &d->bdaddr);
1055                 id_addr_type = BDADDR_BREDR;
1056             } else {
1057                 if (!lmp_le_capable(d))
1058                     continue;
1059 
1060                 hci_copy_identity_address(d, &id_addr,
1061                               &id_addr_type);
1062 
1063                 /* Convert from HCI to three-value type */
1064                 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1065                     id_addr_type = BDADDR_LE_PUBLIC;
1066                 else
1067                     id_addr_type = BDADDR_LE_RANDOM;
1068             }
1069 
1070             if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1071                 hdev = d; break;
1072             }
1073         } else {
1074             if (bacmp(&d->bdaddr, dst)) {
1075                 hdev = d; break;
1076             }
1077         }
1078     }
1079 
1080     if (hdev)
1081         hdev = hci_dev_hold(hdev);
1082 
1083     read_unlock(&hci_dev_list_lock);
1084     return hdev;
1085 }
1086 EXPORT_SYMBOL(hci_get_route);
1087 
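/* Illustrative use of hci_get_route() (hypothetical caller, e.g. a
 * protocol connect path): the returned hci_dev is reference counted
 * and must be released with hci_dev_put() when no longer needed.
 *
 *	struct hci_dev *hdev;
 *
 *	hdev = hci_get_route(&dst, &src, BDADDR_BREDR);
 *	if (!hdev)
 *		return -EHOSTUNREACH;
 *	...
 *	hci_dev_put(hdev);
 */
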
1088 /* This function requires the caller holds hdev->lock */
1089 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1090 {
1091     struct hci_dev *hdev = conn->hdev;
1092     struct hci_conn_params *params;
1093 
1094     params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
1095                        conn->dst_type);
1096     if (params && params->conn) {
1097         hci_conn_drop(params->conn);
1098         hci_conn_put(params->conn);
1099         params->conn = NULL;
1100     }
1101 
1102     /* If the status indicates successful cancellation of
1103      * the attempt (i.e. Unknown Connection Id) there's no point in
1104      * notifying failure since we'll go back to keep trying to
1105      * connect. The only exception is explicit connect requests
1106      * where a timeout + cancel does indicate an actual failure.
1107      */
1108     if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
1109         (params && params->explicit_connect))
1110         mgmt_connect_failed(hdev, &conn->dst, conn->type,
1111                     conn->dst_type, status);
1112 
1113     /* Since we may have temporarily stopped the background scanning in
1114      * favor of connection establishment, we should restart it.
1115      */
1116     hci_update_passive_scan(hdev);
1117 
1118     /* Enable advertising in case this was a failed connection
1119      * attempt as a peripheral.
1120      */
1121     hci_enable_advertising(hdev);
1122 }
1123 
1124 /* This function requires the caller holds hdev->lock */
1125 void hci_conn_failed(struct hci_conn *conn, u8 status)
1126 {
1127     struct hci_dev *hdev = conn->hdev;
1128 
1129     bt_dev_dbg(hdev, "status 0x%2.2x", status);
1130 
1131     switch (conn->type) {
1132     case LE_LINK:
1133         hci_le_conn_failed(conn, status);
1134         break;
1135     case ACL_LINK:
1136         mgmt_connect_failed(hdev, &conn->dst, conn->type,
1137                     conn->dst_type, status);
1138         break;
1139     }
1140 
1141     conn->state = BT_CLOSED;
1142     hci_connect_cfm(conn, status);
1143     hci_conn_del(conn);
1144 }
1145 
1146 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
1147 {
1148     struct hci_conn *conn = data;
1149 
1150     hci_dev_lock(hdev);
1151 
1152     if (!err) {
1153         hci_connect_le_scan_cleanup(conn);
1154         goto done;
1155     }
1156 
1157     bt_dev_err(hdev, "request failed to create LE connection: err %d", err);
1158 
1159     /* Check if connection is still pending */
1160     if (conn != hci_lookup_le_connect(hdev))
1161         goto done;
1162 
1163     hci_conn_failed(conn, bt_status(err));
1164 
1165 done:
1166     hci_dev_unlock(hdev);
1167 }
1168 
1169 static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
1170 {
1171     struct hci_conn *conn = data;
1172 
1173     bt_dev_dbg(hdev, "conn %p", conn);
1174 
1175     return hci_le_create_conn_sync(hdev, conn);
1176 }
1177 
1178 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1179                 u8 dst_type, bool dst_resolved, u8 sec_level,
1180                 u16 conn_timeout, u8 role)
1181 {
1182     struct hci_conn *conn;
1183     struct smp_irk *irk;
1184     int err;
1185 
1186     /* Let's make sure that LE is enabled. */
1187     if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1188         if (lmp_le_capable(hdev))
1189             return ERR_PTR(-ECONNREFUSED);
1190 
1191         return ERR_PTR(-EOPNOTSUPP);
1192     }
1193 
1194     /* Since the controller supports only one LE connection attempt at a
1195      * time, we return -EBUSY if there is any connection attempt running.
1196      */
1197     if (hci_lookup_le_connect(hdev))
1198         return ERR_PTR(-EBUSY);
1199 
1200     /* If there's already a connection object but it's not in
1201      * scanning state it means it must already be established, in
1202      * which case we can't do anything else except report a failure
1203      * to connect.
1204      */
1205     conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1206     if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1207         return ERR_PTR(-EBUSY);
1208     }
1209 
1210     /* Check if the destination address has been resolved by the
1211      * controller, since if it has, the identity address shall be used.
1212      */
1213     if (!dst_resolved) {
1214         /* When given an identity address with existing identity
1215          * resolving key, the connection needs to be established
1216          * to a resolvable random address.
1217          *
1218          * Storing the resolvable random address is required here
1219          * to handle connection failures. The address will later
1220          * be resolved back into the original identity address
1221          * from the connect request.
1222          */
1223         irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1224         if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1225             dst = &irk->rpa;
1226             dst_type = ADDR_LE_DEV_RANDOM;
1227         }
1228     }
1229 
1230     if (conn) {
1231         bacpy(&conn->dst, dst);
1232     } else {
1233         conn = hci_conn_add(hdev, LE_LINK, dst, role);
1234         if (!conn)
1235             return ERR_PTR(-ENOMEM);
1236         hci_conn_hold(conn);
1237         conn->pending_sec_level = sec_level;
1238     }
1239 
1240     conn->dst_type = dst_type;
1241     conn->sec_level = BT_SECURITY_LOW;
1242     conn->conn_timeout = conn_timeout;
1243 
1244     conn->state = BT_CONNECT;
1245     clear_bit(HCI_CONN_SCANNING, &conn->flags);
1246 
1247     err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
1248                  create_le_conn_complete);
1249     if (err) {
1250         hci_conn_del(conn);
1251         return ERR_PTR(err);
1252     }
1253 
1254     return conn;
1255 }
1256 
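/* Illustrative use of hci_connect_le() (hypothetical caller): errors
 * are returned as ERR_PTR() values, so the result is checked with
 * IS_ERR() rather than against NULL.
 *
 *	conn = hci_connect_le(hdev, dst, dst_type, false, BT_SECURITY_LOW,
 *			      HCI_LE_CONN_TIMEOUT, HCI_ROLE_MASTER);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 */
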
1257 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1258 {
1259     struct hci_conn *conn;
1260 
1261     conn = hci_conn_hash_lookup_le(hdev, addr, type);
1262     if (!conn)
1263         return false;
1264 
1265     if (conn->state != BT_CONNECTED)
1266         return false;
1267 
1268     return true;
1269 }
1270 
1271 /* This function requires the caller holds hdev->lock */
1272 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1273                     bdaddr_t *addr, u8 addr_type)
1274 {
1275     struct hci_conn_params *params;
1276 
1277     if (is_connected(hdev, addr, addr_type))
1278         return -EISCONN;
1279 
1280     params = hci_conn_params_lookup(hdev, addr, addr_type);
1281     if (!params) {
1282         params = hci_conn_params_add(hdev, addr, addr_type);
1283         if (!params)
1284             return -ENOMEM;
1285 
1286         /* If we created new params, mark them to be deleted in
1287          * hci_connect_le_scan_cleanup. This is a different case from
1288          * existing disabled params, which will stay after cleanup.
1289          */
1290         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1291     }
1292 
1293     /* We're trying to connect, so make sure params are at pend_le_conns */
1294     if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1295         params->auto_connect == HCI_AUTO_CONN_REPORT ||
1296         params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1297         list_del_init(&params->action);
1298         list_add(&params->action, &hdev->pend_le_conns);
1299     }
1300 
1301     params->explicit_connect = true;
1302 
1303     BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1304            params->auto_connect);
1305 
1306     return 0;
1307 }
1308 
1309 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1310 {
1311     struct iso_list_data data;
1312 
1313     /* Allocate a BIG if not set */
1314     if (qos->big == BT_ISO_QOS_BIG_UNSET) {
1315         for (data.big = 0x00; data.big < 0xef; data.big++) {
1316             data.count = 0;
1317             data.bis = 0xff;
1318 
1319             hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1320                          BT_BOUND, &data);
1321             if (!data.count)
1322                 break;
1323         }
1324 
1325         if (data.big == 0xef)
1326             return -EADDRNOTAVAIL;
1327 
1328         /* Update BIG */
1329         qos->big = data.big;
1330     }
1331 
1332     return 0;
1333 }
1334 
1335 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1336 {
1337     struct iso_list_data data;
1338 
1339     /* Allocate BIS if not set */
1340     if (qos->bis == BT_ISO_QOS_BIS_UNSET) {
1341         /* Find an unused adv set to advertise the BIS, skipping instance
1342          * 0x00 since it is reserved as the general purpose set.
1343          */
1344         for (data.bis = 0x01; data.bis < hdev->le_num_of_adv_sets;
1345              data.bis++) {
1346             data.count = 0;
1347 
1348             hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1349                          BT_BOUND, &data);
1350             if (!data.count)
1351                 break;
1352         }
1353 
1354         if (data.bis == hdev->le_num_of_adv_sets)
1355             return -EADDRNOTAVAIL;
1356 
1357         /* Update BIS */
1358         qos->bis = data.bis;
1359     }
1360 
1361     return 0;
1362 }
1363 
1364 /* This function requires the caller holds hdev->lock */
1365 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1366                     struct bt_iso_qos *qos)
1367 {
1368     struct hci_conn *conn;
1369     struct iso_list_data data;
1370     int err;
1371 
1372     /* Let's make sure that LE is enabled. */
1373     if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1374         if (lmp_le_capable(hdev))
1375             return ERR_PTR(-ECONNREFUSED);
1376         return ERR_PTR(-EOPNOTSUPP);
1377     }
1378 
1379     err = qos_set_big(hdev, qos);
1380     if (err)
1381         return ERR_PTR(err);
1382 
1383     err = qos_set_bis(hdev, qos);
1384     if (err)
1385         return ERR_PTR(err);
1386 
1387     data.big = qos->big;
1388     data.bis = qos->bis;
1389     data.count = 0;
1390 
1391     /* Check if there is already a matching BIG/BIS */
1392     hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data);
1393     if (data.count)
1394         return ERR_PTR(-EADDRINUSE);
1395 
1396     conn = hci_conn_hash_lookup_bis(hdev, dst, qos->big, qos->bis);
1397     if (conn)
1398         return ERR_PTR(-EADDRINUSE);
1399 
1400     conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1401     if (!conn)
1402         return ERR_PTR(-ENOMEM);
1403 
1404     set_bit(HCI_CONN_PER_ADV, &conn->flags);
1405     conn->state = BT_CONNECT;
1406 
1407     hci_conn_hold(conn);
1408     return conn;
1409 }
1410 
1411 /* This function requires the caller holds hdev->lock */
1412 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1413                      u8 dst_type, u8 sec_level,
1414                      u16 conn_timeout,
1415                      enum conn_reasons conn_reason)
1416 {
1417     struct hci_conn *conn;
1418 
1419     /* Let's make sure that LE is enabled. */
1420     if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1421         if (lmp_le_capable(hdev))
1422             return ERR_PTR(-ECONNREFUSED);
1423 
1424         return ERR_PTR(-EOPNOTSUPP);
1425     }
1426 
1427     /* Some devices send ATT messages as soon as the physical link is
1428      * established. To be able to handle these ATT messages, the user-
1429      * space first establishes the connection and then starts the pairing
1430      * process.
1431      *
1432      * So if a hci_conn object already exists for the following connection
1433      * attempt, we simply update pending_sec_level and auth_type fields
1434      * and return the object found.
1435      */
1436     conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1437     if (conn) {
1438         if (conn->pending_sec_level < sec_level)
1439             conn->pending_sec_level = sec_level;
1440         goto done;
1441     }
1442 
1443     BT_DBG("requesting refresh of dst_addr");
1444 
1445     conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1446     if (!conn)
1447         return ERR_PTR(-ENOMEM);
1448 
1449     if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1450         hci_conn_del(conn);
1451         return ERR_PTR(-EBUSY);
1452     }
1453 
1454     conn->state = BT_CONNECT;
1455     set_bit(HCI_CONN_SCANNING, &conn->flags);
1456     conn->dst_type = dst_type;
1457     conn->sec_level = BT_SECURITY_LOW;
1458     conn->pending_sec_level = sec_level;
1459     conn->conn_timeout = conn_timeout;
1460     conn->conn_reason = conn_reason;
1461 
1462     hci_update_passive_scan(hdev);
1463 
1464 done:
1465     hci_conn_hold(conn);
1466     return conn;
1467 }
1468 
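/* Illustrative use of hci_connect_le_scan() (hypothetical caller such
 * as a GATT/L2CAP connect path, with hdev->lock held as required):
 *
 *	conn = hci_connect_le_scan(hdev, dst, dst_type, BT_SECURITY_MEDIUM,
 *				   HCI_LE_CONN_TIMEOUT,
 *				   CONN_REASON_L2CAP_CHAN);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 */
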
1469 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1470                  u8 sec_level, u8 auth_type,
1471                  enum conn_reasons conn_reason)
1472 {
1473     struct hci_conn *acl;
1474 
1475     if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1476         if (lmp_bredr_capable(hdev))
1477             return ERR_PTR(-ECONNREFUSED);
1478 
1479         return ERR_PTR(-EOPNOTSUPP);
1480     }
1481 
1482     acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1483     if (!acl) {
1484         acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1485         if (!acl)
1486             return ERR_PTR(-ENOMEM);
1487     }
1488 
1489     hci_conn_hold(acl);
1490 
1491     acl->conn_reason = conn_reason;
1492     if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1493         acl->sec_level = BT_SECURITY_LOW;
1494         acl->pending_sec_level = sec_level;
1495         acl->auth_type = auth_type;
1496         hci_acl_create_connection(acl);
1497     }
1498 
1499     return acl;
1500 }
1501 
1502 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1503                  __u16 setting, struct bt_codec *codec)
1504 {
1505     struct hci_conn *acl;
1506     struct hci_conn *sco;
1507 
1508     acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1509                   CONN_REASON_SCO_CONNECT);
1510     if (IS_ERR(acl))
1511         return acl;
1512 
1513     sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1514     if (!sco) {
1515         sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1516         if (!sco) {
1517             hci_conn_drop(acl);
1518             return ERR_PTR(-ENOMEM);
1519         }
1520     }
1521 
1522     acl->link = sco;
1523     sco->link = acl;
1524 
1525     hci_conn_hold(sco);
1526 
1527     sco->setting = setting;
1528     sco->codec = *codec;
1529 
1530     if (acl->state == BT_CONNECTED &&
1531         (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1532         set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1533         hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1534 
1535         if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1536             /* defer SCO setup until mode change completed */
1537             set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1538             return sco;
1539         }
1540 
1541         hci_sco_setup(acl, 0x00);
1542     }
1543 
1544     return sco;
1545 }
1546 
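/* Illustrative use of hci_connect_sco() (hypothetical caller, e.g. an
 * SCO socket connect): the ACL link is brought up first and the actual
 * SCO/eSCO setup is deferred until the ACL is connected.
 *
 *	sco = hci_connect_sco(hdev, SCO_LINK, &dst, setting, &codec);
 *	if (IS_ERR(sco))
 *		return PTR_ERR(sco);
 */
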
1547 static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
1548 {
1549     struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis];
1550 
1551     cis->cis_id = qos->cis;
1552     cis->c_sdu  = cpu_to_le16(qos->out.sdu);
1553     cis->p_sdu  = cpu_to_le16(qos->in.sdu);
1554     cis->c_phy  = qos->out.phy ? qos->out.phy : qos->in.phy;
1555     cis->p_phy  = qos->in.phy ? qos->in.phy : qos->out.phy;
1556     cis->c_rtn  = qos->out.rtn;
1557     cis->p_rtn  = qos->in.rtn;
1558 
1559     d->pdu.cp.num_cis++;
1560 }
1561 
1562 static void cis_list(struct hci_conn *conn, void *data)
1563 {
1564     struct iso_list_data *d = data;
1565 
1566     /* Skip if broadcast/ANY address */
1567     if (!bacmp(&conn->dst, BDADDR_ANY))
1568         return;
1569 
1570     if (d->cig != conn->iso_qos.cig || d->cis == BT_ISO_QOS_CIS_UNSET ||
1571         d->cis != conn->iso_qos.cis)
1572         return;
1573 
1574     d->count++;
1575 
1576     if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET ||
1577         d->count >= ARRAY_SIZE(d->pdu.cis))
1578         return;
1579 
1580     cis_add(d, &conn->iso_qos);
1581 }
1582 
1583 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1584 {
1585     struct hci_dev *hdev = conn->hdev;
1586     struct hci_cp_le_create_big cp;
1587 
1588     memset(&cp, 0, sizeof(cp));
1589 
1590     cp.handle = qos->big;
1591     cp.adv_handle = qos->bis;
1592     cp.num_bis  = 0x01;
1593     hci_cpu_to_le24(qos->out.interval, cp.bis.sdu_interval);
1594     cp.bis.sdu = cpu_to_le16(qos->out.sdu);
1595     cp.bis.latency =  cpu_to_le16(qos->out.latency);
1596     cp.bis.rtn  = qos->out.rtn;
1597     cp.bis.phy  = qos->out.phy;
1598     cp.bis.packing = qos->packing;
1599     cp.bis.framing = qos->framing;
1600     cp.bis.encryption = 0x00;
1601     memset(&cp.bis.bcode, 0, sizeof(cp.bis.bcode));
1602 
1603     return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1604 }
1605 
1606 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1607 {
1608     struct hci_dev *hdev = conn->hdev;
1609     struct iso_list_data data;
1610 
1611     memset(&data, 0, sizeof(data));
1612 
1613     /* Allocate a CIG if not set */
1614     if (qos->cig == BT_ISO_QOS_CIG_UNSET) {
1615         for (data.cig = 0x00; data.cig < 0xff; data.cig++) {
1616             data.count = 0;
1617             data.cis = 0xff;
1618 
1619             hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
1620                          BT_BOUND, &data);
1621             if (data.count)
1622                 continue;
1623 
1624             hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
1625                          BT_CONNECTED, &data);
1626             if (!data.count)
1627                 break;
1628         }
1629 
1630         if (data.cig == 0xff)
1631             return false;
1632 
1633         /* Update CIG */
1634         qos->cig = data.cig;
1635     }
1636 
1637     data.pdu.cp.cig_id = qos->cig;
1638     hci_cpu_to_le24(qos->out.interval, data.pdu.cp.c_interval);
1639     hci_cpu_to_le24(qos->in.interval, data.pdu.cp.p_interval);
1640     data.pdu.cp.sca = qos->sca;
1641     data.pdu.cp.packing = qos->packing;
1642     data.pdu.cp.framing = qos->framing;
1643     data.pdu.cp.c_latency = cpu_to_le16(qos->out.latency);
1644     data.pdu.cp.p_latency = cpu_to_le16(qos->in.latency);
1645 
1646     if (qos->cis != BT_ISO_QOS_CIS_UNSET) {
1647         data.count = 0;
1648         data.cig = qos->cig;
1649         data.cis = qos->cis;
1650 
1651         hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
1652                      &data);
1653         if (data.count)
1654             return false;
1655 
1656         cis_add(&data, qos);
1657     }
1658 
1659     /* Reprogram all CIS(s) with the same CIG */
1660     for (data.cig = qos->cig, data.cis = 0x00; data.cis < 0x11;
1661          data.cis++) {
1662         data.count = 0;
1663 
1664         hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
1665                      &data);
1666         if (data.count)
1667             continue;
1668 
1669         /* Allocate a CIS if not set */
1670         if (qos->cis == BT_ISO_QOS_CIS_UNSET) {
1671             /* Update CIS */
1672             qos->cis = data.cis;
1673             cis_add(&data, qos);
1674         }
1675     }
1676 
1677     if (qos->cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
1678         return false;
1679 
1680     if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1681              sizeof(data.pdu.cp) +
1682              (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)),
1683              &data.pdu) < 0)
1684         return false;
1685 
1686     return true;
1687 }
1688 
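/* Look up or create the ISO connection for dst, fill in any unset QoS
 * intervals/latencies from the opposite direction, program the CIG
 * parameters and move the connection to BT_BOUND.
 */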
1689 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1690                   __u8 dst_type, struct bt_iso_qos *qos)
1691 {
1692     struct hci_conn *cis;
1693 
1694     cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type);
1695     if (!cis) {
1696         cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1697         if (!cis)
1698             return ERR_PTR(-ENOMEM);
1699         cis->cleanup = cis_cleanup;
1700     }
1701 
1702     if (cis->state == BT_CONNECTED)
1703         return cis;
1704 
1705     /* Check if the CIS has already been set up and the settings match */
1706     if (cis->state == BT_BOUND &&
1707         !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1708         return cis;
1709 
1710     /* Update LINK PHYs according to QoS preference */
1711     cis->le_tx_phy = qos->out.phy;
1712     cis->le_rx_phy = qos->in.phy;
1713 
1714     /* If output interval is not set use the input interval as it cannot be
1715      * 0x000000.
1716      */
1717     if (!qos->out.interval)
1718         qos->out.interval = qos->in.interval;
1719 
1720     /* If input interval is not set use the output interval as it cannot be
1721      * 0x000000.
1722      */
1723     if (!qos->in.interval)
1724         qos->in.interval = qos->out.interval;
1725 
1726     /* If output latency is not set use the input latency as it cannot be
1727      * 0x0000.
1728      */
1729     if (!qos->out.latency)
1730         qos->out.latency = qos->in.latency;
1731 
1732     /* If input latency is not set use the output latency as it cannot be
1733      * 0x0000.
1734      */
1735     if (!qos->in.latency)
1736         qos->in.latency = qos->out.latency;
1737 
1738     if (!hci_le_set_cig_params(cis, qos)) {
1739         hci_conn_drop(cis);
1740         return ERR_PTR(-EINVAL);
1741     }
1742 
1743     cis->iso_qos = *qos;
1744     cis->state = BT_BOUND;
1745 
1746     return cis;
1747 }
1748 
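/* Set up the HCI ISO data path for each direction that has an SDU size
 * configured, using the Transparent Data codec.
 */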
1749 bool hci_iso_setup_path(struct hci_conn *conn)
1750 {
1751     struct hci_dev *hdev = conn->hdev;
1752     struct hci_cp_le_setup_iso_path cmd;
1753 
1754     memset(&cmd, 0, sizeof(cmd));
1755 
1756     if (conn->iso_qos.out.sdu) {
1757         cmd.handle = cpu_to_le16(conn->handle);
1758         cmd.direction = 0x00; /* Input (Host to Controller) */
1759         cmd.path = 0x00; /* HCI path if enabled */
1760         cmd.codec = 0x03; /* Transparent Data */
1761 
1762         if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1763                  &cmd) < 0)
1764             return false;
1765     }
1766 
1767     if (conn->iso_qos.in.sdu) {
1768         cmd.handle = cpu_to_le16(conn->handle);
1769         cmd.direction = 0x01; /* Output (Controller to Host) */
1770         cmd.path = 0x00; /* HCI path if enabled */
1771         cmd.codec = 0x03; /* Transparent Data */
1772 
1773         if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1774                  &cmd) < 0)
1775             return false;
1776     }
1777 
1778     return true;
1779 }
1780 
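/* hci_cmd_sync callback: build a single LE Create CIS command covering every
 * CIS of the same CIG in BT_CONNECT state; nothing is sent while any CIS of
 * the CIG is not ready, since the spec does not allow issuing the command
 * again before the previous one has completed.
 */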
1781 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
1782 {
1783     struct {
1784         struct hci_cp_le_create_cis cp;
1785         struct hci_cis cis[0x1f];
1786     } cmd;
1787     struct hci_conn *conn = data;
1788     u8 cig;
1789 
1790     memset(&cmd, 0, sizeof(cmd));
1791     cmd.cis[0].acl_handle = cpu_to_le16(conn->link->handle);
1792     cmd.cis[0].cis_handle = cpu_to_le16(conn->handle);
1793     cmd.cp.num_cis++;
1794     cig = conn->iso_qos.cig;
1795 
1796     hci_dev_lock(hdev);
1797 
1798     rcu_read_lock();
1799 
1800     list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
1801         struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];
1802 
1803         if (conn == data || conn->type != ISO_LINK ||
1804             conn->state == BT_CONNECTED || conn->iso_qos.cig != cig)
1805             continue;
1806 
1807         /* Check if all CIS(s) belonging to a CIG are ready */
1808         if (conn->link->state != BT_CONNECTED ||
1809             conn->state != BT_CONNECT) {
1810             cmd.cp.num_cis = 0;
1811             break;
1812         }
1813 
1814         /* Group all CIS with state BT_CONNECT since the spec doesn't
1815          * allow sending them individually:
1816          *
1817          * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
1818          * page 2566:
1819          *
1820          * If the Host issues this command before all the
1821          * HCI_LE_CIS_Established events from the previous use of the
1822          * command have been generated, the Controller shall return the
1823          * error code Command Disallowed (0x0C).
1824          */
1825         cis->acl_handle = cpu_to_le16(conn->link->handle);
1826         cis->cis_handle = cpu_to_le16(conn->handle);
1827         cmd.cp.num_cis++;
1828     }
1829 
1830     rcu_read_unlock();
1831 
1832     hci_dev_unlock(hdev);
1833 
1834     if (!cmd.cp.num_cis)
1835         return 0;
1836 
1837     return hci_send_cmd(hdev, HCI_OP_LE_CREATE_CIS, sizeof(cmd.cp) +
1838                 sizeof(cmd.cis[0]) * cmd.cp.num_cis, &cmd);
1839 }
1840 
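/* Queue LE Create CIS for the CIS associated with conn (either the ISO link
 * itself or the CIS linked to an LE ACL) unless it is already in BT_CONNECT
 * state.
 */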
1841 int hci_le_create_cis(struct hci_conn *conn)
1842 {
1843     struct hci_conn *cis;
1844     struct hci_dev *hdev = conn->hdev;
1845     int err;
1846 
1847     switch (conn->type) {
1848     case LE_LINK:
1849         if (!conn->link || conn->state != BT_CONNECTED)
1850             return -EINVAL;
1851         cis = conn->link;
1852         break;
1853     case ISO_LINK:
1854         cis = conn;
1855         break;
1856     default:
1857         return -EINVAL;
1858     }
1859 
1860     if (cis->state == BT_CONNECT)
1861         return 0;
1862 
1863     /* Queue Create CIS */
1864     err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL);
1865     if (err)
1866         return err;
1867 
1868     cis->state = BT_CONNECT;
1869 
1870     return 0;
1871 }
1872 
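/* Fill in unset fields of a unidirectional ISO QoS from defaults: the
 * controller MTU for the SDU size, the ACL PHY, and the LE connection
 * interval/latency.
 */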
1873 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
1874                   struct bt_iso_io_qos *qos, __u8 phy)
1875 {
1876     /* Only set MTU if PHY is enabled */
1877     if (!qos->sdu && qos->phy) {
1878         if (hdev->iso_mtu > 0)
1879             qos->sdu = hdev->iso_mtu;
1880         else if (hdev->le_mtu > 0)
1881             qos->sdu = hdev->le_mtu;
1882         else
1883             qos->sdu = hdev->acl_mtu;
1884     }
1885 
1886     /* Use the same PHY as ACL if set to any */
1887     if (qos->phy == BT_ISO_PHY_ANY)
1888         qos->phy = phy;
1889 
1890     /* Use LE ACL connection interval if not set */
1891     if (!qos->interval)
1892         /* Convert the ACL connection interval from 1.25 ms units to us */
1893         qos->interval = conn->le_conn_interval * 1250;
1894 
1895     /* Use LE ACL connection latency if not set */
1896     if (!qos->latency)
1897         qos->latency = conn->le_conn_latency;
1898 }
1899 
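/* Bind a broadcast (BIS) connection to the given QoS and move it to
 * BT_BOUND.
 */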
1900 static struct hci_conn *hci_bind_bis(struct hci_conn *conn,
1901                      struct bt_iso_qos *qos)
1902 {
1903     /* Update LINK PHYs according to QoS preference */
1904     conn->le_tx_phy = qos->out.phy;
1905     conn->le_rx_phy = qos->in.phy;
1906     conn->iso_qos = *qos;
1907     conn->state = BT_BOUND;
1908 
1909     return conn;
1910 }
1911 
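/* hci_cmd_sync callback: start periodic advertising for the BIS and then
 * issue LE Create BIG.
 */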
1912 static int create_big_sync(struct hci_dev *hdev, void *data)
1913 {
1914     struct hci_conn *conn = data;
1915     struct bt_iso_qos *qos = &conn->iso_qos;
1916     u16 interval, sync_interval = 0;
1917     u32 flags = 0;
1918     int err;
1919 
1920     if (qos->out.phy == 0x02)
1921         flags |= MGMT_ADV_FLAG_SEC_2M;
1922 
1923     /* Align intervals */
1924     interval = qos->out.interval / 1250;
1925 
1926     if (qos->bis)
1927         sync_interval = qos->sync_interval * 1600;
1928 
1929     err = hci_start_per_adv_sync(hdev, qos->bis, conn->le_per_adv_data_len,
1930                      conn->le_per_adv_data, flags, interval,
1931                      interval, sync_interval);
1932     if (err)
1933         return err;
1934 
1935     return hci_le_create_big(conn, &conn->iso_qos);
1936 }
1937 
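/* Completion callback for create_pa_sync(): log any failure and free the
 * command parameters.
 */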
1938 static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
1939 {
1940     struct hci_cp_le_pa_create_sync *cp = data;
1941 
1942     bt_dev_dbg(hdev, "");
1943 
1944     if (err)
1945         bt_dev_err(hdev, "Unable to create PA: %d", err);
1946 
1947     kfree(cp);
1948 }
1949 
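/* hci_cmd_sync callback: issue LE Periodic Advertising Create Sync and
 * update passive scanning.
 */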
1950 static int create_pa_sync(struct hci_dev *hdev, void *data)
1951 {
1952     struct hci_cp_le_pa_create_sync *cp = data;
1953     int err;
1954 
1955     err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
1956                     sizeof(*cp), cp, HCI_CMD_TIMEOUT);
1957     if (err) {
1958         hci_dev_clear_flag(hdev, HCI_PA_SYNC);
1959         return err;
1960     }
1961 
1962     return hci_update_passive_scan_sync(hdev);
1963 }
1964 
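/* Start synchronizing to periodic advertising from dst with the given SID:
 * allocate the command parameters and queue create_pa_sync().
 */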
1965 int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
1966                __u8 sid)
1967 {
1968     struct hci_cp_le_pa_create_sync *cp;
1969 
1970     if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
1971         return -EBUSY;
1972 
1973     cp = kmalloc(sizeof(*cp), GFP_KERNEL);
1974     if (!cp) {
1975         hci_dev_clear_flag(hdev, HCI_PA_SYNC);
1976         return -ENOMEM;
1977     }
1978 
1979     /* Convert from ISO socket address type to HCI address type */
1980     if (dst_type == BDADDR_LE_PUBLIC)
1981         dst_type = ADDR_LE_DEV_PUBLIC;
1982     else
1983         dst_type = ADDR_LE_DEV_RANDOM;
1984 
1985     memset(cp, 0, sizeof(*cp));
1986     cp->sid = sid;
1987     cp->addr_type = dst_type;
1988     bacpy(&cp->addr, dst);
1989 
1990     /* Queue start pa_create_sync and scan */
1991     return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
1992 }
1993 
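/* Send LE BIG Create Sync to synchronize to the given BIS indexes of the
 * periodic advertising train identified by sync_handle.
 */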
1994 int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
1995                __u16 sync_handle, __u8 num_bis, __u8 bis[])
1996 {
1997     struct {
1998         struct hci_cp_le_big_create_sync cp;
1999         __u8  bis[0x11];
2000     } __packed pdu;
2001     int err;
2002 
2003     if (num_bis > sizeof(pdu.bis))
2004         return -EINVAL;
2005 
2006     err = qos_set_big(hdev, qos);
2007     if (err)
2008         return err;
2009 
2010     memset(&pdu, 0, sizeof(pdu));
2011     pdu.cp.handle = qos->big;
2012     pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2013     pdu.cp.num_bis = num_bis;
2014     memcpy(pdu.bis, bis, num_bis);
2015 
2016     return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2017                 sizeof(pdu.cp) + num_bis, &pdu);
2018 }
2019 
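/* Completion callback for create_big_sync(): tear the connection down if the
 * BIG could not be created.
 */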
2020 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2021 {
2022     struct hci_conn *conn = data;
2023 
2024     bt_dev_dbg(hdev, "conn %p", conn);
2025 
2026     if (err) {
2027         bt_dev_err(hdev, "Unable to create BIG: %d", err);
2028         hci_connect_cfm(conn, err);
2029         hci_conn_del(conn);
2030     }
2031 }
2032 
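/* Create a BIG broadcaster: bind a BIS connection, append the BASE to the
 * periodic advertising data as a Basic Announcement and queue periodic
 * advertising plus LE Create BIG.
 */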
2033 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2034                  __u8 dst_type, struct bt_iso_qos *qos,
2035                  __u8 base_len, __u8 *base)
2036 {
2037     struct hci_conn *conn;
2038     int err;
2039 
2040     /* We need an hci_conn object using BDADDR_ANY as dst */
2041     conn = hci_add_bis(hdev, dst, qos);
2042     if (IS_ERR(conn))
2043         return conn;
2044 
2045     conn = hci_bind_bis(conn, qos);
2046     if (!conn) {
2047         hci_conn_drop(conn);
2048         return ERR_PTR(-ENOMEM);
2049     }
2050 
2051     /* Add Basic Announcement into Periodic Adv Data if BASE is set */
2052     if (base_len && base) {
2053         base_len = eir_append_service_data(conn->le_per_adv_data, 0,
2054                            0x1851, base, base_len);
2055         conn->le_per_adv_data_len = base_len;
2056     }
2057 
2058     /* Queue start periodic advertising and create BIG */
2059     err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2060                  create_big_complete);
2061     if (err < 0) {
2062         hci_conn_drop(conn);
2063         return ERR_PTR(err);
2064     }
2065 
2066     hci_iso_qos_setup(hdev, conn, &qos->out,
2067               conn->le_tx_phy ? conn->le_tx_phy :
2068               hdev->le_tx_def_phys);
2069 
2070     return conn;
2071 }
2072 
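/* Connect a CIS: establish (or reuse) the LE ACL to dst, derive default QoS
 * values from it, bind the CIS and trigger LE Create CIS once the ACL is
 * connected and the CIS handle has been allocated.
 */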
2073 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2074                  __u8 dst_type, struct bt_iso_qos *qos)
2075 {
2076     struct hci_conn *le;
2077     struct hci_conn *cis;
2078 
2079     /* Convert from ISO socket address type to HCI address type */
2080     if (dst_type == BDADDR_LE_PUBLIC)
2081         dst_type = ADDR_LE_DEV_PUBLIC;
2082     else
2083         dst_type = ADDR_LE_DEV_RANDOM;
2084 
2085     if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2086         le = hci_connect_le(hdev, dst, dst_type, false,
2087                     BT_SECURITY_LOW,
2088                     HCI_LE_CONN_TIMEOUT,
2089                     HCI_ROLE_SLAVE);
2090     else
2091         le = hci_connect_le_scan(hdev, dst, dst_type,
2092                      BT_SECURITY_LOW,
2093                      HCI_LE_CONN_TIMEOUT,
2094                      CONN_REASON_ISO_CONNECT);
2095     if (IS_ERR(le))
2096         return le;
2097 
2098     hci_iso_qos_setup(hdev, le, &qos->out,
2099               le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2100     hci_iso_qos_setup(hdev, le, &qos->in,
2101               le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2102 
2103     cis = hci_bind_cis(hdev, dst, dst_type, qos);
2104     if (IS_ERR(cis)) {
2105         hci_conn_drop(le);
2106         return cis;
2107     }
2108 
2109     le->link = cis;
2110     cis->link = le;
2111 
2112     hci_conn_hold(cis);
2113 
2114     /* If LE is already connected and CIS handle is already set proceed to
2115      * Create CIS immediately.
2116      */
2117     if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET)
2118         hci_le_create_cis(le);
2119 
2120     return cis;
2121 }
2122 
2123 /* Check link security requirement */
2124 int hci_conn_check_link_mode(struct hci_conn *conn)
2125 {
2126     BT_DBG("hcon %p", conn);
2127 
2128     /* In Secure Connections Only mode, it is required that Secure
2129      * Connections is used and the link is encrypted with AES-CCM
2130      * using a P-256 authenticated combination key.
2131      */
2132     if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2133         if (!hci_conn_sc_enabled(conn) ||
2134             !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2135             conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2136             return 0;
2137     }
2138 
2139     /* AES encryption is required for Level 4:
2140      *
2141      * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2142      * page 1319:
2143      *
2144      * 128-bit equivalent strength for link and encryption keys
2145      * required using FIPS approved algorithms (E0 not allowed,
2146      * SAFER+ not allowed, and P-192 not allowed; encryption key
2147      * not shortened)
2148      */
2149     if (conn->sec_level == BT_SECURITY_FIPS &&
2150         !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2151         bt_dev_err(conn->hdev,
2152                "Invalid security: Missing AES-CCM usage");
2153         return 0;
2154     }
2155 
2156     if (hci_conn_ssp_enabled(conn) &&
2157         !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2158         return 0;
2159 
2160     return 1;
2161 }
2162 
2163 /* Authenticate remote device */
2164 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2165 {
2166     BT_DBG("hcon %p", conn);
2167 
2168     if (conn->pending_sec_level > sec_level)
2169         sec_level = conn->pending_sec_level;
2170 
2171     if (sec_level > conn->sec_level)
2172         conn->pending_sec_level = sec_level;
2173     else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2174         return 1;
2175 
2176     /* Make sure we preserve an existing MITM requirement */
2177     auth_type |= (conn->auth_type & 0x01);
2178 
2179     conn->auth_type = auth_type;
2180 
2181     if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2182         struct hci_cp_auth_requested cp;
2183 
2184         cp.handle = cpu_to_le16(conn->handle);
2185         hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2186                  sizeof(cp), &cp);
2187 
2188         /* If we're already encrypted set the REAUTH_PEND flag,
2189          * otherwise set the ENCRYPT_PEND.
2190          */
2191         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2192             set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2193         else
2194             set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2195     }
2196 
2197     return 0;
2198 }
2199 
2200 /* Encrypt the link */
2201 static void hci_conn_encrypt(struct hci_conn *conn)
2202 {
2203     BT_DBG("hcon %p", conn);
2204 
2205     if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2206         struct hci_cp_set_conn_encrypt cp;
2207         cp.handle  = cpu_to_le16(conn->handle);
2208         cp.encrypt = 0x01;
2209         hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2210                  &cp);
2211     }
2212 }
2213 
2214 /* Enable security */
2215 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2216               bool initiator)
2217 {
2218     BT_DBG("hcon %p", conn);
2219 
2220     if (conn->type == LE_LINK)
2221         return smp_conn_security(conn, sec_level);
2222 
2223     /* For SDP we don't need the link key. */
2224     if (sec_level == BT_SECURITY_SDP)
2225         return 1;
2226 
2227     /* For non-2.1 devices and low security levels we don't need the link
2228        key. */
2229     if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2230         return 1;
2231 
2232     /* For other security levels we need the link key. */
2233     if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2234         goto auth;
2235 
2236     /* An authenticated FIPS approved combination key has sufficient
2237      * security for security level 4. */
2238     if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
2239         sec_level == BT_SECURITY_FIPS)
2240         goto encrypt;
2241 
2242     /* An authenticated combination key has sufficient security for
2243        security level 3. */
2244     if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
2245          conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
2246         sec_level == BT_SECURITY_HIGH)
2247         goto encrypt;
2248 
2249     /* An unauthenticated combination key has sufficient security for
2250        security levels 1 and 2. */
2251     if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
2252          conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
2253         (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
2254         goto encrypt;
2255 
2256     /* A combination key always has sufficient security for security
2257        levels 1 or 2. A high security level requires that the combination
2258        key is generated using the maximum PIN code length (16).
2259        For pre-2.1 units. */
2260     if (conn->key_type == HCI_LK_COMBINATION &&
2261         (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
2262          conn->pin_length == 16))
2263         goto encrypt;
2264 
2265 auth:
2266     if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2267         return 0;
2268 
2269     if (initiator)
2270         set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2271 
2272     if (!hci_conn_auth(conn, sec_level, auth_type))
2273         return 0;
2274 
2275 encrypt:
2276     if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2277         /* Ensure that the encryption key size has been read,
2278          * otherwise stall the upper layer responses.
2279          */
2280         if (!conn->enc_key_size)
2281             return 0;
2282 
2283         /* Nothing else needed, all requirements are met */
2284         return 1;
2285     }
2286 
2287     hci_conn_encrypt(conn);
2288     return 0;
2289 }
2290 EXPORT_SYMBOL(hci_conn_security);
2291 
2292 /* Check secure link requirement */
2293 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2294 {
2295     BT_DBG("hcon %p", conn);
2296 
2297     /* Accept if non-secure or higher security level is required */
2298     if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2299         return 1;
2300 
2301     /* Accept if secure or higher security level is already present */
2302     if (conn->sec_level == BT_SECURITY_HIGH ||
2303         conn->sec_level == BT_SECURITY_FIPS)
2304         return 1;
2305 
2306     /* Reject a non-secure link */
2307     return 0;
2308 }
2309 EXPORT_SYMBOL(hci_conn_check_secure);
2310 
2311 /* Switch role */
2312 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2313 {
2314     BT_DBG("hcon %p", conn);
2315 
2316     if (role == conn->role)
2317         return 1;
2318 
2319     if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2320         struct hci_cp_switch_role cp;
2321         bacpy(&cp.bdaddr, &conn->dst);
2322         cp.role = role;
2323         hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2324     }
2325 
2326     return 0;
2327 }
2328 EXPORT_SYMBOL(hci_conn_switch_role);
2329 
2330 /* Enter active mode */
2331 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2332 {
2333     struct hci_dev *hdev = conn->hdev;
2334 
2335     BT_DBG("hcon %p mode %d", conn, conn->mode);
2336 
2337     if (conn->mode != HCI_CM_SNIFF)
2338         goto timer;
2339 
2340     if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2341         goto timer;
2342 
2343     if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2344         struct hci_cp_exit_sniff_mode cp;
2345         cp.handle = cpu_to_le16(conn->handle);
2346         hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2347     }
2348 
2349 timer:
2350     if (hdev->idle_timeout > 0)
2351         queue_delayed_work(hdev->workqueue, &conn->idle_work,
2352                    msecs_to_jiffies(hdev->idle_timeout));
2353 }
2354 
2355 /* Drop all connections on the device */
2356 void hci_conn_hash_flush(struct hci_dev *hdev)
2357 {
2358     struct hci_conn_hash *h = &hdev->conn_hash;
2359     struct hci_conn *c, *n;
2360 
2361     BT_DBG("hdev %s", hdev->name);
2362 
2363     list_for_each_entry_safe(c, n, &h->list, list) {
2364         c->state = BT_CLOSED;
2365 
2366         hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
2367         hci_conn_del(c);
2368     }
2369 }
2370 
2371 /* Check pending connect attempts */
2372 void hci_conn_check_pending(struct hci_dev *hdev)
2373 {
2374     struct hci_conn *conn;
2375 
2376     BT_DBG("hdev %s", hdev->name);
2377 
2378     hci_dev_lock(hdev);
2379 
2380     conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2381     if (conn)
2382         hci_acl_create_connection(conn);
2383 
2384     hci_dev_unlock(hdev);
2385 }
2386 
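/* Translate connection role and flags into the HCI_LM_* bitmask reported to
 * userspace.
 */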
2387 static u32 get_link_mode(struct hci_conn *conn)
2388 {
2389     u32 link_mode = 0;
2390 
2391     if (conn->role == HCI_ROLE_MASTER)
2392         link_mode |= HCI_LM_MASTER;
2393 
2394     if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2395         link_mode |= HCI_LM_ENCRYPT;
2396 
2397     if (test_bit(HCI_CONN_AUTH, &conn->flags))
2398         link_mode |= HCI_LM_AUTH;
2399 
2400     if (test_bit(HCI_CONN_SECURE, &conn->flags))
2401         link_mode |= HCI_LM_SECURE;
2402 
2403     if (test_bit(HCI_CONN_FIPS, &conn->flags))
2404         link_mode |= HCI_LM_FIPS;
2405 
2406     return link_mode;
2407 }
2408 
2409 int hci_get_conn_list(void __user *arg)
2410 {
2411     struct hci_conn *c;
2412     struct hci_conn_list_req req, *cl;
2413     struct hci_conn_info *ci;
2414     struct hci_dev *hdev;
2415     int n = 0, size, err;
2416 
2417     if (copy_from_user(&req, arg, sizeof(req)))
2418         return -EFAULT;
2419 
2420     if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2421         return -EINVAL;
2422 
2423     size = sizeof(req) + req.conn_num * sizeof(*ci);
2424 
2425     cl = kmalloc(size, GFP_KERNEL);
2426     if (!cl)
2427         return -ENOMEM;
2428 
2429     hdev = hci_dev_get(req.dev_id);
2430     if (!hdev) {
2431         kfree(cl);
2432         return -ENODEV;
2433     }
2434 
2435     ci = cl->conn_info;
2436 
2437     hci_dev_lock(hdev);
2438     list_for_each_entry(c, &hdev->conn_hash.list, list) {
2439         bacpy(&(ci + n)->bdaddr, &c->dst);
2440         (ci + n)->handle = c->handle;
2441         (ci + n)->type  = c->type;
2442         (ci + n)->out   = c->out;
2443         (ci + n)->state = c->state;
2444         (ci + n)->link_mode = get_link_mode(c);
2445         if (++n >= req.conn_num)
2446             break;
2447     }
2448     hci_dev_unlock(hdev);
2449 
2450     cl->dev_id = hdev->id;
2451     cl->conn_num = n;
2452     size = sizeof(req) + n * sizeof(*ci);
2453 
2454     hci_dev_put(hdev);
2455 
2456     err = copy_to_user(arg, cl, size);
2457     kfree(cl);
2458 
2459     return err ? -EFAULT : 0;
2460 }
2461 
2462 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2463 {
2464     struct hci_conn_info_req req;
2465     struct hci_conn_info ci;
2466     struct hci_conn *conn;
2467     char __user *ptr = arg + sizeof(req);
2468 
2469     if (copy_from_user(&req, arg, sizeof(req)))
2470         return -EFAULT;
2471 
2472     hci_dev_lock(hdev);
2473     conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2474     if (conn) {
2475         bacpy(&ci.bdaddr, &conn->dst);
2476         ci.handle = conn->handle;
2477         ci.type  = conn->type;
2478         ci.out   = conn->out;
2479         ci.state = conn->state;
2480         ci.link_mode = get_link_mode(conn);
2481     }
2482     hci_dev_unlock(hdev);
2483 
2484     if (!conn)
2485         return -ENOENT;
2486 
2487     return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2488 }
2489 
2490 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2491 {
2492     struct hci_auth_info_req req;
2493     struct hci_conn *conn;
2494 
2495     if (copy_from_user(&req, arg, sizeof(req)))
2496         return -EFAULT;
2497 
2498     hci_dev_lock(hdev);
2499     conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2500     if (conn)
2501         req.type = conn->auth_type;
2502     hci_dev_unlock(hdev);
2503 
2504     if (!conn)
2505         return -ENOENT;
2506 
2507     return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2508 }
2509 
2510 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2511 {
2512     struct hci_dev *hdev = conn->hdev;
2513     struct hci_chan *chan;
2514 
2515     BT_DBG("%s hcon %p", hdev->name, conn);
2516 
2517     if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2518         BT_DBG("Refusing to create new hci_chan");
2519         return NULL;
2520     }
2521 
2522     chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2523     if (!chan)
2524         return NULL;
2525 
2526     chan->conn = hci_conn_get(conn);
2527     skb_queue_head_init(&chan->data_q);
2528     chan->state = BT_CONNECTED;
2529 
2530     list_add_rcu(&chan->list, &conn->chan_list);
2531 
2532     return chan;
2533 }
2534 
2535 void hci_chan_del(struct hci_chan *chan)
2536 {
2537     struct hci_conn *conn = chan->conn;
2538     struct hci_dev *hdev = conn->hdev;
2539 
2540     BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2541 
2542     list_del_rcu(&chan->list);
2543 
2544     synchronize_rcu();
2545 
2546     /* Prevent new hci_chans from being created for this hci_conn */
2547     set_bit(HCI_CONN_DROP, &conn->flags);
2548 
2549     hci_conn_put(conn);
2550 
2551     skb_queue_purge(&chan->data_q);
2552     kfree(chan);
2553 }
2554 
2555 void hci_chan_list_flush(struct hci_conn *conn)
2556 {
2557     struct hci_chan *chan, *n;
2558 
2559     BT_DBG("hcon %p", conn);
2560 
2561     list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2562         hci_chan_del(chan);
2563 }
2564 
2565 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2566                          __u16 handle)
2567 {
2568     struct hci_chan *hchan;
2569 
2570     list_for_each_entry(hchan, &hcon->chan_list, list) {
2571         if (hchan->handle == handle)
2572             return hchan;
2573     }
2574 
2575     return NULL;
2576 }
2577 
2578 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2579 {
2580     struct hci_conn_hash *h = &hdev->conn_hash;
2581     struct hci_conn *hcon;
2582     struct hci_chan *hchan = NULL;
2583 
2584     rcu_read_lock();
2585 
2586     list_for_each_entry_rcu(hcon, &h->list, list) {
2587         hchan = __hci_chan_lookup_handle(hcon, handle);
2588         if (hchan)
2589             break;
2590     }
2591 
2592     rcu_read_unlock();
2593 
2594     return hchan;
2595 }
2596 
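/* Return the BT_PHY_* bitmask describing the PHYs and slot types usable on
 * this connection, derived from the negotiated packet types or LE PHYs.
 */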
2597 u32 hci_conn_get_phy(struct hci_conn *conn)
2598 {
2599     u32 phys = 0;
2600 
2601     /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2602      * Table 6.2: Packets defined for synchronous, asynchronous, and
2603      * CPB logical transport types.
2604      */
2605     switch (conn->type) {
2606     case SCO_LINK:
2607         /* SCO logical transport (1 Mb/s):
2608          * HV1, HV2, HV3 and DV.
2609          */
2610         phys |= BT_PHY_BR_1M_1SLOT;
2611 
2612         break;
2613 
2614     case ACL_LINK:
2615         /* ACL logical transport (1 Mb/s) ptt=0:
2616          * DH1, DM3, DH3, DM5 and DH5.
2617          */
2618         phys |= BT_PHY_BR_1M_1SLOT;
2619 
2620         if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2621             phys |= BT_PHY_BR_1M_3SLOT;
2622 
2623         if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2624             phys |= BT_PHY_BR_1M_5SLOT;
2625 
2626         /* ACL logical transport (2 Mb/s) ptt=1:
2627          * 2-DH1, 2-DH3 and 2-DH5.
2628          */
2629         if (!(conn->pkt_type & HCI_2DH1))
2630             phys |= BT_PHY_EDR_2M_1SLOT;
2631 
2632         if (!(conn->pkt_type & HCI_2DH3))
2633             phys |= BT_PHY_EDR_2M_3SLOT;
2634 
2635         if (!(conn->pkt_type & HCI_2DH5))
2636             phys |= BT_PHY_EDR_2M_5SLOT;
2637 
2638         /* ACL logical transport (3 Mb/s) ptt=1:
2639          * 3-DH1, 3-DH3 and 3-DH5.
2640          */
2641         if (!(conn->pkt_type & HCI_3DH1))
2642             phys |= BT_PHY_EDR_3M_1SLOT;
2643 
2644         if (!(conn->pkt_type & HCI_3DH3))
2645             phys |= BT_PHY_EDR_3M_3SLOT;
2646 
2647         if (!(conn->pkt_type & HCI_3DH5))
2648             phys |= BT_PHY_EDR_3M_5SLOT;
2649 
2650         break;
2651 
2652     case ESCO_LINK:
2653         /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2654         phys |= BT_PHY_BR_1M_1SLOT;
2655 
2656         if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2657             phys |= BT_PHY_BR_1M_3SLOT;
2658 
2659         /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2660         if (!(conn->pkt_type & ESCO_2EV3))
2661             phys |= BT_PHY_EDR_2M_1SLOT;
2662 
2663         if (!(conn->pkt_type & ESCO_2EV5))
2664             phys |= BT_PHY_EDR_2M_3SLOT;
2665 
2666         /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2667         if (!(conn->pkt_type & ESCO_3EV3))
2668             phys |= BT_PHY_EDR_3M_1SLOT;
2669 
2670         if (!(conn->pkt_type & ESCO_3EV5))
2671             phys |= BT_PHY_EDR_3M_3SLOT;
2672 
2673         break;
2674 
2675     case LE_LINK:
2676         if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2677             phys |= BT_PHY_LE_1M_TX;
2678 
2679         if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2680             phys |= BT_PHY_LE_1M_RX;
2681 
2682         if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2683             phys |= BT_PHY_LE_2M_TX;
2684 
2685         if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2686             phys |= BT_PHY_LE_2M_RX;
2687 
2688         if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2689             phys |= BT_PHY_LE_CODED_TX;
2690 
2691         if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2692             phys |= BT_PHY_LE_CODED_RX;
2693 
2694         break;
2695     }
2696 
2697     return phys;
2698 }