0027 #include <linux/module.h>
0028 #include <asm/unaligned.h>
0029
0030 #include <net/bluetooth/bluetooth.h>
0031 #include <net/bluetooth/hci_core.h>
0032 #include <net/bluetooth/hci_sock.h>
0033 #include <net/bluetooth/l2cap.h>
0034 #include <net/bluetooth/mgmt.h>
0035
0036 #include "hci_request.h"
0037 #include "smp.h"
0038 #include "mgmt_util.h"
0039 #include "mgmt_config.h"
0040 #include "msft.h"
0041 #include "eir.h"
0042 #include "aosp.h"
0043
0044 #define MGMT_VERSION 1
0045 #define MGMT_REVISION 22
0046
0047 static const u16 mgmt_commands[] = {
0048 MGMT_OP_READ_INDEX_LIST,
0049 MGMT_OP_READ_INFO,
0050 MGMT_OP_SET_POWERED,
0051 MGMT_OP_SET_DISCOVERABLE,
0052 MGMT_OP_SET_CONNECTABLE,
0053 MGMT_OP_SET_FAST_CONNECTABLE,
0054 MGMT_OP_SET_BONDABLE,
0055 MGMT_OP_SET_LINK_SECURITY,
0056 MGMT_OP_SET_SSP,
0057 MGMT_OP_SET_HS,
0058 MGMT_OP_SET_LE,
0059 MGMT_OP_SET_DEV_CLASS,
0060 MGMT_OP_SET_LOCAL_NAME,
0061 MGMT_OP_ADD_UUID,
0062 MGMT_OP_REMOVE_UUID,
0063 MGMT_OP_LOAD_LINK_KEYS,
0064 MGMT_OP_LOAD_LONG_TERM_KEYS,
0065 MGMT_OP_DISCONNECT,
0066 MGMT_OP_GET_CONNECTIONS,
0067 MGMT_OP_PIN_CODE_REPLY,
0068 MGMT_OP_PIN_CODE_NEG_REPLY,
0069 MGMT_OP_SET_IO_CAPABILITY,
0070 MGMT_OP_PAIR_DEVICE,
0071 MGMT_OP_CANCEL_PAIR_DEVICE,
0072 MGMT_OP_UNPAIR_DEVICE,
0073 MGMT_OP_USER_CONFIRM_REPLY,
0074 MGMT_OP_USER_CONFIRM_NEG_REPLY,
0075 MGMT_OP_USER_PASSKEY_REPLY,
0076 MGMT_OP_USER_PASSKEY_NEG_REPLY,
0077 MGMT_OP_READ_LOCAL_OOB_DATA,
0078 MGMT_OP_ADD_REMOTE_OOB_DATA,
0079 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
0080 MGMT_OP_START_DISCOVERY,
0081 MGMT_OP_STOP_DISCOVERY,
0082 MGMT_OP_CONFIRM_NAME,
0083 MGMT_OP_BLOCK_DEVICE,
0084 MGMT_OP_UNBLOCK_DEVICE,
0085 MGMT_OP_SET_DEVICE_ID,
0086 MGMT_OP_SET_ADVERTISING,
0087 MGMT_OP_SET_BREDR,
0088 MGMT_OP_SET_STATIC_ADDRESS,
0089 MGMT_OP_SET_SCAN_PARAMS,
0090 MGMT_OP_SET_SECURE_CONN,
0091 MGMT_OP_SET_DEBUG_KEYS,
0092 MGMT_OP_SET_PRIVACY,
0093 MGMT_OP_LOAD_IRKS,
0094 MGMT_OP_GET_CONN_INFO,
0095 MGMT_OP_GET_CLOCK_INFO,
0096 MGMT_OP_ADD_DEVICE,
0097 MGMT_OP_REMOVE_DEVICE,
0098 MGMT_OP_LOAD_CONN_PARAM,
0099 MGMT_OP_READ_UNCONF_INDEX_LIST,
0100 MGMT_OP_READ_CONFIG_INFO,
0101 MGMT_OP_SET_EXTERNAL_CONFIG,
0102 MGMT_OP_SET_PUBLIC_ADDRESS,
0103 MGMT_OP_START_SERVICE_DISCOVERY,
0104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
0105 MGMT_OP_READ_EXT_INDEX_LIST,
0106 MGMT_OP_READ_ADV_FEATURES,
0107 MGMT_OP_ADD_ADVERTISING,
0108 MGMT_OP_REMOVE_ADVERTISING,
0109 MGMT_OP_GET_ADV_SIZE_INFO,
0110 MGMT_OP_START_LIMITED_DISCOVERY,
0111 MGMT_OP_READ_EXT_INFO,
0112 MGMT_OP_SET_APPEARANCE,
0113 MGMT_OP_GET_PHY_CONFIGURATION,
0114 MGMT_OP_SET_PHY_CONFIGURATION,
0115 MGMT_OP_SET_BLOCKED_KEYS,
0116 MGMT_OP_SET_WIDEBAND_SPEECH,
0117 MGMT_OP_READ_CONTROLLER_CAP,
0118 MGMT_OP_READ_EXP_FEATURES_INFO,
0119 MGMT_OP_SET_EXP_FEATURE,
0120 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
0121 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
0122 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
0123 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
0124 MGMT_OP_GET_DEVICE_FLAGS,
0125 MGMT_OP_SET_DEVICE_FLAGS,
0126 MGMT_OP_READ_ADV_MONITOR_FEATURES,
0127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
0128 MGMT_OP_REMOVE_ADV_MONITOR,
0129 MGMT_OP_ADD_EXT_ADV_PARAMS,
0130 MGMT_OP_ADD_EXT_ADV_DATA,
0131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
0132 };
0133
0134 static const u16 mgmt_events[] = {
0135 MGMT_EV_CONTROLLER_ERROR,
0136 MGMT_EV_INDEX_ADDED,
0137 MGMT_EV_INDEX_REMOVED,
0138 MGMT_EV_NEW_SETTINGS,
0139 MGMT_EV_CLASS_OF_DEV_CHANGED,
0140 MGMT_EV_LOCAL_NAME_CHANGED,
0141 MGMT_EV_NEW_LINK_KEY,
0142 MGMT_EV_NEW_LONG_TERM_KEY,
0143 MGMT_EV_DEVICE_CONNECTED,
0144 MGMT_EV_DEVICE_DISCONNECTED,
0145 MGMT_EV_CONNECT_FAILED,
0146 MGMT_EV_PIN_CODE_REQUEST,
0147 MGMT_EV_USER_CONFIRM_REQUEST,
0148 MGMT_EV_USER_PASSKEY_REQUEST,
0149 MGMT_EV_AUTH_FAILED,
0150 MGMT_EV_DEVICE_FOUND,
0151 MGMT_EV_DISCOVERING,
0152 MGMT_EV_DEVICE_BLOCKED,
0153 MGMT_EV_DEVICE_UNBLOCKED,
0154 MGMT_EV_DEVICE_UNPAIRED,
0155 MGMT_EV_PASSKEY_NOTIFY,
0156 MGMT_EV_NEW_IRK,
0157 MGMT_EV_NEW_CSRK,
0158 MGMT_EV_DEVICE_ADDED,
0159 MGMT_EV_DEVICE_REMOVED,
0160 MGMT_EV_NEW_CONN_PARAM,
0161 MGMT_EV_UNCONF_INDEX_ADDED,
0162 MGMT_EV_UNCONF_INDEX_REMOVED,
0163 MGMT_EV_NEW_CONFIG_OPTIONS,
0164 MGMT_EV_EXT_INDEX_ADDED,
0165 MGMT_EV_EXT_INDEX_REMOVED,
0166 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
0167 MGMT_EV_ADVERTISING_ADDED,
0168 MGMT_EV_ADVERTISING_REMOVED,
0169 MGMT_EV_EXT_INFO_CHANGED,
0170 MGMT_EV_PHY_CONFIGURATION_CHANGED,
0171 MGMT_EV_EXP_FEATURE_CHANGED,
0172 MGMT_EV_DEVICE_FLAGS_CHANGED,
0173 MGMT_EV_ADV_MONITOR_ADDED,
0174 MGMT_EV_ADV_MONITOR_REMOVED,
0175 MGMT_EV_CONTROLLER_SUSPEND,
0176 MGMT_EV_CONTROLLER_RESUME,
0177 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
0178 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
0179 };
0180
0181 static const u16 mgmt_untrusted_commands[] = {
0182 MGMT_OP_READ_INDEX_LIST,
0183 MGMT_OP_READ_INFO,
0184 MGMT_OP_READ_UNCONF_INDEX_LIST,
0185 MGMT_OP_READ_CONFIG_INFO,
0186 MGMT_OP_READ_EXT_INDEX_LIST,
0187 MGMT_OP_READ_EXT_INFO,
0188 MGMT_OP_READ_CONTROLLER_CAP,
0189 MGMT_OP_READ_EXP_FEATURES_INFO,
0190 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
0191 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
0192 };
0193
0194 static const u16 mgmt_untrusted_events[] = {
0195 MGMT_EV_INDEX_ADDED,
0196 MGMT_EV_INDEX_REMOVED,
0197 MGMT_EV_NEW_SETTINGS,
0198 MGMT_EV_CLASS_OF_DEV_CHANGED,
0199 MGMT_EV_LOCAL_NAME_CHANGED,
0200 MGMT_EV_UNCONF_INDEX_ADDED,
0201 MGMT_EV_UNCONF_INDEX_REMOVED,
0202 MGMT_EV_NEW_CONFIG_OPTIONS,
0203 MGMT_EV_EXT_INDEX_ADDED,
0204 MGMT_EV_EXT_INDEX_REMOVED,
0205 MGMT_EV_EXT_INFO_CHANGED,
0206 MGMT_EV_EXP_FEATURE_CHANGED,
0207 };
0208
0209 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
0210
0211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
0212 "\x00\x00\x00\x00\x00\x00\x00\x00"
0213
0214
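/* Conversion table from HCI status codes to MGMT status codes,
 * indexed by the HCI status value.
 */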
0215 static const u8 mgmt_status_table[] = {
0216 MGMT_STATUS_SUCCESS,
0217 MGMT_STATUS_UNKNOWN_COMMAND,
0218 MGMT_STATUS_NOT_CONNECTED,
0219 MGMT_STATUS_FAILED,
0220 MGMT_STATUS_CONNECT_FAILED,
0221 MGMT_STATUS_AUTH_FAILED,
0222 MGMT_STATUS_AUTH_FAILED,
0223 MGMT_STATUS_NO_RESOURCES,
0224 MGMT_STATUS_TIMEOUT,
0225 MGMT_STATUS_NO_RESOURCES,
0226 MGMT_STATUS_NO_RESOURCES,
0227 MGMT_STATUS_ALREADY_CONNECTED,
0228 MGMT_STATUS_BUSY,
0229 MGMT_STATUS_NO_RESOURCES,
0230 MGMT_STATUS_REJECTED,
0231 MGMT_STATUS_REJECTED,
0232 MGMT_STATUS_TIMEOUT,
0233 MGMT_STATUS_NOT_SUPPORTED,
0234 MGMT_STATUS_INVALID_PARAMS,
0235 MGMT_STATUS_DISCONNECTED,
0236 MGMT_STATUS_NO_RESOURCES,
0237 MGMT_STATUS_DISCONNECTED,
0238 MGMT_STATUS_DISCONNECTED,
0239 MGMT_STATUS_BUSY,
0240 MGMT_STATUS_REJECTED,
0241 MGMT_STATUS_FAILED,
0242 MGMT_STATUS_NOT_SUPPORTED,
0243 MGMT_STATUS_REJECTED,
0244 MGMT_STATUS_REJECTED,
0245 MGMT_STATUS_REJECTED,
0246 MGMT_STATUS_INVALID_PARAMS,
0247 MGMT_STATUS_FAILED,
0248 MGMT_STATUS_NOT_SUPPORTED,
0249 MGMT_STATUS_FAILED,
0250 MGMT_STATUS_TIMEOUT,
0251 MGMT_STATUS_FAILED,
0252 MGMT_STATUS_FAILED,
0253 MGMT_STATUS_REJECTED,
0254 MGMT_STATUS_FAILED,
0255 MGMT_STATUS_NOT_SUPPORTED,
0256 MGMT_STATUS_TIMEOUT,
0257 MGMT_STATUS_NOT_SUPPORTED,
0258 MGMT_STATUS_FAILED,
0259 MGMT_STATUS_FAILED,
0260 MGMT_STATUS_INVALID_PARAMS,
0261 MGMT_STATUS_REJECTED,
0262 MGMT_STATUS_NOT_SUPPORTED,
0263 MGMT_STATUS_REJECTED,
0264 MGMT_STATUS_INVALID_PARAMS,
0265 MGMT_STATUS_FAILED,
0266 MGMT_STATUS_BUSY,
0267 MGMT_STATUS_FAILED,
0268 MGMT_STATUS_FAILED,
0269 MGMT_STATUS_FAILED,
0270 MGMT_STATUS_INVALID_PARAMS,
0271 MGMT_STATUS_NOT_SUPPORTED,
0272 MGMT_STATUS_BUSY,
0273 MGMT_STATUS_REJECTED,
0274 MGMT_STATUS_BUSY,
0275 MGMT_STATUS_INVALID_PARAMS,
0276 MGMT_STATUS_TIMEOUT,
0277 MGMT_STATUS_AUTH_FAILED,
0278 MGMT_STATUS_CONNECT_FAILED,
0279 MGMT_STATUS_CONNECT_FAILED,
0280 };
0281
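/* Map a negative errno value to the closest MGMT status code;
 * anything unrecognized is reported as MGMT_STATUS_FAILED.
 */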
0282 static u8 mgmt_errno_status(int err)
0283 {
0284 switch (err) {
0285 case 0:
0286 return MGMT_STATUS_SUCCESS;
0287 case -EPERM:
0288 return MGMT_STATUS_REJECTED;
0289 case -EINVAL:
0290 return MGMT_STATUS_INVALID_PARAMS;
0291 case -EOPNOTSUPP:
0292 return MGMT_STATUS_NOT_SUPPORTED;
0293 case -EBUSY:
0294 return MGMT_STATUS_BUSY;
0295 case -ETIMEDOUT:
0296 return MGMT_STATUS_AUTH_FAILED;
0297 case -ENOMEM:
0298 return MGMT_STATUS_NO_RESOURCES;
0299 case -EISCONN:
0300 return MGMT_STATUS_ALREADY_CONNECTED;
0301 case -ENOTCONN:
0302 return MGMT_STATUS_DISCONNECTED;
0303 }
0304
0305 return MGMT_STATUS_FAILED;
0306 }
0307
0308 static u8 mgmt_status(int err)
0309 {
0310 if (err < 0)
0311 return mgmt_errno_status(err);
0312
0313 if (err < ARRAY_SIZE(mgmt_status_table))
0314 return mgmt_status_table[err];
0315
0316 return MGMT_STATUS_FAILED;
0317 }
0318
0319 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
0320 u16 len, int flag)
0321 {
0322 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
0323 flag, NULL);
0324 }
0325
0326 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
0327 u16 len, int flag, struct sock *skip_sk)
0328 {
0329 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
0330 flag, skip_sk);
0331 }
0332
0333 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
0334 struct sock *skip_sk)
0335 {
0336 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
0337 HCI_SOCK_TRUSTED, skip_sk);
0338 }
0339
0340 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
0341 {
0342 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
0343 skip_sk);
0344 }
0345
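/* Translate an MGMT address type into the corresponding HCI LE
 * address type.
 */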
0346 static u8 le_addr_type(u8 mgmt_addr_type)
0347 {
0348 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
0349 return ADDR_LE_DEV_PUBLIC;
0350 else
0351 return ADDR_LE_DEV_RANDOM;
0352 }
0353
0354 void mgmt_fill_version_info(void *ver)
0355 {
0356 struct mgmt_rp_read_version *rp = ver;
0357
0358 rp->version = MGMT_VERSION;
0359 rp->revision = cpu_to_le16(MGMT_REVISION);
0360 }
0361
0362 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
0363 u16 data_len)
0364 {
0365 struct mgmt_rp_read_version rp;
0366
0367 bt_dev_dbg(hdev, "sock %p", sk);
0368
0369 mgmt_fill_version_info(&rp);
0370
0371 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
0372 &rp, sizeof(rp));
0373 }
0374
0375 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
0376 u16 data_len)
0377 {
0378 struct mgmt_rp_read_commands *rp;
0379 u16 num_commands, num_events;
0380 size_t rp_size;
0381 int i, err;
0382
0383 bt_dev_dbg(hdev, "sock %p", sk);
0384
0385 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
0386 num_commands = ARRAY_SIZE(mgmt_commands);
0387 num_events = ARRAY_SIZE(mgmt_events);
0388 } else {
0389 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
0390 num_events = ARRAY_SIZE(mgmt_untrusted_events);
0391 }
0392
0393 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
0394
0395 rp = kmalloc(rp_size, GFP_KERNEL);
0396 if (!rp)
0397 return -ENOMEM;
0398
0399 rp->num_commands = cpu_to_le16(num_commands);
0400 rp->num_events = cpu_to_le16(num_events);
0401
0402 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
0403 __le16 *opcode = rp->opcodes;
0404
0405 for (i = 0; i < num_commands; i++, opcode++)
0406 put_unaligned_le16(mgmt_commands[i], opcode);
0407
0408 for (i = 0; i < num_events; i++, opcode++)
0409 put_unaligned_le16(mgmt_events[i], opcode);
0410 } else {
0411 __le16 *opcode = rp->opcodes;
0412
0413 for (i = 0; i < num_commands; i++, opcode++)
0414 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
0415
0416 for (i = 0; i < num_events; i++, opcode++)
0417 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
0418 }
0419
0420 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
0421 rp, rp_size);
0422 kfree(rp);
0423
0424 return err;
0425 }
0426
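/* Return the indexes of all configured primary controllers,
 * skipping devices that are still being set up, are raw-only or
 * are claimed by a user channel.
 */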
0427 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
0428 u16 data_len)
0429 {
0430 struct mgmt_rp_read_index_list *rp;
0431 struct hci_dev *d;
0432 size_t rp_len;
0433 u16 count;
0434 int err;
0435
0436 bt_dev_dbg(hdev, "sock %p", sk);
0437
0438 read_lock(&hci_dev_list_lock);
0439
0440 count = 0;
0441 list_for_each_entry(d, &hci_dev_list, list) {
0442 if (d->dev_type == HCI_PRIMARY &&
0443 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
0444 count++;
0445 }
0446
0447 rp_len = sizeof(*rp) + (2 * count);
0448 rp = kmalloc(rp_len, GFP_ATOMIC);
0449 if (!rp) {
0450 read_unlock(&hci_dev_list_lock);
0451 return -ENOMEM;
0452 }
0453
0454 count = 0;
0455 list_for_each_entry(d, &hci_dev_list, list) {
0456 if (hci_dev_test_flag(d, HCI_SETUP) ||
0457 hci_dev_test_flag(d, HCI_CONFIG) ||
0458 hci_dev_test_flag(d, HCI_USER_CHANNEL))
0459 continue;
0460
/* Devices marked as raw-only are neither configured
 * nor unconfigured controllers, so skip them here.
 */
0464 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
0465 continue;
0466
0467 if (d->dev_type == HCI_PRIMARY &&
0468 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
0469 rp->index[count++] = cpu_to_le16(d->id);
0470 bt_dev_dbg(hdev, "Added hci%u", d->id);
0471 }
0472 }
0473
0474 rp->num_controllers = cpu_to_le16(count);
0475 rp_len = sizeof(*rp) + (2 * count);
0476
0477 read_unlock(&hci_dev_list_lock);
0478
0479 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
0480 0, rp, rp_len);
0481
0482 kfree(rp);
0483
0484 return err;
0485 }
0486
0487 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
0488 void *data, u16 data_len)
0489 {
0490 struct mgmt_rp_read_unconf_index_list *rp;
0491 struct hci_dev *d;
0492 size_t rp_len;
0493 u16 count;
0494 int err;
0495
0496 bt_dev_dbg(hdev, "sock %p", sk);
0497
0498 read_lock(&hci_dev_list_lock);
0499
0500 count = 0;
0501 list_for_each_entry(d, &hci_dev_list, list) {
0502 if (d->dev_type == HCI_PRIMARY &&
0503 hci_dev_test_flag(d, HCI_UNCONFIGURED))
0504 count++;
0505 }
0506
0507 rp_len = sizeof(*rp) + (2 * count);
0508 rp = kmalloc(rp_len, GFP_ATOMIC);
0509 if (!rp) {
0510 read_unlock(&hci_dev_list_lock);
0511 return -ENOMEM;
0512 }
0513
0514 count = 0;
0515 list_for_each_entry(d, &hci_dev_list, list) {
0516 if (hci_dev_test_flag(d, HCI_SETUP) ||
0517 hci_dev_test_flag(d, HCI_CONFIG) ||
0518 hci_dev_test_flag(d, HCI_USER_CHANNEL))
0519 continue;
0520
/* Devices marked as raw-only are neither configured
 * nor unconfigured controllers, so skip them here.
 */
0524 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
0525 continue;
0526
0527 if (d->dev_type == HCI_PRIMARY &&
0528 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
0529 rp->index[count++] = cpu_to_le16(d->id);
0530 bt_dev_dbg(hdev, "Added hci%u", d->id);
0531 }
0532 }
0533
0534 rp->num_controllers = cpu_to_le16(count);
0535 rp_len = sizeof(*rp) + (2 * count);
0536
0537 read_unlock(&hci_dev_list_lock);
0538
0539 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
0540 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
0541
0542 kfree(rp);
0543
0544 return err;
0545 }
0546
0547 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
0548 void *data, u16 data_len)
0549 {
0550 struct mgmt_rp_read_ext_index_list *rp;
0551 struct hci_dev *d;
0552 u16 count;
0553 int err;
0554
0555 bt_dev_dbg(hdev, "sock %p", sk);
0556
0557 read_lock(&hci_dev_list_lock);
0558
0559 count = 0;
0560 list_for_each_entry(d, &hci_dev_list, list) {
0561 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
0562 count++;
0563 }
0564
0565 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
0566 if (!rp) {
0567 read_unlock(&hci_dev_list_lock);
0568 return -ENOMEM;
0569 }
0570
0571 count = 0;
0572 list_for_each_entry(d, &hci_dev_list, list) {
0573 if (hci_dev_test_flag(d, HCI_SETUP) ||
0574 hci_dev_test_flag(d, HCI_CONFIG) ||
0575 hci_dev_test_flag(d, HCI_USER_CHANNEL))
0576 continue;
0577
/* Devices marked as raw-only are neither configured
 * nor unconfigured controllers, so skip them here.
 */
0581 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
0582 continue;
0583
0584 if (d->dev_type == HCI_PRIMARY) {
0585 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
0586 rp->entry[count].type = 0x01;
0587 else
0588 rp->entry[count].type = 0x00;
0589 } else if (d->dev_type == HCI_AMP) {
0590 rp->entry[count].type = 0x02;
0591 } else {
0592 continue;
0593 }
0594
0595 rp->entry[count].bus = d->bus;
0596 rp->entry[count++].index = cpu_to_le16(d->id);
0597 bt_dev_dbg(hdev, "Added hci%u", d->id);
0598 }
0599
0600 rp->num_controllers = cpu_to_le16(count);
0601
0602 read_unlock(&hci_dev_list_lock);
0603
/* Once this command has been called at least once, only the
 * extended index list events are sent to this socket; the
 * legacy and unconfigured index events are disabled for it.
 */
0608 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
0609 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
0610 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
0611
0612 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
0613 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
0614 struct_size(rp, entry, count));
0615
0616 kfree(rp);
0617
0618 return err;
0619 }
0620
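/* A controller counts as configured once any required external
 * configuration has completed and, if the quirks demand it, a
 * valid public address has been set.
 */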
0621 static bool is_configured(struct hci_dev *hdev)
0622 {
0623 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
0624 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
0625 return false;
0626
0627 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
0628 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
0629 !bacmp(&hdev->public_addr, BDADDR_ANY))
0630 return false;
0631
0632 return true;
0633 }
0634
0635 static __le32 get_missing_options(struct hci_dev *hdev)
0636 {
0637 u32 options = 0;
0638
0639 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
0640 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
0641 options |= MGMT_OPTION_EXTERNAL_CONFIG;
0642
0643 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
0644 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
0645 !bacmp(&hdev->public_addr, BDADDR_ANY))
0646 options |= MGMT_OPTION_PUBLIC_ADDRESS;
0647
0648 return cpu_to_le32(options);
0649 }
0650
0651 static int new_options(struct hci_dev *hdev, struct sock *skip)
0652 {
0653 __le32 options = get_missing_options(hdev);
0654
0655 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
0656 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
0657 }
0658
0659 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
0660 {
0661 __le32 options = get_missing_options(hdev);
0662
0663 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
0664 sizeof(options));
0665 }
0666
0667 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
0668 void *data, u16 data_len)
0669 {
0670 struct mgmt_rp_read_config_info rp;
0671 u32 options = 0;
0672
0673 bt_dev_dbg(hdev, "sock %p", sk);
0674
0675 hci_dev_lock(hdev);
0676
0677 memset(&rp, 0, sizeof(rp));
0678 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
0679
0680 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
0681 options |= MGMT_OPTION_EXTERNAL_CONFIG;
0682
0683 if (hdev->set_bdaddr)
0684 options |= MGMT_OPTION_PUBLIC_ADDRESS;
0685
0686 rp.supported_options = cpu_to_le32(options);
0687 rp.missing_options = get_missing_options(hdev);
0688
0689 hci_dev_unlock(hdev);
0690
0691 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
0692 &rp, sizeof(rp));
0693 }
0694
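/* Build the bitmask of PHYs supported by the controller, derived
 * from its LMP and LE feature bits.
 */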
0695 static u32 get_supported_phys(struct hci_dev *hdev)
0696 {
0697 u32 supported_phys = 0;
0698
0699 if (lmp_bredr_capable(hdev)) {
0700 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
0701
0702 if (hdev->features[0][0] & LMP_3SLOT)
0703 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
0704
0705 if (hdev->features[0][0] & LMP_5SLOT)
0706 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
0707
0708 if (lmp_edr_2m_capable(hdev)) {
0709 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
0710
0711 if (lmp_edr_3slot_capable(hdev))
0712 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
0713
0714 if (lmp_edr_5slot_capable(hdev))
0715 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
0716
0717 if (lmp_edr_3m_capable(hdev)) {
0718 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
0719
0720 if (lmp_edr_3slot_capable(hdev))
0721 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
0722
0723 if (lmp_edr_5slot_capable(hdev))
0724 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
0725 }
0726 }
0727 }
0728
0729 if (lmp_le_capable(hdev)) {
0730 supported_phys |= MGMT_PHY_LE_1M_TX;
0731 supported_phys |= MGMT_PHY_LE_1M_RX;
0732
0733 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
0734 supported_phys |= MGMT_PHY_LE_2M_TX;
0735 supported_phys |= MGMT_PHY_LE_2M_RX;
0736 }
0737
0738 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
0739 supported_phys |= MGMT_PHY_LE_CODED_TX;
0740 supported_phys |= MGMT_PHY_LE_CODED_RX;
0741 }
0742 }
0743
0744 return supported_phys;
0745 }
0746
0747 static u32 get_selected_phys(struct hci_dev *hdev)
0748 {
0749 u32 selected_phys = 0;
0750
0751 if (lmp_bredr_capable(hdev)) {
0752 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
0753
0754 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
0755 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
0756
0757 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
0758 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
0759
0760 if (lmp_edr_2m_capable(hdev)) {
0761 if (!(hdev->pkt_type & HCI_2DH1))
0762 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
0763
0764 if (lmp_edr_3slot_capable(hdev) &&
0765 !(hdev->pkt_type & HCI_2DH3))
0766 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
0767
0768 if (lmp_edr_5slot_capable(hdev) &&
0769 !(hdev->pkt_type & HCI_2DH5))
0770 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
0771
0772 if (lmp_edr_3m_capable(hdev)) {
0773 if (!(hdev->pkt_type & HCI_3DH1))
0774 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
0775
0776 if (lmp_edr_3slot_capable(hdev) &&
0777 !(hdev->pkt_type & HCI_3DH3))
0778 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
0779
0780 if (lmp_edr_5slot_capable(hdev) &&
0781 !(hdev->pkt_type & HCI_3DH5))
0782 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
0783 }
0784 }
0785 }
0786
0787 if (lmp_le_capable(hdev)) {
0788 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
0789 selected_phys |= MGMT_PHY_LE_1M_TX;
0790
0791 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
0792 selected_phys |= MGMT_PHY_LE_1M_RX;
0793
0794 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
0795 selected_phys |= MGMT_PHY_LE_2M_TX;
0796
0797 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
0798 selected_phys |= MGMT_PHY_LE_2M_RX;
0799
0800 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
0801 selected_phys |= MGMT_PHY_LE_CODED_TX;
0802
0803 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
0804 selected_phys |= MGMT_PHY_LE_CODED_RX;
0805 }
0806
0807 return selected_phys;
0808 }
0809
0810 static u32 get_configurable_phys(struct hci_dev *hdev)
0811 {
0812 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
0813 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
0814 }
0815
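/* Bitmask of MGMT settings that this controller could support,
 * derived from its BR/EDR and LE capabilities.
 */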
0816 static u32 get_supported_settings(struct hci_dev *hdev)
0817 {
0818 u32 settings = 0;
0819
0820 settings |= MGMT_SETTING_POWERED;
0821 settings |= MGMT_SETTING_BONDABLE;
0822 settings |= MGMT_SETTING_DEBUG_KEYS;
0823 settings |= MGMT_SETTING_CONNECTABLE;
0824 settings |= MGMT_SETTING_DISCOVERABLE;
0825
0826 if (lmp_bredr_capable(hdev)) {
0827 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
0828 settings |= MGMT_SETTING_FAST_CONNECTABLE;
0829 settings |= MGMT_SETTING_BREDR;
0830 settings |= MGMT_SETTING_LINK_SECURITY;
0831
0832 if (lmp_ssp_capable(hdev)) {
0833 settings |= MGMT_SETTING_SSP;
0834 if (IS_ENABLED(CONFIG_BT_HS))
0835 settings |= MGMT_SETTING_HS;
0836 }
0837
0838 if (lmp_sc_capable(hdev))
0839 settings |= MGMT_SETTING_SECURE_CONN;
0840
0841 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
0842 &hdev->quirks))
0843 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
0844 }
0845
0846 if (lmp_le_capable(hdev)) {
0847 settings |= MGMT_SETTING_LE;
0848 settings |= MGMT_SETTING_SECURE_CONN;
0849 settings |= MGMT_SETTING_PRIVACY;
0850 settings |= MGMT_SETTING_STATIC_ADDRESS;
0851 settings |= MGMT_SETTING_ADVERTISING;
0852 }
0853
0854 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
0855 hdev->set_bdaddr)
0856 settings |= MGMT_SETTING_CONFIGURATION;
0857
0858 settings |= MGMT_SETTING_PHY_CONFIGURATION;
0859
0860 return settings;
0861 }
0862
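/* Bitmask of MGMT settings that are currently active, derived
 * from the hdev flags.
 */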
0863 static u32 get_current_settings(struct hci_dev *hdev)
0864 {
0865 u32 settings = 0;
0866
0867 if (hdev_is_powered(hdev))
0868 settings |= MGMT_SETTING_POWERED;
0869
0870 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
0871 settings |= MGMT_SETTING_CONNECTABLE;
0872
0873 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
0874 settings |= MGMT_SETTING_FAST_CONNECTABLE;
0875
0876 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
0877 settings |= MGMT_SETTING_DISCOVERABLE;
0878
0879 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
0880 settings |= MGMT_SETTING_BONDABLE;
0881
0882 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
0883 settings |= MGMT_SETTING_BREDR;
0884
0885 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
0886 settings |= MGMT_SETTING_LE;
0887
0888 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
0889 settings |= MGMT_SETTING_LINK_SECURITY;
0890
0891 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
0892 settings |= MGMT_SETTING_SSP;
0893
0894 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
0895 settings |= MGMT_SETTING_HS;
0896
0897 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
0898 settings |= MGMT_SETTING_ADVERTISING;
0899
0900 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
0901 settings |= MGMT_SETTING_SECURE_CONN;
0902
0903 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
0904 settings |= MGMT_SETTING_DEBUG_KEYS;
0905
0906 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
0907 settings |= MGMT_SETTING_PRIVACY;
0908
/* The static address setting is only reported when the
 * controller is actually expected to use its static address:
 * either a random address has been forced, BR/EDR support is
 * disabled, or no public address is available at all. Even
 * then it is only included if a static address has actually
 * been configured.
 */
0921 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
0922 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
0923 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
0924 if (bacmp(&hdev->static_addr, BDADDR_ANY))
0925 settings |= MGMT_SETTING_STATIC_ADDRESS;
0926 }
0927
0928 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
0929 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
0930
0931 return settings;
0932 }
0933
0934 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
0935 {
0936 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
0937 }
0938
0939 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
0940 {
0941 struct mgmt_pending_cmd *cmd;
0942
/* If there is a pending Set Discoverable command, the flags
 * have not been updated yet, so derive the advertising
 * discoverable flags from the pending command's parameters.
 */
0946 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
0947 if (cmd) {
0948 struct mgmt_mode *cp = cmd->param;
0949 if (cp->val == 0x01)
0950 return LE_AD_GENERAL;
0951 else if (cp->val == 0x02)
0952 return LE_AD_LIMITED;
0953 } else {
0954 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
0955 return LE_AD_LIMITED;
0956 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
0957 return LE_AD_GENERAL;
0958 }
0959
0960 return 0;
0961 }
0962
0963 bool mgmt_get_connectable(struct hci_dev *hdev)
0964 {
0965 struct mgmt_pending_cmd *cmd;
0966
/* If there is a pending Set Connectable command, the flag has
 * not been updated yet, so report the value requested by the
 * pending command instead.
 */
0970 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
0971 if (cmd) {
0972 struct mgmt_mode *cp = cmd->param;
0973
0974 return cp->val;
0975 }
0976
0977 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
0978 }
0979
0980 static int service_cache_sync(struct hci_dev *hdev, void *data)
0981 {
0982 hci_update_eir_sync(hdev);
0983 hci_update_class_sync(hdev);
0984
0985 return 0;
0986 }
0987
0988 static void service_cache_off(struct work_struct *work)
0989 {
0990 struct hci_dev *hdev = container_of(work, struct hci_dev,
0991 service_cache.work);
0992
0993 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
0994 return;
0995
0996 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
0997 }
0998
0999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1000 {
/* A new RPA is generated and programmed into the controller
 * as part of (re)enabling advertising, so simply restart
 * advertising when the current RPA expires.
 */
1005 if (ext_adv_capable(hdev))
1006 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1007 else
1008 return hci_enable_advertising_sync(hdev);
1009 }
1010
1011 static void rpa_expired(struct work_struct *work)
1012 {
1013 struct hci_dev *hdev = container_of(work, struct hci_dev,
1014 rpa_expired.work);
1015
1016 bt_dev_dbg(hdev, "");
1017
1018 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1019
1020 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1021 return;
1022
1023 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1024 }
1025
1026 static void discov_off(struct work_struct *work)
1027 {
1028 struct hci_dev *hdev = container_of(work, struct hci_dev,
1029 discov_off.work);
1030
1031 bt_dev_dbg(hdev, "");
1032
1033 hci_dev_lock(hdev);
1034
/* When the discoverable timeout expires, clear both the
 * limited and general discoverable flags unconditionally,
 * reset the stored timeout and push the new scan mode and
 * settings out.
 */
1040 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1041 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1042 hdev->discov_timeout = 0;
1043
1044 hci_update_discoverable(hdev);
1045
1046 mgmt_new_settings(hdev);
1047
1048 hci_dev_unlock(hdev);
1049 }
1050
1051 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1052 {
1053 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1054 return;
1055
1056 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1057
1058 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1059 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1060 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1061
/* A controller that comes under mgmt control starts out as
 * non-bondable; userspace has to explicitly enable bonding
 * with the Set Bondable command.
 */
1067 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1068 }
1069
1070 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1071 void *data, u16 data_len)
1072 {
1073 struct mgmt_rp_read_info rp;
1074
1075 bt_dev_dbg(hdev, "sock %p", sk);
1076
1077 hci_dev_lock(hdev);
1078
1079 memset(&rp, 0, sizeof(rp));
1080
1081 bacpy(&rp.bdaddr, &hdev->bdaddr);
1082
1083 rp.version = hdev->hci_ver;
1084 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1085
1086 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1087 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1088
1089 memcpy(rp.dev_class, hdev->dev_class, 3);
1090
1091 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1092 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1093
1094 hci_dev_unlock(hdev);
1095
1096 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1097 sizeof(rp));
1098 }
1099
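/* Append class of device, appearance and the complete/short names
 * as EIR-formatted data and return the resulting length.
 */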
1100 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1101 {
1102 u16 eir_len = 0;
1103 size_t name_len;
1104
1105 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1106 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1107 hdev->dev_class, 3);
1108
1109 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1110 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1111 hdev->appearance);
1112
1113 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1114 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1115 hdev->dev_name, name_len);
1116
1117 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1118 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1119 hdev->short_name, name_len);
1120
1121 return eir_len;
1122 }
1123
1124 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1125 void *data, u16 data_len)
1126 {
1127 char buf[512];
1128 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1129 u16 eir_len;
1130
1131 bt_dev_dbg(hdev, "sock %p", sk);
1132
1133 memset(&buf, 0, sizeof(buf));
1134
1135 hci_dev_lock(hdev);
1136
1137 bacpy(&rp->bdaddr, &hdev->bdaddr);
1138
1139 rp->version = hdev->hci_ver;
1140 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1141
1142 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1143 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1144
1145
1146 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1147 rp->eir_len = cpu_to_le16(eir_len);
1148
1149 hci_dev_unlock(hdev);
1150
/* After this command has been called at least once, only the
 * extended controller information changed event is sent to
 * this socket; the legacy class of device and local name
 * events are disabled for it.
 */
1156 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1157 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1158 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1159
1160 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1161 sizeof(*rp) + eir_len);
1162 }
1163
1164 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1165 {
1166 char buf[512];
1167 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1168 u16 eir_len;
1169
1170 memset(buf, 0, sizeof(buf));
1171
1172 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1173 ev->eir_len = cpu_to_le16(eir_len);
1174
1175 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1176 sizeof(*ev) + eir_len,
1177 HCI_MGMT_EXT_INFO_EVENTS, skip);
1178 }
1179
1180 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1181 {
1182 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1183
1184 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1185 sizeof(settings));
1186 }
1187
1188 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1189 {
1190 struct mgmt_ev_advertising_added ev;
1191
1192 ev.instance = instance;
1193
1194 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1195 }
1196
1197 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1198 u8 instance)
1199 {
1200 struct mgmt_ev_advertising_removed ev;
1201
1202 ev.instance = instance;
1203
1204 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1205 }
1206
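/* Cancel a pending advertising instance expiry, if one is armed. */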
1207 static void cancel_adv_timeout(struct hci_dev *hdev)
1208 {
1209 if (hdev->adv_instance_timeout) {
1210 hdev->adv_instance_timeout = 0;
1211 cancel_delayed_work(&hdev->adv_instance_expire);
1212 }
1213 }
1214
1215
1216 static void restart_le_actions(struct hci_dev *hdev)
1217 {
1218 struct hci_conn_params *p;
1219
1220 list_for_each_entry(p, &hdev->le_conn_params, list) {
/* Remove the connection parameters from any action list first
 * so they can be re-added to the appropriate one below.
 */
1224 list_del_init(&p->action);
1225
1226 switch (p->auto_connect) {
1227 case HCI_AUTO_CONN_DIRECT:
1228 case HCI_AUTO_CONN_ALWAYS:
1229 list_add(&p->action, &hdev->pend_le_conns);
1230 break;
1231 case HCI_AUTO_CONN_REPORT:
1232 list_add(&p->action, &hdev->pend_le_reports);
1233 break;
1234 default:
1235 break;
1236 }
1237 }
1238 }
1239
1240 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1241 {
1242 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1243
1244 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1245 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1246 }
1247
1248 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1249 {
1250 struct mgmt_pending_cmd *cmd = data;
1251 struct mgmt_mode *cp;
1252
/* Make sure cmd still outstanding. */
1254 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1255 return;
1256
1257 cp = cmd->param;
1258
1259 bt_dev_dbg(hdev, "err %d", err);
1260
1261 if (!err) {
1262 if (cp->val) {
1263 hci_dev_lock(hdev);
1264 restart_le_actions(hdev);
1265 hci_update_passive_scan(hdev);
1266 hci_dev_unlock(hdev);
1267 }
1268
1269 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1270
/* Only emit New Settings here for power on; the power off
 * case is handled separately when the controller goes down.
 */
1274 if (cp->val)
1275 new_settings(hdev, cmd->sk);
1276 } else {
1277 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1278 mgmt_status(err));
1279 }
1280
1281 mgmt_pending_remove(cmd);
1282 }
1283
1284 static int set_powered_sync(struct hci_dev *hdev, void *data)
1285 {
1286 struct mgmt_pending_cmd *cmd = data;
1287 struct mgmt_mode *cp = cmd->param;
1288
1289 BT_DBG("%s", hdev->name);
1290
1291 return hci_set_powered_sync(hdev, cp->val);
1292 }
1293
1294 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1295 u16 len)
1296 {
1297 struct mgmt_mode *cp = data;
1298 struct mgmt_pending_cmd *cmd;
1299 int err;
1300
1301 bt_dev_dbg(hdev, "sock %p", sk);
1302
1303 if (cp->val != 0x00 && cp->val != 0x01)
1304 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1305 MGMT_STATUS_INVALID_PARAMS);
1306
1307 hci_dev_lock(hdev);
1308
1309 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1310 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1311 MGMT_STATUS_BUSY);
1312 goto failed;
1313 }
1314
1315 if (!!cp->val == hdev_is_powered(hdev)) {
1316 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1317 goto failed;
1318 }
1319
1320 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1321 if (!cmd) {
1322 err = -ENOMEM;
1323 goto failed;
1324 }
1325
1326 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1327 mgmt_set_powered_complete);
1328
1329 if (err < 0)
1330 mgmt_pending_remove(cmd);
1331
1332 failed:
1333 hci_dev_unlock(hdev);
1334 return err;
1335 }
1336
1337 int mgmt_new_settings(struct hci_dev *hdev)
1338 {
1339 return new_settings(hdev, NULL);
1340 }
1341
1342 struct cmd_lookup {
1343 struct sock *sk;
1344 struct hci_dev *hdev;
1345 u8 mgmt_status;
1346 };
1347
1348 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1349 {
1350 struct cmd_lookup *match = data;
1351
1352 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1353
1354 list_del(&cmd->list);
1355
1356 if (match->sk == NULL) {
1357 match->sk = cmd->sk;
1358 sock_hold(match->sk);
1359 }
1360
1361 mgmt_pending_free(cmd);
1362 }
1363
1364 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1365 {
1366 u8 *status = data;
1367
1368 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1369 mgmt_pending_remove(cmd);
1370 }
1371
1372 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1373 {
1374 if (cmd->cmd_complete) {
1375 u8 *status = data;
1376
1377 cmd->cmd_complete(cmd, *status);
1378 mgmt_pending_remove(cmd);
1379
1380 return;
1381 }
1382
1383 cmd_status_rsp(cmd, data);
1384 }
1385
1386 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1387 {
1388 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1389 cmd->param, cmd->param_len);
1390 }
1391
1392 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1393 {
1394 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1395 cmd->param, sizeof(struct mgmt_addr_info));
1396 }
1397
1398 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1399 {
1400 if (!lmp_bredr_capable(hdev))
1401 return MGMT_STATUS_NOT_SUPPORTED;
1402 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1403 return MGMT_STATUS_REJECTED;
1404 else
1405 return MGMT_STATUS_SUCCESS;
1406 }
1407
1408 static u8 mgmt_le_support(struct hci_dev *hdev)
1409 {
1410 if (!lmp_le_capable(hdev))
1411 return MGMT_STATUS_NOT_SUPPORTED;
1412 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1413 return MGMT_STATUS_REJECTED;
1414 else
1415 return MGMT_STATUS_SUCCESS;
1416 }
1417
1418 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1419 int err)
1420 {
1421 struct mgmt_pending_cmd *cmd = data;
1422
1423 bt_dev_dbg(hdev, "err %d", err);
1424
/* Make sure cmd still outstanding. */
1426 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1427 return;
1428
1429 hci_dev_lock(hdev);
1430
1431 if (err) {
1432 u8 mgmt_err = mgmt_status(err);
1433 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1434 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1435 goto done;
1436 }
1437
1438 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1439 hdev->discov_timeout > 0) {
1440 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1441 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1442 }
1443
1444 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1445 new_settings(hdev, cmd->sk);
1446
1447 done:
1448 mgmt_pending_remove(cmd);
1449 hci_dev_unlock(hdev);
1450 }
1451
1452 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1453 {
1454 BT_DBG("%s", hdev->name);
1455
1456 return hci_update_discoverable_sync(hdev);
1457 }
1458
1459 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1460 u16 len)
1461 {
1462 struct mgmt_cp_set_discoverable *cp = data;
1463 struct mgmt_pending_cmd *cmd;
1464 u16 timeout;
1465 int err;
1466
1467 bt_dev_dbg(hdev, "sock %p", sk);
1468
1469 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1470 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1471 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1472 MGMT_STATUS_REJECTED);
1473
1474 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1475 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1476 MGMT_STATUS_INVALID_PARAMS);
1477
1478 timeout = __le16_to_cpu(cp->timeout);
1479
/* Disabling discoverable requires that no timeout is set, and
 * enabling limited discoverable requires a timeout.
 */
1483 if ((cp->val == 0x00 && timeout > 0) ||
1484 (cp->val == 0x02 && timeout == 0))
1485 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1486 MGMT_STATUS_INVALID_PARAMS);
1487
1488 hci_dev_lock(hdev);
1489
1490 if (!hdev_is_powered(hdev) && timeout > 0) {
1491 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1492 MGMT_STATUS_NOT_POWERED);
1493 goto failed;
1494 }
1495
1496 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1497 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1498 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1499 MGMT_STATUS_BUSY);
1500 goto failed;
1501 }
1502
1503 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1504 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1505 MGMT_STATUS_REJECTED);
1506 goto failed;
1507 }
1508
1509 if (hdev->advertising_paused) {
1510 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1511 MGMT_STATUS_BUSY);
1512 goto failed;
1513 }
1514
1515 if (!hdev_is_powered(hdev)) {
1516 bool changed = false;
1517
/* The controller is not powered, so only store the desired
 * discoverable state and notify about the setting change if
 * it differs from the current value.
 */
1522 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1523 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1524 changed = true;
1525 }
1526
1527 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1528 if (err < 0)
1529 goto failed;
1530
1531 if (changed)
1532 err = new_settings(hdev, sk);
1533
1534 goto failed;
1535 }
1536
/* If the requested mode matches the current one, only the
 * timeout value needs to be updated and the timer re-armed.
 */
1541 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1542 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1543 HCI_LIMITED_DISCOVERABLE)) {
1544 cancel_delayed_work(&hdev->discov_off);
1545 hdev->discov_timeout = timeout;
1546
1547 if (cp->val && hdev->discov_timeout > 0) {
1548 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1549 queue_delayed_work(hdev->req_workqueue,
1550 &hdev->discov_off, to);
1551 }
1552
1553 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1554 goto failed;
1555 }
1556
1557 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1558 if (!cmd) {
1559 err = -ENOMEM;
1560 goto failed;
1561 }
1562
/* Cancel any potential discoverable timeout that might still
 * be active and store the new timeout value. The timer itself
 * is re-armed from the command complete handler.
 */
1567 cancel_delayed_work(&hdev->discov_off);
1568 hdev->discov_timeout = timeout;
1569
1570 if (cp->val)
1571 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1572 else
1573 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1574
/* Limited discoverable mode */
1576 if (cp->val == 0x02)
1577 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1578 else
1579 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1580
1581 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1582 mgmt_set_discoverable_complete);
1583
1584 if (err < 0)
1585 mgmt_pending_remove(cmd);
1586
1587 failed:
1588 hci_dev_unlock(hdev);
1589 return err;
1590 }
1591
1592 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1593 int err)
1594 {
1595 struct mgmt_pending_cmd *cmd = data;
1596
1597 bt_dev_dbg(hdev, "err %d", err);
1598
/* Make sure cmd still outstanding. */
1600 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1601 return;
1602
1603 hci_dev_lock(hdev);
1604
1605 if (err) {
1606 u8 mgmt_err = mgmt_status(err);
1607 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1608 goto done;
1609 }
1610
1611 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1612 new_settings(hdev, cmd->sk);
1613
1614 done:
1615 if (cmd)
1616 mgmt_pending_remove(cmd);
1617
1618 hci_dev_unlock(hdev);
1619 }
1620
1621 static int set_connectable_update_settings(struct hci_dev *hdev,
1622 struct sock *sk, u8 val)
1623 {
1624 bool changed = false;
1625 int err;
1626
1627 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1628 changed = true;
1629
1630 if (val) {
1631 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1632 } else {
1633 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1634 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1635 }
1636
1637 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1638 if (err < 0)
1639 return err;
1640
1641 if (changed) {
1642 hci_update_scan(hdev);
1643 hci_update_passive_scan(hdev);
1644 return new_settings(hdev, sk);
1645 }
1646
1647 return 0;
1648 }
1649
1650 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1651 {
1652 BT_DBG("%s", hdev->name);
1653
1654 return hci_update_connectable_sync(hdev);
1655 }
1656
1657 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1658 u16 len)
1659 {
1660 struct mgmt_mode *cp = data;
1661 struct mgmt_pending_cmd *cmd;
1662 int err;
1663
1664 bt_dev_dbg(hdev, "sock %p", sk);
1665
1666 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1667 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1668 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1669 MGMT_STATUS_REJECTED);
1670
1671 if (cp->val != 0x00 && cp->val != 0x01)
1672 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1673 MGMT_STATUS_INVALID_PARAMS);
1674
1675 hci_dev_lock(hdev);
1676
1677 if (!hdev_is_powered(hdev)) {
1678 err = set_connectable_update_settings(hdev, sk, cp->val);
1679 goto failed;
1680 }
1681
1682 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1683 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1684 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1685 MGMT_STATUS_BUSY);
1686 goto failed;
1687 }
1688
1689 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1690 if (!cmd) {
1691 err = -ENOMEM;
1692 goto failed;
1693 }
1694
1695 if (cp->val) {
1696 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1697 } else {
1698 if (hdev->discov_timeout > 0)
1699 cancel_delayed_work(&hdev->discov_off);
1700
1701 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1702 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1703 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1704 }
1705
1706 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1707 mgmt_set_connectable_complete);
1708
1709 if (err < 0)
1710 mgmt_pending_remove(cmd);
1711
1712 failed:
1713 hci_dev_unlock(hdev);
1714 return err;
1715 }
1716
1717 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1718 u16 len)
1719 {
1720 struct mgmt_mode *cp = data;
1721 bool changed;
1722 int err;
1723
1724 bt_dev_dbg(hdev, "sock %p", sk);
1725
1726 if (cp->val != 0x00 && cp->val != 0x01)
1727 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1728 MGMT_STATUS_INVALID_PARAMS);
1729
1730 hci_dev_lock(hdev);
1731
1732 if (cp->val)
1733 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1734 else
1735 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1736
1737 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1738 if (err < 0)
1739 goto unlock;
1740
1741 if (changed) {
/* In limited privacy mode the change of bondable mode may
 * affect the local advertising address.
 */
1745 hci_update_discoverable(hdev);
1746
1747 err = new_settings(hdev, sk);
1748 }
1749
1750 unlock:
1751 hci_dev_unlock(hdev);
1752 return err;
1753 }
1754
1755 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1756 u16 len)
1757 {
1758 struct mgmt_mode *cp = data;
1759 struct mgmt_pending_cmd *cmd;
1760 u8 val, status;
1761 int err;
1762
1763 bt_dev_dbg(hdev, "sock %p", sk);
1764
1765 status = mgmt_bredr_support(hdev);
1766 if (status)
1767 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1768 status);
1769
1770 if (cp->val != 0x00 && cp->val != 0x01)
1771 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1772 MGMT_STATUS_INVALID_PARAMS);
1773
1774 hci_dev_lock(hdev);
1775
1776 if (!hdev_is_powered(hdev)) {
1777 bool changed = false;
1778
1779 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1780 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1781 changed = true;
1782 }
1783
1784 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1785 if (err < 0)
1786 goto failed;
1787
1788 if (changed)
1789 err = new_settings(hdev, sk);
1790
1791 goto failed;
1792 }
1793
1794 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1795 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1796 MGMT_STATUS_BUSY);
1797 goto failed;
1798 }
1799
1800 val = !!cp->val;
1801
1802 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1803 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1804 goto failed;
1805 }
1806
1807 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1808 if (!cmd) {
1809 err = -ENOMEM;
1810 goto failed;
1811 }
1812
1813 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1814 if (err < 0) {
1815 mgmt_pending_remove(cmd);
1816 goto failed;
1817 }
1818
1819 failed:
1820 hci_dev_unlock(hdev);
1821 return err;
1822 }
1823
1824 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1825 {
1826 struct cmd_lookup match = { NULL, hdev };
1827 struct mgmt_pending_cmd *cmd = data;
1828 struct mgmt_mode *cp = cmd->param;
1829 u8 enable = cp->val;
1830 bool changed;
1831
/* Make sure cmd still outstanding. */
1833 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1834 return;
1835
1836 if (err) {
1837 u8 mgmt_err = mgmt_status(err);
1838
1839 if (enable && hci_dev_test_and_clear_flag(hdev,
1840 HCI_SSP_ENABLED)) {
1841 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1842 new_settings(hdev, NULL);
1843 }
1844
1845 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1846 &mgmt_err);
1847 return;
1848 }
1849
1850 if (enable) {
1851 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1852 } else {
1853 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1854
1855 if (!changed)
1856 changed = hci_dev_test_and_clear_flag(hdev,
1857 HCI_HS_ENABLED);
1858 else
1859 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1860 }
1861
1862 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1863
1864 if (changed)
1865 new_settings(hdev, match.sk);
1866
1867 if (match.sk)
1868 sock_put(match.sk);
1869
1870 hci_update_eir_sync(hdev);
1871 }
1872
1873 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1874 {
1875 struct mgmt_pending_cmd *cmd = data;
1876 struct mgmt_mode *cp = cmd->param;
1877 bool changed = false;
1878 int err;
1879
1880 if (cp->val)
1881 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1882
1883 err = hci_write_ssp_mode_sync(hdev, cp->val);
1884
1885 if (!err && changed)
1886 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1887
1888 return err;
1889 }
1890
1891 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1892 {
1893 struct mgmt_mode *cp = data;
1894 struct mgmt_pending_cmd *cmd;
1895 u8 status;
1896 int err;
1897
1898 bt_dev_dbg(hdev, "sock %p", sk);
1899
1900 status = mgmt_bredr_support(hdev);
1901 if (status)
1902 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1903
1904 if (!lmp_ssp_capable(hdev))
1905 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1906 MGMT_STATUS_NOT_SUPPORTED);
1907
1908 if (cp->val != 0x00 && cp->val != 0x01)
1909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1910 MGMT_STATUS_INVALID_PARAMS);
1911
1912 hci_dev_lock(hdev);
1913
1914 if (!hdev_is_powered(hdev)) {
1915 bool changed;
1916
1917 if (cp->val) {
1918 changed = !hci_dev_test_and_set_flag(hdev,
1919 HCI_SSP_ENABLED);
1920 } else {
1921 changed = hci_dev_test_and_clear_flag(hdev,
1922 HCI_SSP_ENABLED);
1923 if (!changed)
1924 changed = hci_dev_test_and_clear_flag(hdev,
1925 HCI_HS_ENABLED);
1926 else
1927 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1928 }
1929
1930 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1931 if (err < 0)
1932 goto failed;
1933
1934 if (changed)
1935 err = new_settings(hdev, sk);
1936
1937 goto failed;
1938 }
1939
1940 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1941 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1942 MGMT_STATUS_BUSY);
1943 goto failed;
1944 }
1945
1946 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1947 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1948 goto failed;
1949 }
1950
1951 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1952 if (!cmd)
1953 err = -ENOMEM;
1954 else
1955 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
1956 set_ssp_complete);
1957
1958 if (err < 0) {
1959 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1960 MGMT_STATUS_FAILED);
1961
1962 if (cmd)
1963 mgmt_pending_remove(cmd);
1964 }
1965
1966 failed:
1967 hci_dev_unlock(hdev);
1968 return err;
1969 }
1970
1971 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1972 {
1973 struct mgmt_mode *cp = data;
1974 bool changed;
1975 u8 status;
1976 int err;
1977
1978 bt_dev_dbg(hdev, "sock %p", sk);
1979
1980 if (!IS_ENABLED(CONFIG_BT_HS))
1981 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1982 MGMT_STATUS_NOT_SUPPORTED);
1983
1984 status = mgmt_bredr_support(hdev);
1985 if (status)
1986 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1987
1988 if (!lmp_ssp_capable(hdev))
1989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1990 MGMT_STATUS_NOT_SUPPORTED);
1991
1992 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1993 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1994 MGMT_STATUS_REJECTED);
1995
1996 if (cp->val != 0x00 && cp->val != 0x01)
1997 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1998 MGMT_STATUS_INVALID_PARAMS);
1999
2000 hci_dev_lock(hdev);
2001
2002 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2003 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2004 MGMT_STATUS_BUSY);
2005 goto unlock;
2006 }
2007
2008 if (cp->val) {
2009 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2010 } else {
2011 if (hdev_is_powered(hdev)) {
2012 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2013 MGMT_STATUS_REJECTED);
2014 goto unlock;
2015 }
2016
2017 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2018 }
2019
2020 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2021 if (err < 0)
2022 goto unlock;
2023
2024 if (changed)
2025 err = new_settings(hdev, sk);
2026
2027 unlock:
2028 hci_dev_unlock(hdev);
2029 return err;
2030 }
2031
2032 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2033 {
2034 struct cmd_lookup match = { NULL, hdev };
2035 u8 status = mgmt_status(err);
2036
2037 bt_dev_dbg(hdev, "err %d", err);
2038
2039 if (status) {
2040 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2041 &status);
2042 return;
2043 }
2044
2045 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2046
2047 new_settings(hdev, match.sk);
2048
2049 if (match.sk)
2050 sock_put(match.sk);
2051 }
2052
2053 static int set_le_sync(struct hci_dev *hdev, void *data)
2054 {
2055 struct mgmt_pending_cmd *cmd = data;
2056 struct mgmt_mode *cp = cmd->param;
2057 u8 val = !!cp->val;
2058 int err;
2059
2060 if (!val) {
2061 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2062 hci_disable_advertising_sync(hdev);
2063
2064 if (ext_adv_capable(hdev))
2065 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2066 } else {
2067 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2068 }
2069
2070 err = hci_write_le_host_supported_sync(hdev, val, 0);
2071
/* Once LE host support has been enabled, make sure the
 * controller has sensible default advertising and scan
 * response data and update passive scanning.
 */
2077 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2078 if (ext_adv_capable(hdev)) {
2079 int status;
2080
2081 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2082 if (!status)
2083 hci_update_scan_rsp_data_sync(hdev, 0x00);
2084 } else {
2085 hci_update_adv_data_sync(hdev, 0x00);
2086 hci_update_scan_rsp_data_sync(hdev, 0x00);
2087 }
2088
2089 hci_update_passive_scan(hdev);
2090 }
2091
2092 return err;
2093 }
2094
2095 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2096 {
2097 struct mgmt_mode *cp = data;
2098 struct mgmt_pending_cmd *cmd;
2099 int err;
2100 u8 val, enabled;
2101
2102 bt_dev_dbg(hdev, "sock %p", sk);
2103
2104 if (!lmp_le_capable(hdev))
2105 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2106 MGMT_STATUS_NOT_SUPPORTED);
2107
2108 if (cp->val != 0x00 && cp->val != 0x01)
2109 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2110 MGMT_STATUS_INVALID_PARAMS);
2111
/* LE-only configurations (single mode LE controllers, or
 * dual-mode controllers with BR/EDR switched off) do not
 * allow disabling LE. A request to enable LE is simply
 * acknowledged with the current settings, while a request to
 * disable it is rejected.
 */
2121 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2122 if (cp->val == 0x01)
2123 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2124
2125 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2126 MGMT_STATUS_REJECTED);
2127 }
2128
2129 hci_dev_lock(hdev);
2130
2131 val = !!cp->val;
2132 enabled = lmp_host_le_capable(hdev);
2133
2134 if (!val)
2135 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
2136
2137 if (!hdev_is_powered(hdev) || val == enabled) {
2138 bool changed = false;
2139
2140 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2141 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2142 changed = true;
2143 }
2144
2145 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2146 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2147 changed = true;
2148 }
2149
2150 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2151 if (err < 0)
2152 goto unlock;
2153
2154 if (changed)
2155 err = new_settings(hdev, sk);
2156
2157 goto unlock;
2158 }
2159
2160 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2161 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2162 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2163 MGMT_STATUS_BUSY);
2164 goto unlock;
2165 }
2166
2167 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2168 if (!cmd)
2169 err = -ENOMEM;
2170 else
2171 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2172 set_le_complete);
2173
2174 if (err < 0) {
2175 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2176 MGMT_STATUS_FAILED);
2177
2178 if (cmd)
2179 mgmt_pending_remove(cmd);
2180 }
2181
2182 unlock:
2183 hci_dev_unlock(hdev);
2184 return err;
2185 }
2186
/* Commands that modify the class of device or the EIR data
 * cannot run in parallel, since the final values depend on
 * the order in which the individual updates complete. This
 * helper checks whether such a command is still pending.
 */
2193 static bool pending_eir_or_class(struct hci_dev *hdev)
2194 {
2195 struct mgmt_pending_cmd *cmd;
2196
2197 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2198 switch (cmd->opcode) {
2199 case MGMT_OP_ADD_UUID:
2200 case MGMT_OP_REMOVE_UUID:
2201 case MGMT_OP_SET_DEV_CLASS:
2202 case MGMT_OP_SET_POWERED:
2203 return true;
2204 }
2205 }
2206
2207 return false;
2208 }
2209
2210 static const u8 bluetooth_base_uuid[] = {
2211 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2212 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2213 };
2214
2215 static u8 get_uuid_size(const u8 *uuid)
2216 {
2217 u32 val;
2218
2219 if (memcmp(uuid, bluetooth_base_uuid, 12))
2220 return 128;
2221
2222 val = get_unaligned_le32(&uuid[12]);
2223 if (val > 0xffff)
2224 return 32;
2225
2226 return 16;
2227 }
2228
2229 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2230 {
2231 struct mgmt_pending_cmd *cmd = data;
2232
2233 bt_dev_dbg(hdev, "err %d", err);
2234
2235 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2236 mgmt_status(err), hdev->dev_class, 3);
2237
2238 mgmt_pending_free(cmd);
2239 }
2240
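/* hci_sync callback shared by the UUID commands: refresh the Class of
* Device first and then the extended inquiry response data.
*/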
2241 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2242 {
2243 int err;
2244
2245 err = hci_update_class_sync(hdev);
2246 if (err)
2247 return err;
2248
2249 return hci_update_eir_sync(hdev);
2250 }
2251
2252 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2253 {
2254 struct mgmt_cp_add_uuid *cp = data;
2255 struct mgmt_pending_cmd *cmd;
2256 struct bt_uuid *uuid;
2257 int err;
2258
2259 bt_dev_dbg(hdev, "sock %p", sk);
2260
2261 hci_dev_lock(hdev);
2262
2263 if (pending_eir_or_class(hdev)) {
2264 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2265 MGMT_STATUS_BUSY);
2266 goto failed;
2267 }
2268
2269 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2270 if (!uuid) {
2271 err = -ENOMEM;
2272 goto failed;
2273 }
2274
2275 memcpy(uuid->uuid, cp->uuid, 16);
2276 uuid->svc_hint = cp->svc_hint;
2277 uuid->size = get_uuid_size(cp->uuid);
2278
2279 list_add_tail(&uuid->list, &hdev->uuids);
2280
2281 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2282 if (!cmd) {
2283 err = -ENOMEM;
2284 goto failed;
2285 }
2286
2287 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2288 if (err < 0) {
2289 mgmt_pending_free(cmd);
2290 goto failed;
2291 }
2292
2293 failed:
2294 hci_dev_unlock(hdev);
2295 return err;
2296 }
2297
2298 static bool enable_service_cache(struct hci_dev *hdev)
2299 {
2300 if (!hdev_is_powered(hdev))
2301 return false;
2302
2303 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2304 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2305 CACHE_TIMEOUT);
2306 return true;
2307 }
2308
2309 return false;
2310 }
2311
2312 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2313 {
2314 int err;
2315
2316 err = hci_update_class_sync(hdev);
2317 if (err)
2318 return err;
2319
2320 return hci_update_eir_sync(hdev);
2321 }
2322
2323 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2324 u16 len)
2325 {
2326 struct mgmt_cp_remove_uuid *cp = data;
2327 struct mgmt_pending_cmd *cmd;
2328 struct bt_uuid *match, *tmp;
2329 static const u8 bt_uuid_any[] = {
2330 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2331 };
2332 int err, found;
2333
2334 bt_dev_dbg(hdev, "sock %p", sk);
2335
2336 hci_dev_lock(hdev);
2337
2338 if (pending_eir_or_class(hdev)) {
2339 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2340 MGMT_STATUS_BUSY);
2341 goto unlock;
2342 }
2343
2344 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2345 hci_uuids_clear(hdev);
2346
2347 if (enable_service_cache(hdev)) {
2348 err = mgmt_cmd_complete(sk, hdev->id,
2349 MGMT_OP_REMOVE_UUID,
2350 0, hdev->dev_class, 3);
2351 goto unlock;
2352 }
2353
2354 goto update_class;
2355 }
2356
2357 found = 0;
2358
2359 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2360 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2361 continue;
2362
2363 list_del(&match->list);
2364 kfree(match);
2365 found++;
2366 }
2367
2368 if (found == 0) {
2369 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2370 MGMT_STATUS_INVALID_PARAMS);
2371 goto unlock;
2372 }
2373
2374 update_class:
2375 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2376 if (!cmd) {
2377 err = -ENOMEM;
2378 goto unlock;
2379 }
2380
2381 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2382 mgmt_class_complete);
2383 if (err < 0)
2384 mgmt_pending_free(cmd);
2385
2386 unlock:
2387 hci_dev_unlock(hdev);
2388 return err;
2389 }
2390
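/* hci_sync callback for MGMT_OP_SET_DEV_CLASS: flush a pending service
* cache update (EIR) before writing the new Class of Device.
*/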
2391 static int set_class_sync(struct hci_dev *hdev, void *data)
2392 {
2393 int err = 0;
2394
2395 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2396 cancel_delayed_work_sync(&hdev->service_cache);
2397 err = hci_update_eir_sync(hdev);
2398 }
2399
2400 if (err)
2401 return err;
2402
2403 return hci_update_class_sync(hdev);
2404 }
2405
2406 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2407 u16 len)
2408 {
2409 struct mgmt_cp_set_dev_class *cp = data;
2410 struct mgmt_pending_cmd *cmd;
2411 int err;
2412
2413 bt_dev_dbg(hdev, "sock %p", sk);
2414
2415 if (!lmp_bredr_capable(hdev))
2416 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2417 MGMT_STATUS_NOT_SUPPORTED);
2418
2419 hci_dev_lock(hdev);
2420
2421 if (pending_eir_or_class(hdev)) {
2422 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2423 MGMT_STATUS_BUSY);
2424 goto unlock;
2425 }
2426
2427 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2428 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2429 MGMT_STATUS_INVALID_PARAMS);
2430 goto unlock;
2431 }
2432
2433 hdev->major_class = cp->major;
2434 hdev->minor_class = cp->minor;
2435
2436 if (!hdev_is_powered(hdev)) {
2437 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2438 hdev->dev_class, 3);
2439 goto unlock;
2440 }
2441
2442 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2443 if (!cmd) {
2444 err = -ENOMEM;
2445 goto unlock;
2446 }
2447
2448 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2449 mgmt_class_complete);
2450 if (err < 0)
2451 mgmt_pending_free(cmd);
2452
2453 unlock:
2454 hci_dev_unlock(hdev);
2455 return err;
2456 }
2457
2458 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2459 u16 len)
2460 {
2461 struct mgmt_cp_load_link_keys *cp = data;
2462 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2463 sizeof(struct mgmt_link_key_info));
2464 u16 key_count, expected_len;
2465 bool changed;
2466 int i;
2467
2468 bt_dev_dbg(hdev, "sock %p", sk);
2469
2470 if (!lmp_bredr_capable(hdev))
2471 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2472 MGMT_STATUS_NOT_SUPPORTED);
2473
2474 key_count = __le16_to_cpu(cp->key_count);
2475 if (key_count > max_key_count) {
2476 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2477 key_count);
2478 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2479 MGMT_STATUS_INVALID_PARAMS);
2480 }
2481
2482 expected_len = struct_size(cp, keys, key_count);
2483 if (expected_len != len) {
2484 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2485 expected_len, len);
2486 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2487 MGMT_STATUS_INVALID_PARAMS);
2488 }
2489
2490 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2491 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2492 MGMT_STATUS_INVALID_PARAMS);
2493
2494 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2495 key_count);
2496
2497 for (i = 0; i < key_count; i++) {
2498 struct mgmt_link_key_info *key = &cp->keys[i];
2499
2500 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2501 return mgmt_cmd_status(sk, hdev->id,
2502 MGMT_OP_LOAD_LINK_KEYS,
2503 MGMT_STATUS_INVALID_PARAMS);
2504 }
2505
2506 hci_dev_lock(hdev);
2507
2508 hci_link_keys_clear(hdev);
2509
2510 if (cp->debug_keys)
2511 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2512 else
2513 changed = hci_dev_test_and_clear_flag(hdev,
2514 HCI_KEEP_DEBUG_KEYS);
2515
2516 if (changed)
2517 new_settings(hdev, NULL);
2518
2519 for (i = 0; i < key_count; i++) {
2520 struct mgmt_link_key_info *key = &cp->keys[i];
2521
2522 if (hci_is_blocked_key(hdev,
2523 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2524 key->val)) {
2525 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2526 &key->addr.bdaddr);
2527 continue;
2528 }
2529
2530 /* Always ignore debug keys and require a new pairing if
2531 * the user wants to use them.
2532 */
2533 if (key->type == HCI_LK_DEBUG_COMBINATION)
2534 continue;
2535
2536 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2537 key->type, key->pin_len, NULL);
2538 }
2539
2540 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2541
2542 hci_dev_unlock(hdev);
2543
2544 return 0;
2545 }
2546
2547 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2548 u8 addr_type, struct sock *skip_sk)
2549 {
2550 struct mgmt_ev_device_unpaired ev;
2551
2552 bacpy(&ev.addr.bdaddr, bdaddr);
2553 ev.addr.type = addr_type;
2554
2555 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2556 skip_sk);
2557 }
2558
2559 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2560 {
2561 struct mgmt_pending_cmd *cmd = data;
2562 struct mgmt_cp_unpair_device *cp = cmd->param;
2563
2564 if (!err)
2565 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2566
2567 cmd->cmd_complete(cmd, err);
2568 mgmt_pending_free(cmd);
2569 }
2570
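/* hci_sync callback for MGMT_OP_UNPAIR_DEVICE: if the device is still
* connected, terminate the link so the unpairing fully takes effect.
*/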
2571 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2572 {
2573 struct mgmt_pending_cmd *cmd = data;
2574 struct mgmt_cp_unpair_device *cp = cmd->param;
2575 struct hci_conn *conn;
2576
2577 if (cp->addr.type == BDADDR_BREDR)
2578 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2579 &cp->addr.bdaddr);
2580 else
2581 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2582 le_addr_type(cp->addr.type));
2583
2584 if (!conn)
2585 return 0;
2586
2587 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2588 }
2589
2590 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2591 u16 len)
2592 {
2593 struct mgmt_cp_unpair_device *cp = data;
2594 struct mgmt_rp_unpair_device rp;
2595 struct hci_conn_params *params;
2596 struct mgmt_pending_cmd *cmd;
2597 struct hci_conn *conn;
2598 u8 addr_type;
2599 int err;
2600
2601 memset(&rp, 0, sizeof(rp));
2602 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2603 rp.addr.type = cp->addr.type;
2604
2605 if (!bdaddr_type_is_valid(cp->addr.type))
2606 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2607 MGMT_STATUS_INVALID_PARAMS,
2608 &rp, sizeof(rp));
2609
2610 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2611 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2612 MGMT_STATUS_INVALID_PARAMS,
2613 &rp, sizeof(rp));
2614
2615 hci_dev_lock(hdev);
2616
2617 if (!hdev_is_powered(hdev)) {
2618 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2619 MGMT_STATUS_NOT_POWERED, &rp,
2620 sizeof(rp));
2621 goto unlock;
2622 }
2623
2624 if (cp->addr.type == BDADDR_BREDR) {
2625 /* If disconnection is requested, then look up the
2626 * connection. If the remote device is connected, it
2627 * will be later used to terminate the link.
2628 *
2629 * Setting it to NULL explicitly will cause no
2630 * termination of the link.
2631 */
2632 if (cp->disconnect)
2633 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2634 &cp->addr.bdaddr);
2635 else
2636 conn = NULL;
2637
2638 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2639 if (err < 0) {
2640 err = mgmt_cmd_complete(sk, hdev->id,
2641 MGMT_OP_UNPAIR_DEVICE,
2642 MGMT_STATUS_NOT_PAIRED, &rp,
2643 sizeof(rp));
2644 goto unlock;
2645 }
2646
2647 goto done;
2648 }
2649
2650 /* LE address type */
2651 addr_type = le_addr_type(cp->addr.type);
2652
2653 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2654 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2655 if (err < 0) {
2656 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2657 MGMT_STATUS_NOT_PAIRED, &rp,
2658 sizeof(rp));
2659 goto unlock;
2660 }
2661
2662 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2663 if (!conn) {
2664 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2665 goto done;
2666 }
2667
2668
2669 /* Defer clearing up the connection parameters until closing to
2670 * give a chance of keeping them if a repairing happens.
2671 */
2672 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2673
2674 /* Disable auto-connection parameters if present */
2675 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2676 if (params) {
2677 if (params->explicit_connect)
2678 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2679 else
2680 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2681 }
2682
2683 /* If disconnection is not requested, then clear the connection
2684 * variable so that the link is not terminated.
2685 */
2686 if (!cp->disconnect)
2687 conn = NULL;
2688
2689 done:
2690 /* If the connection variable is set, then termination of the
2691 * link is requested.
2692 */
2693 if (!conn) {
2694 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2695 &rp, sizeof(rp));
2696 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2697 goto unlock;
2698 }
2699
2700 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2701 sizeof(*cp));
2702 if (!cmd) {
2703 err = -ENOMEM;
2704 goto unlock;
2705 }
2706
2707 cmd->cmd_complete = addr_cmd_complete;
2708
2709 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
2710 unpair_device_complete);
2711 if (err < 0)
2712 mgmt_pending_free(cmd);
2713
2714 unlock:
2715 hci_dev_unlock(hdev);
2716 return err;
2717 }
2718
2719 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2720 u16 len)
2721 {
2722 struct mgmt_cp_disconnect *cp = data;
2723 struct mgmt_rp_disconnect rp;
2724 struct mgmt_pending_cmd *cmd;
2725 struct hci_conn *conn;
2726 int err;
2727
2728 bt_dev_dbg(hdev, "sock %p", sk);
2729
2730 memset(&rp, 0, sizeof(rp));
2731 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2732 rp.addr.type = cp->addr.type;
2733
2734 if (!bdaddr_type_is_valid(cp->addr.type))
2735 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2736 MGMT_STATUS_INVALID_PARAMS,
2737 &rp, sizeof(rp));
2738
2739 hci_dev_lock(hdev);
2740
2741 if (!test_bit(HCI_UP, &hdev->flags)) {
2742 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2743 MGMT_STATUS_NOT_POWERED, &rp,
2744 sizeof(rp));
2745 goto failed;
2746 }
2747
2748 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2749 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2750 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2751 goto failed;
2752 }
2753
2754 if (cp->addr.type == BDADDR_BREDR)
2755 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2756 &cp->addr.bdaddr);
2757 else
2758 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2759 le_addr_type(cp->addr.type));
2760
2761 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2762 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2763 MGMT_STATUS_NOT_CONNECTED, &rp,
2764 sizeof(rp));
2765 goto failed;
2766 }
2767
2768 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2769 if (!cmd) {
2770 err = -ENOMEM;
2771 goto failed;
2772 }
2773
2774 cmd->cmd_complete = generic_cmd_complete;
2775
2776 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2777 if (err < 0)
2778 mgmt_pending_remove(cmd);
2779
2780 failed:
2781 hci_dev_unlock(hdev);
2782 return err;
2783 }
2784
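/* Translate an HCI link type plus address type into the BDADDR_* address
* type exposed to userspace over the management interface.
*/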
2785 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2786 {
2787 switch (link_type) {
2788 case LE_LINK:
2789 switch (addr_type) {
2790 case ADDR_LE_DEV_PUBLIC:
2791 return BDADDR_LE_PUBLIC;
2792
2793 default:
2794 /* Fallback to LE Random address type */
2795 return BDADDR_LE_RANDOM;
2796 }
2797
2798 default:
2799 /* Fallback to BR/EDR type */
2800 return BDADDR_BREDR;
2801 }
2802 }
2803
2804 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2805 u16 data_len)
2806 {
2807 struct mgmt_rp_get_connections *rp;
2808 struct hci_conn *c;
2809 int err;
2810 u16 i;
2811
2812 bt_dev_dbg(hdev, "sock %p", sk);
2813
2814 hci_dev_lock(hdev);
2815
2816 if (!hdev_is_powered(hdev)) {
2817 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2818 MGMT_STATUS_NOT_POWERED);
2819 goto unlock;
2820 }
2821
2822 i = 0;
2823 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2824 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2825 i++;
2826 }
2827
2828 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2829 if (!rp) {
2830 err = -ENOMEM;
2831 goto unlock;
2832 }
2833
2834 i = 0;
2835 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2836 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2837 continue;
2838 bacpy(&rp->addr[i].bdaddr, &c->dst);
2839 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2840 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2841 continue;
2842 i++;
2843 }
2844
2845 rp->conn_count = cpu_to_le16(i);
2846
2847 /* Recalculate length in case of filtered SCO connections, etc */
2848 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2849 struct_size(rp, addr, i));
2850
2851 kfree(rp);
2852
2853 unlock:
2854 hci_dev_unlock(hdev);
2855 return err;
2856 }
2857
2858 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2859 struct mgmt_cp_pin_code_neg_reply *cp)
2860 {
2861 struct mgmt_pending_cmd *cmd;
2862 int err;
2863
2864 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2865 sizeof(*cp));
2866 if (!cmd)
2867 return -ENOMEM;
2868
2869 cmd->cmd_complete = addr_cmd_complete;
2870
2871 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2872 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2873 if (err < 0)
2874 mgmt_pending_remove(cmd);
2875
2876 return err;
2877 }
2878
2879 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2880 u16 len)
2881 {
2882 struct hci_conn *conn;
2883 struct mgmt_cp_pin_code_reply *cp = data;
2884 struct hci_cp_pin_code_reply reply;
2885 struct mgmt_pending_cmd *cmd;
2886 int err;
2887
2888 bt_dev_dbg(hdev, "sock %p", sk);
2889
2890 hci_dev_lock(hdev);
2891
2892 if (!hdev_is_powered(hdev)) {
2893 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2894 MGMT_STATUS_NOT_POWERED);
2895 goto failed;
2896 }
2897
2898 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2899 if (!conn) {
2900 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2901 MGMT_STATUS_NOT_CONNECTED);
2902 goto failed;
2903 }
2904
2905 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2906 struct mgmt_cp_pin_code_neg_reply ncp;
2907
2908 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2909
2910 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2911
2912 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2913 if (err >= 0)
2914 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2915 MGMT_STATUS_INVALID_PARAMS);
2916
2917 goto failed;
2918 }
2919
2920 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2921 if (!cmd) {
2922 err = -ENOMEM;
2923 goto failed;
2924 }
2925
2926 cmd->cmd_complete = addr_cmd_complete;
2927
2928 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2929 reply.pin_len = cp->pin_len;
2930 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2931
2932 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2933 if (err < 0)
2934 mgmt_pending_remove(cmd);
2935
2936 failed:
2937 hci_dev_unlock(hdev);
2938 return err;
2939 }
2940
2941 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2942 u16 len)
2943 {
2944 struct mgmt_cp_set_io_capability *cp = data;
2945
2946 bt_dev_dbg(hdev, "sock %p", sk);
2947
2948 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2949 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2950 MGMT_STATUS_INVALID_PARAMS);
2951
2952 hci_dev_lock(hdev);
2953
2954 hdev->io_capability = cp->io_capability;
2955
2956 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2957
2958 hci_dev_unlock(hdev);
2959
2960 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2961 NULL, 0);
2962 }
2963
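/* Look up the pending MGMT_OP_PAIR_DEVICE command, if any, that is
* associated with the given connection.
*/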
2964 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2965 {
2966 struct hci_dev *hdev = conn->hdev;
2967 struct mgmt_pending_cmd *cmd;
2968
2969 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2970 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2971 continue;
2972
2973 if (cmd->user_data != conn)
2974 continue;
2975
2976 return cmd;
2977 }
2978
2979 return NULL;
2980 }
2981
2982 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2983 {
2984 struct mgmt_rp_pair_device rp;
2985 struct hci_conn *conn = cmd->user_data;
2986 int err;
2987
2988 bacpy(&rp.addr.bdaddr, &conn->dst);
2989 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2990
2991 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2992 status, &rp, sizeof(rp));
2993
2994 /* So we don't get further callbacks for this connection */
2995 conn->connect_cfm_cb = NULL;
2996 conn->security_cfm_cb = NULL;
2997 conn->disconn_cfm_cb = NULL;
2998
2999 hci_conn_drop(conn);
3000
3001 /* The device is paired so there is no need to remove
3002 * its connection parameters anymore.
3003 */
3004 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3005
3006 hci_conn_put(conn);
3007
3008 return err;
3009 }
3010
3011 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3012 {
3013 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3014 struct mgmt_pending_cmd *cmd;
3015
3016 cmd = find_pairing(conn);
3017 if (cmd) {
3018 cmd->cmd_complete(cmd, status);
3019 mgmt_pending_remove(cmd);
3020 }
3021 }
3022
3023 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3024 {
3025 struct mgmt_pending_cmd *cmd;
3026
3027 BT_DBG("status %u", status);
3028
3029 cmd = find_pairing(conn);
3030 if (!cmd) {
3031 BT_DBG("Unable to find a pending command");
3032 return;
3033 }
3034
3035 cmd->cmd_complete(cmd, mgmt_status(status));
3036 mgmt_pending_remove(cmd);
3037 }
3038
3039 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3040 {
3041 struct mgmt_pending_cmd *cmd;
3042
3043 BT_DBG("status %u", status);
3044
3045 if (!status)
3046 return;
3047
3048 cmd = find_pairing(conn);
3049 if (!cmd) {
3050 BT_DBG("Unable to find a pending command");
3051 return;
3052 }
3053
3054 cmd->cmd_complete(cmd, mgmt_status(status));
3055 mgmt_pending_remove(cmd);
3056 }
3057
3058 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3059 u16 len)
3060 {
3061 struct mgmt_cp_pair_device *cp = data;
3062 struct mgmt_rp_pair_device rp;
3063 struct mgmt_pending_cmd *cmd;
3064 u8 sec_level, auth_type;
3065 struct hci_conn *conn;
3066 int err;
3067
3068 bt_dev_dbg(hdev, "sock %p", sk);
3069
3070 memset(&rp, 0, sizeof(rp));
3071 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3072 rp.addr.type = cp->addr.type;
3073
3074 if (!bdaddr_type_is_valid(cp->addr.type))
3075 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3076 MGMT_STATUS_INVALID_PARAMS,
3077 &rp, sizeof(rp));
3078
3079 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3080 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3081 MGMT_STATUS_INVALID_PARAMS,
3082 &rp, sizeof(rp));
3083
3084 hci_dev_lock(hdev);
3085
3086 if (!hdev_is_powered(hdev)) {
3087 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3088 MGMT_STATUS_NOT_POWERED, &rp,
3089 sizeof(rp));
3090 goto unlock;
3091 }
3092
3093 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3094 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3095 MGMT_STATUS_ALREADY_PAIRED, &rp,
3096 sizeof(rp));
3097 goto unlock;
3098 }
3099
3100 sec_level = BT_SECURITY_MEDIUM;
3101 auth_type = HCI_AT_DEDICATED_BONDING;
3102
3103 if (cp->addr.type == BDADDR_BREDR) {
3104 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3105 auth_type, CONN_REASON_PAIR_DEVICE);
3106 } else {
3107 u8 addr_type = le_addr_type(cp->addr.type);
3108 struct hci_conn_params *p;
3109
3110
3111 /* When pairing a new device, it is expected to remember
3112 * this device for future connections. Adding the connection
3113 * parameter information ahead of time allows tracking
3114 * the connection based on this particular controller.
3115 *
3116 * If connection parameters already exist, then they
3117 * will be kept and this function does nothing.
3118 */
3119 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
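/* Hypothetical hardening, not part of the original listing:
* hci_conn_params_add() can return NULL on allocation failure, so
* bail out before it is dereferenced below.
*/
if (!p) {
err = -ENOMEM;
goto unlock;
}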
3120
3121 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3122 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3123
3124 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3125 sec_level, HCI_LE_CONN_TIMEOUT,
3126 CONN_REASON_PAIR_DEVICE);
3127 }
3128
3129 if (IS_ERR(conn)) {
3130 int status;
3131
3132 if (PTR_ERR(conn) == -EBUSY)
3133 status = MGMT_STATUS_BUSY;
3134 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3135 status = MGMT_STATUS_NOT_SUPPORTED;
3136 else if (PTR_ERR(conn) == -ECONNREFUSED)
3137 status = MGMT_STATUS_REJECTED;
3138 else
3139 status = MGMT_STATUS_CONNECT_FAILED;
3140
3141 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3142 status, &rp, sizeof(rp));
3143 goto unlock;
3144 }
3145
3146 if (conn->connect_cfm_cb) {
3147 hci_conn_drop(conn);
3148 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3149 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3150 goto unlock;
3151 }
3152
3153 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3154 if (!cmd) {
3155 err = -ENOMEM;
3156 hci_conn_drop(conn);
3157 goto unlock;
3158 }
3159
3160 cmd->cmd_complete = pairing_complete;
3161
3162 /* For LE, just connecting isn't a proof that the pairing finished */
3163 if (cp->addr.type == BDADDR_BREDR) {
3164 conn->connect_cfm_cb = pairing_complete_cb;
3165 conn->security_cfm_cb = pairing_complete_cb;
3166 conn->disconn_cfm_cb = pairing_complete_cb;
3167 } else {
3168 conn->connect_cfm_cb = le_pairing_complete_cb;
3169 conn->security_cfm_cb = le_pairing_complete_cb;
3170 conn->disconn_cfm_cb = le_pairing_complete_cb;
3171 }
3172
3173 conn->io_capability = cp->io_cap;
3174 cmd->user_data = hci_conn_get(conn);
3175
3176 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3177 hci_conn_security(conn, sec_level, auth_type, true)) {
3178 cmd->cmd_complete(cmd, 0);
3179 mgmt_pending_remove(cmd);
3180 }
3181
3182 err = 0;
3183
3184 unlock:
3185 hci_dev_unlock(hdev);
3186 return err;
3187 }
3188
3189 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3190 u16 len)
3191 {
3192 struct mgmt_addr_info *addr = data;
3193 struct mgmt_pending_cmd *cmd;
3194 struct hci_conn *conn;
3195 int err;
3196
3197 bt_dev_dbg(hdev, "sock %p", sk);
3198
3199 hci_dev_lock(hdev);
3200
3201 if (!hdev_is_powered(hdev)) {
3202 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3203 MGMT_STATUS_NOT_POWERED);
3204 goto unlock;
3205 }
3206
3207 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3208 if (!cmd) {
3209 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3210 MGMT_STATUS_INVALID_PARAMS);
3211 goto unlock;
3212 }
3213
3214 conn = cmd->user_data;
3215
3216 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3217 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3218 MGMT_STATUS_INVALID_PARAMS);
3219 goto unlock;
3220 }
3221
3222 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3223 mgmt_pending_remove(cmd);
3224
3225 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3226 addr, sizeof(*addr));
3227
3228 /* Since user doesn't want to proceed with the connection, abort any
3229 * ongoing pairing and then terminate the link if it was created
3230 * because of the pair device action.
3231 */
3232 if (addr->type == BDADDR_BREDR)
3233 hci_remove_link_key(hdev, &addr->bdaddr);
3234 else
3235 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3236 le_addr_type(addr->type));
3237
3238 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3239 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3240
3241 unlock:
3242 hci_dev_unlock(hdev);
3243 return err;
3244 }
3245
3246 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3247 struct mgmt_addr_info *addr, u16 mgmt_op,
3248 u16 hci_op, __le32 passkey)
3249 {
3250 struct mgmt_pending_cmd *cmd;
3251 struct hci_conn *conn;
3252 int err;
3253
3254 hci_dev_lock(hdev);
3255
3256 if (!hdev_is_powered(hdev)) {
3257 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3258 MGMT_STATUS_NOT_POWERED, addr,
3259 sizeof(*addr));
3260 goto done;
3261 }
3262
3263 if (addr->type == BDADDR_BREDR)
3264 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3265 else
3266 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3267 le_addr_type(addr->type));
3268
3269 if (!conn) {
3270 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3271 MGMT_STATUS_NOT_CONNECTED, addr,
3272 sizeof(*addr));
3273 goto done;
3274 }
3275
3276 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3277 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3278 if (!err)
3279 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3280 MGMT_STATUS_SUCCESS, addr,
3281 sizeof(*addr));
3282 else
3283 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3284 MGMT_STATUS_FAILED, addr,
3285 sizeof(*addr));
3286
3287 goto done;
3288 }
3289
3290 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3291 if (!cmd) {
3292 err = -ENOMEM;
3293 goto done;
3294 }
3295
3296 cmd->cmd_complete = addr_cmd_complete;
3297
3298 /* Continue with pairing via HCI */
3299 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3300 struct hci_cp_user_passkey_reply cp;
3301
3302 bacpy(&cp.bdaddr, &addr->bdaddr);
3303 cp.passkey = passkey;
3304 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3305 } else
3306 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3307 &addr->bdaddr);
3308
3309 if (err < 0)
3310 mgmt_pending_remove(cmd);
3311
3312 done:
3313 hci_dev_unlock(hdev);
3314 return err;
3315 }
3316
3317 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3318 void *data, u16 len)
3319 {
3320 struct mgmt_cp_pin_code_neg_reply *cp = data;
3321
3322 bt_dev_dbg(hdev, "sock %p", sk);
3323
3324 return user_pairing_resp(sk, hdev, &cp->addr,
3325 MGMT_OP_PIN_CODE_NEG_REPLY,
3326 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3327 }
3328
3329 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3330 u16 len)
3331 {
3332 struct mgmt_cp_user_confirm_reply *cp = data;
3333
3334 bt_dev_dbg(hdev, "sock %p", sk);
3335
3336 if (len != sizeof(*cp))
3337 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3338 MGMT_STATUS_INVALID_PARAMS);
3339
3340 return user_pairing_resp(sk, hdev, &cp->addr,
3341 MGMT_OP_USER_CONFIRM_REPLY,
3342 HCI_OP_USER_CONFIRM_REPLY, 0);
3343 }
3344
3345 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3346 void *data, u16 len)
3347 {
3348 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3349
3350 bt_dev_dbg(hdev, "sock %p", sk);
3351
3352 return user_pairing_resp(sk, hdev, &cp->addr,
3353 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3354 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3355 }
3356
3357 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3358 u16 len)
3359 {
3360 struct mgmt_cp_user_passkey_reply *cp = data;
3361
3362 bt_dev_dbg(hdev, "sock %p", sk);
3363
3364 return user_pairing_resp(sk, hdev, &cp->addr,
3365 MGMT_OP_USER_PASSKEY_REPLY,
3366 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3367 }
3368
3369 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3370 void *data, u16 len)
3371 {
3372 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3373
3374 bt_dev_dbg(hdev, "sock %p", sk);
3375
3376 return user_pairing_resp(sk, hdev, &cp->addr,
3377 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3378 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3379 }
3380
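/* If the current advertising instance carries data covered by the given
* flags (e.g. local name or appearance), cancel its timeout and schedule
* the next instance so stale data is not left on air.
*/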
3381 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3382 {
3383 struct adv_info *adv_instance;
3384
3385 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3386 if (!adv_instance)
3387 return 0;
3388
3389 /* stop if current instance doesn't need to be changed */
3390 if (!(adv_instance->flags & flags))
3391 return 0;
3392
3393 cancel_adv_timeout(hdev);
3394
3395 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3396 if (!adv_instance)
3397 return 0;
3398
3399 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3400
3401 return 0;
3402 }
3403
3404 static int name_changed_sync(struct hci_dev *hdev, void *data)
3405 {
3406 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3407 }
3408
3409 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3410 {
3411 struct mgmt_pending_cmd *cmd = data;
3412 struct mgmt_cp_set_local_name *cp = cmd->param;
3413 u8 status = mgmt_status(err);
3414
3415 bt_dev_dbg(hdev, "err %d", err);
3416
3417 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3418 return;
3419
3420 if (status) {
3421 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3422 status);
3423 } else {
3424 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3425 cp, sizeof(*cp));
3426
3427 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3428 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3429 }
3430
3431 mgmt_pending_remove(cmd);
3432 }
3433
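/* hci_sync callback for MGMT_OP_SET_LOCAL_NAME: push the new name to the
* controller (BR/EDR name and EIR) and into the LE scan response data.
*/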
3434 static int set_name_sync(struct hci_dev *hdev, void *data)
3435 {
3436 if (lmp_bredr_capable(hdev)) {
3437 hci_update_name_sync(hdev);
3438 hci_update_eir_sync(hdev);
3439 }
3440
3441 /* The name is stored in the scan response data and so
3442 * no need to update the advertising data here.
3443 */
3444 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3445 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3446
3447 return 0;
3448 }
3449
3450 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3451 u16 len)
3452 {
3453 struct mgmt_cp_set_local_name *cp = data;
3454 struct mgmt_pending_cmd *cmd;
3455 int err;
3456
3457 bt_dev_dbg(hdev, "sock %p", sk);
3458
3459 hci_dev_lock(hdev);
3460
3461 /* If the old values are the same as the new ones just return a
3462 * direct command complete event.
3463 */
3464 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3465 !memcmp(hdev->short_name, cp->short_name,
3466 sizeof(hdev->short_name))) {
3467 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3468 data, len);
3469 goto failed;
3470 }
3471
3472 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3473
3474 if (!hdev_is_powered(hdev)) {
3475 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3476
3477 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3478 data, len);
3479 if (err < 0)
3480 goto failed;
3481
3482 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3483 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3484 ext_info_changed(hdev, sk);
3485
3486 goto failed;
3487 }
3488
3489 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3490 if (!cmd)
3491 err = -ENOMEM;
3492 else
3493 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3494 set_name_complete);
3495
3496 if (err < 0) {
3497 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3498 MGMT_STATUS_FAILED);
3499
3500 if (cmd)
3501 mgmt_pending_remove(cmd);
3502
3503 goto failed;
3504 }
3505
3506 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3507
3508 failed:
3509 hci_dev_unlock(hdev);
3510 return err;
3511 }
3512
3513 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3514 {
3515 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3516 }
3517
3518 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3519 u16 len)
3520 {
3521 struct mgmt_cp_set_appearance *cp = data;
3522 u16 appearance;
3523 int err;
3524
3525 bt_dev_dbg(hdev, "sock %p", sk);
3526
3527 if (!lmp_le_capable(hdev))
3528 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3529 MGMT_STATUS_NOT_SUPPORTED);
3530
3531 appearance = le16_to_cpu(cp->appearance);
3532
3533 hci_dev_lock(hdev);
3534
3535 if (hdev->appearance != appearance) {
3536 hdev->appearance = appearance;
3537
3538 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3539 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3540 NULL);
3541
3542 ext_info_changed(hdev, sk);
3543 }
3544
3545 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3546 0);
3547
3548 hci_dev_unlock(hdev);
3549
3550 return err;
3551 }
3552
3553 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3554 void *data, u16 len)
3555 {
3556 struct mgmt_rp_get_phy_configuration rp;
3557
3558 bt_dev_dbg(hdev, "sock %p", sk);
3559
3560 hci_dev_lock(hdev);
3561
3562 memset(&rp, 0, sizeof(rp));
3563
3564 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3565 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3566 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3567
3568 hci_dev_unlock(hdev);
3569
3570 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3571 &rp, sizeof(rp));
3572 }
3573
3574 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3575 {
3576 struct mgmt_ev_phy_configuration_changed ev;
3577
3578 memset(&ev, 0, sizeof(ev));
3579
3580 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3581
3582 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3583 sizeof(ev), skip);
3584 }
3585
3586 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3587 {
3588 struct mgmt_pending_cmd *cmd = data;
3589 struct sk_buff *skb = cmd->skb;
3590 u8 status = mgmt_status(err);
3591
3592 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3593 return;
3594
3595 if (!status) {
3596 if (!skb)
3597 status = MGMT_STATUS_FAILED;
3598 else if (IS_ERR(skb))
3599 status = mgmt_status(PTR_ERR(skb));
3600 else
3601 status = mgmt_status(skb->data[0]);
3602 }
3603
3604 bt_dev_dbg(hdev, "status %d", status);
3605
3606 if (status) {
3607 mgmt_cmd_status(cmd->sk, hdev->id,
3608 MGMT_OP_SET_PHY_CONFIGURATION, status);
3609 } else {
3610 mgmt_cmd_complete(cmd->sk, hdev->id,
3611 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3612 NULL, 0);
3613
3614 mgmt_phy_configuration_changed(hdev, cmd->sk);
3615 }
3616
3617 if (skb && !IS_ERR(skb))
3618 kfree_skb(skb);
3619
3620 mgmt_pending_remove(cmd);
3621 }
3622
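/* hci_sync callback for MGMT_OP_SET_PHY_CONFIGURATION: translate the
* selected MGMT PHY bits into an HCI LE Set Default PHY command.
*/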
3623 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3624 {
3625 struct mgmt_pending_cmd *cmd = data;
3626 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3627 struct hci_cp_le_set_default_phy cp_phy;
3628 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3629
3630 memset(&cp_phy, 0, sizeof(cp_phy));
3631
3632 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3633 cp_phy.all_phys |= 0x01;
3634
3635 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3636 cp_phy.all_phys |= 0x02;
3637
3638 if (selected_phys & MGMT_PHY_LE_1M_TX)
3639 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3640
3641 if (selected_phys & MGMT_PHY_LE_2M_TX)
3642 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3643
3644 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3645 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3646
3647 if (selected_phys & MGMT_PHY_LE_1M_RX)
3648 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3649
3650 if (selected_phys & MGMT_PHY_LE_2M_RX)
3651 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3652
3653 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3654 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3655
3656 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3657 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3658
3659 return 0;
3660 }
3661
3662 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3663 void *data, u16 len)
3664 {
3665 struct mgmt_cp_set_phy_configuration *cp = data;
3666 struct mgmt_pending_cmd *cmd;
3667 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3668 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3669 bool changed = false;
3670 int err;
3671
3672 bt_dev_dbg(hdev, "sock %p", sk);
3673
3674 configurable_phys = get_configurable_phys(hdev);
3675 supported_phys = get_supported_phys(hdev);
3676 selected_phys = __le32_to_cpu(cp->selected_phys);
3677
3678 if (selected_phys & ~supported_phys)
3679 return mgmt_cmd_status(sk, hdev->id,
3680 MGMT_OP_SET_PHY_CONFIGURATION,
3681 MGMT_STATUS_INVALID_PARAMS);
3682
3683 unconfigure_phys = supported_phys & ~configurable_phys;
3684
3685 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3686 return mgmt_cmd_status(sk, hdev->id,
3687 MGMT_OP_SET_PHY_CONFIGURATION,
3688 MGMT_STATUS_INVALID_PARAMS);
3689
3690 if (selected_phys == get_selected_phys(hdev))
3691 return mgmt_cmd_complete(sk, hdev->id,
3692 MGMT_OP_SET_PHY_CONFIGURATION,
3693 0, NULL, 0);
3694
3695 hci_dev_lock(hdev);
3696
3697 if (!hdev_is_powered(hdev)) {
3698 err = mgmt_cmd_status(sk, hdev->id,
3699 MGMT_OP_SET_PHY_CONFIGURATION,
3700 MGMT_STATUS_REJECTED);
3701 goto unlock;
3702 }
3703
3704 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3705 err = mgmt_cmd_status(sk, hdev->id,
3706 MGMT_OP_SET_PHY_CONFIGURATION,
3707 MGMT_STATUS_BUSY);
3708 goto unlock;
3709 }
3710
3711 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3712 pkt_type |= (HCI_DH3 | HCI_DM3);
3713 else
3714 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3715
3716 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3717 pkt_type |= (HCI_DH5 | HCI_DM5);
3718 else
3719 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3720
3721 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3722 pkt_type &= ~HCI_2DH1;
3723 else
3724 pkt_type |= HCI_2DH1;
3725
3726 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3727 pkt_type &= ~HCI_2DH3;
3728 else
3729 pkt_type |= HCI_2DH3;
3730
3731 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3732 pkt_type &= ~HCI_2DH5;
3733 else
3734 pkt_type |= HCI_2DH5;
3735
3736 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3737 pkt_type &= ~HCI_3DH1;
3738 else
3739 pkt_type |= HCI_3DH1;
3740
3741 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3742 pkt_type &= ~HCI_3DH3;
3743 else
3744 pkt_type |= HCI_3DH3;
3745
3746 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3747 pkt_type &= ~HCI_3DH5;
3748 else
3749 pkt_type |= HCI_3DH5;
3750
3751 if (pkt_type != hdev->pkt_type) {
3752 hdev->pkt_type = pkt_type;
3753 changed = true;
3754 }
3755
3756 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3757 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3758 if (changed)
3759 mgmt_phy_configuration_changed(hdev, sk);
3760
3761 err = mgmt_cmd_complete(sk, hdev->id,
3762 MGMT_OP_SET_PHY_CONFIGURATION,
3763 0, NULL, 0);
3764
3765 goto unlock;
3766 }
3767
3768 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3769 len);
3770 if (!cmd)
3771 err = -ENOMEM;
3772 else
3773 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
3774 set_default_phy_complete);
3775
3776 if (err < 0) {
3777 err = mgmt_cmd_status(sk, hdev->id,
3778 MGMT_OP_SET_PHY_CONFIGURATION,
3779 MGMT_STATUS_FAILED);
3780
3781 if (cmd)
3782 mgmt_pending_remove(cmd);
3783 }
3784
3785 unlock:
3786 hci_dev_unlock(hdev);
3787
3788 return err;
3789 }
3790
3791 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3792 u16 len)
3793 {
3794 int err = MGMT_STATUS_SUCCESS;
3795 struct mgmt_cp_set_blocked_keys *keys = data;
3796 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3797 sizeof(struct mgmt_blocked_key_info));
3798 u16 key_count, expected_len;
3799 int i;
3800
3801 bt_dev_dbg(hdev, "sock %p", sk);
3802
3803 key_count = __le16_to_cpu(keys->key_count);
3804 if (key_count > max_key_count) {
3805 bt_dev_err(hdev, "too big key_count value %u", key_count);
3806 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3807 MGMT_STATUS_INVALID_PARAMS);
3808 }
3809
3810 expected_len = struct_size(keys, keys, key_count);
3811 if (expected_len != len) {
3812 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3813 expected_len, len);
3814 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3815 MGMT_STATUS_INVALID_PARAMS);
3816 }
3817
3818 hci_dev_lock(hdev);
3819
3820 hci_blocked_keys_clear(hdev);
3821
3822 for (i = 0; i < key_count; ++i) {
3823 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3824
3825 if (!b) {
3826 err = MGMT_STATUS_NO_RESOURCES;
3827 break;
3828 }
3829
3830 b->type = keys->keys[i].type;
3831 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3832 list_add_rcu(&b->list, &hdev->blocked_keys);
3833 }
3834 hci_dev_unlock(hdev);
3835
3836 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3837 err, NULL, 0);
3838 }
3839
3840 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3841 void *data, u16 len)
3842 {
3843 struct mgmt_mode *cp = data;
3844 int err;
3845 bool changed = false;
3846
3847 bt_dev_dbg(hdev, "sock %p", sk);
3848
3849 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3850 return mgmt_cmd_status(sk, hdev->id,
3851 MGMT_OP_SET_WIDEBAND_SPEECH,
3852 MGMT_STATUS_NOT_SUPPORTED);
3853
3854 if (cp->val != 0x00 && cp->val != 0x01)
3855 return mgmt_cmd_status(sk, hdev->id,
3856 MGMT_OP_SET_WIDEBAND_SPEECH,
3857 MGMT_STATUS_INVALID_PARAMS);
3858
3859 hci_dev_lock(hdev);
3860
3861 if (hdev_is_powered(hdev) &&
3862 !!cp->val != hci_dev_test_flag(hdev,
3863 HCI_WIDEBAND_SPEECH_ENABLED)) {
3864 err = mgmt_cmd_status(sk, hdev->id,
3865 MGMT_OP_SET_WIDEBAND_SPEECH,
3866 MGMT_STATUS_REJECTED);
3867 goto unlock;
3868 }
3869
3870 if (cp->val)
3871 changed = !hci_dev_test_and_set_flag(hdev,
3872 HCI_WIDEBAND_SPEECH_ENABLED);
3873 else
3874 changed = hci_dev_test_and_clear_flag(hdev,
3875 HCI_WIDEBAND_SPEECH_ENABLED);
3876
3877 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3878 if (err < 0)
3879 goto unlock;
3880
3881 if (changed)
3882 err = new_settings(hdev, sk);
3883
3884 unlock:
3885 hci_dev_unlock(hdev);
3886 return err;
3887 }
3888
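/* Build the Read Controller Capabilities response as a list of EIR-style
* tagged entries (security flags, encryption key sizes, LE TX power range).
*/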
3889 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3890 void *data, u16 data_len)
3891 {
3892 char buf[20];
3893 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3894 u16 cap_len = 0;
3895 u8 flags = 0;
3896 u8 tx_power_range[2];
3897
3898 bt_dev_dbg(hdev, "sock %p", sk);
3899
3900 memset(&buf, 0, sizeof(buf));
3901
3902 hci_dev_lock(hdev);
3903
3904 /* When the Read Simple Pairing Options command is supported, then
3905 * the remote public key validation is supported.
3906 *
3907 * Alternatively, when Microsoft extensions are available, they can
3908 * indicate support for public key validation as well.
3909 */
3910 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3911 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3912
3913 flags |= 0x02; /* Remote public key validation (LE) */
3914
3915 /* When the Read Encryption Key Size command is supported, then the
3916 * encryption key size is enforced.
3917 */
3918 if (hdev->commands[20] & 0x10)
3919 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3920
3921 flags |= 0x08; /* Encryption key size enforcement (LE) */
3922
3923 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3924 &flags, 1);
3925
3926 /* When the Read Simple Pairing Options command is supported, then
3927 * also max encryption key size information is provided.
3928 */
3929 if (hdev->commands[41] & 0x08)
3930 cap_len = eir_append_le16(rp->cap, cap_len,
3931 MGMT_CAP_MAX_ENC_KEY_SIZE,
3932 hdev->max_enc_key_size);
3933
3934 cap_len = eir_append_le16(rp->cap, cap_len,
3935 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3936 SMP_MAX_ENC_KEY_SIZE);
3937
3938 /* Append the min/max LE tx power parameters if we were able to fetch
3939 * it from the controller
3940 */
3941 if (hdev->commands[38] & 0x80) {
3942 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3943 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3944 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3945 tx_power_range, 2);
3946 }
3947
3948 rp->cap_len = cpu_to_le16(cap_len);
3949
3950 hci_dev_unlock(hdev);
3951
3952 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3953 rp, sizeof(*rp) + cap_len);
3954 }
3955
3956 #ifdef CONFIG_BT_FEATURE_DEBUG
3957 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3958 static const u8 debug_uuid[16] = {
3959 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3960 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3961 };
3962 #endif
3963
3964 /* 330859bc-7506-492d-9370-9a6f0614037f */
3965 static const u8 quality_report_uuid[16] = {
3966 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
3967 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
3968 };
3969
3970 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
3971 static const u8 offload_codecs_uuid[16] = {
3972 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
3973 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
3974 };
3975
3976 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3977 static const u8 le_simultaneous_roles_uuid[16] = {
3978 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3979 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3980 };
3981
3982 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3983 static const u8 rpa_resolution_uuid[16] = {
3984 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3985 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3986 };
3987
3988 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
3989 static const u8 iso_socket_uuid[16] = {
3990 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
3991 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
3992 };
3993
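/* Report which experimental features are available (globally when no
* controller index is given) together with their current on/off state.
*/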
3994 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3995 void *data, u16 data_len)
3996 {
3997 char buf[122];
3998 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3999 u16 idx = 0;
4000 u32 flags;
4001
4002 bt_dev_dbg(hdev, "sock %p", sk);
4003
4004 memset(&buf, 0, sizeof(buf));
4005
4006 #ifdef CONFIG_BT_FEATURE_DEBUG
4007 if (!hdev) {
4008 flags = bt_dbg_get() ? BIT(0) : 0;
4009
4010 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4011 rp->features[idx].flags = cpu_to_le32(flags);
4012 idx++;
4013 }
4014 #endif
4015
4016 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4017 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4018 flags = BIT(0);
4019 else
4020 flags = 0;
4021
4022 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4023 rp->features[idx].flags = cpu_to_le32(flags);
4024 idx++;
4025 }
4026
4027 if (hdev && ll_privacy_capable(hdev)) {
4028 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4029 flags = BIT(0) | BIT(1);
4030 else
4031 flags = BIT(1);
4032
4033 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4034 rp->features[idx].flags = cpu_to_le32(flags);
4035 idx++;
4036 }
4037
4038 if (hdev && (aosp_has_quality_report(hdev) ||
4039 hdev->set_quality_report)) {
4040 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4041 flags = BIT(0);
4042 else
4043 flags = 0;
4044
4045 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4046 rp->features[idx].flags = cpu_to_le32(flags);
4047 idx++;
4048 }
4049
4050 if (hdev && hdev->get_data_path_id) {
4051 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4052 flags = BIT(0);
4053 else
4054 flags = 0;
4055
4056 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4057 rp->features[idx].flags = cpu_to_le32(flags);
4058 idx++;
4059 }
4060
4061 if (IS_ENABLED(CONFIG_BT_LE)) {
4062 flags = iso_enabled() ? BIT(0) : 0;
4063 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4064 rp->features[idx].flags = cpu_to_le32(flags);
4065 idx++;
4066 }
4067
4068 rp->feature_count = cpu_to_le16(idx);
4069
4070 /* After reading the experimental features information, enable
4071 * the events to update client on any future change.
4072 */
4073 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4074
4075 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4076 MGMT_OP_READ_EXP_FEATURES_INFO,
4077 0, rp, sizeof(*rp) + (20 * idx));
4078 }
4079
4080 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4081 struct sock *skip)
4082 {
4083 struct mgmt_ev_exp_feature_changed ev;
4084
4085 memset(&ev, 0, sizeof(ev));
4086 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4087 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4088
4089 /* Keep the device-privacy connection flag in sync with the setting */
4090 if (enabled && privacy_mode_capable(hdev))
4091 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4092 else
4093 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4094
4095 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4096 &ev, sizeof(ev),
4097 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4098
4099 }
4100
4101 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4102 bool enabled, struct sock *skip)
4103 {
4104 struct mgmt_ev_exp_feature_changed ev;
4105
4106 memset(&ev, 0, sizeof(ev));
4107 memcpy(ev.uuid, uuid, 16);
4108 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4109
4110 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4111 &ev, sizeof(ev),
4112 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4113 }
4114
4115 #define EXP_FEAT(_uuid, _set_func) \
4116 { \
4117 .uuid = _uuid, \
4118 .set_func = _set_func, \
4119 }
4120
4121 /* Setting the all-zero UUID disables any currently enabled experimental features */
4122 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4123 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4124 {
4125 struct mgmt_rp_set_exp_feature rp;
4126
4127 memset(rp.uuid, 0, 16);
4128 rp.flags = cpu_to_le32(0);
4129
4130 #ifdef CONFIG_BT_FEATURE_DEBUG
4131 if (!hdev) {
4132 bool changed = bt_dbg_get();
4133
4134 bt_dbg_set(false);
4135
4136 if (changed)
4137 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4138 }
4139 #endif
4140
4141 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4142 bool changed;
4143
4144 changed = hci_dev_test_and_clear_flag(hdev,
4145 HCI_ENABLE_LL_PRIVACY);
4146 if (changed)
4147 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4148 sk);
4149 }
4150
4151 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4152
4153 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4154 MGMT_OP_SET_EXP_FEATURE, 0,
4155 &rp, sizeof(rp));
4156 }
4157
4158 #ifdef CONFIG_BT_FEATURE_DEBUG
4159 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4160 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4161 {
4162 struct mgmt_rp_set_exp_feature rp;
4163
4164 bool val, changed;
4165 int err;
4166
4167 /* Command requires to use the non-controller index */
4168 if (hdev)
4169 return mgmt_cmd_status(sk, hdev->id,
4170 MGMT_OP_SET_EXP_FEATURE,
4171 MGMT_STATUS_INVALID_INDEX);
4172
4173 /* Parameters are limited to a single octet */
4174 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4175 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4176 MGMT_OP_SET_EXP_FEATURE,
4177 MGMT_STATUS_INVALID_PARAMS);
4178
4179 /* Only boolean on/off is supported */
4180 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4181 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4182 MGMT_OP_SET_EXP_FEATURE,
4183 MGMT_STATUS_INVALID_PARAMS);
4184
4185 val = !!cp->param[0];
4186 changed = val ? !bt_dbg_get() : bt_dbg_get();
4187 bt_dbg_set(val);
4188
4189 memcpy(rp.uuid, debug_uuid, 16);
4190 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4191
4192 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4193
4194 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4195 MGMT_OP_SET_EXP_FEATURE, 0,
4196 &rp, sizeof(rp));
4197
4198 if (changed)
4199 exp_feature_changed(hdev, debug_uuid, val, sk);
4200
4201 return err;
4202 }
4203 #endif
4204
4205 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4206 struct mgmt_cp_set_exp_feature *cp,
4207 u16 data_len)
4208 {
4209 struct mgmt_rp_set_exp_feature rp;
4210 bool val, changed;
4211 int err;
4212 u32 flags;
4213
4214 /* Command requires to use the controller index */
4215 if (!hdev)
4216 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4217 MGMT_OP_SET_EXP_FEATURE,
4218 MGMT_STATUS_INVALID_INDEX);
4219
4220 /* Changes can only be made when controller is powered down */
4221 if (hdev_is_powered(hdev))
4222 return mgmt_cmd_status(sk, hdev->id,
4223 MGMT_OP_SET_EXP_FEATURE,
4224 MGMT_STATUS_REJECTED);
4225
4226 /* Parameters are limited to a single octet */
4227 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4228 return mgmt_cmd_status(sk, hdev->id,
4229 MGMT_OP_SET_EXP_FEATURE,
4230 MGMT_STATUS_INVALID_PARAMS);
4231
4232 /* Only boolean on/off is supported */
4233 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4234 return mgmt_cmd_status(sk, hdev->id,
4235 MGMT_OP_SET_EXP_FEATURE,
4236 MGMT_STATUS_INVALID_PARAMS);
4237
4238 val = !!cp->param[0];
4239
4240 if (val) {
4241 changed = !hci_dev_test_and_set_flag(hdev,
4242 HCI_ENABLE_LL_PRIVACY);
4243 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4244
4245 /* Enable LL privacy + supported settings changed */
4246 flags = BIT(0) | BIT(1);
4247 } else {
4248 changed = hci_dev_test_and_clear_flag(hdev,
4249 HCI_ENABLE_LL_PRIVACY);
4250
4251 /* Disable LL privacy + supported settings changed */
4252 flags = BIT(1);
4253 }
4254
4255 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4256 rp.flags = cpu_to_le32(flags);
4257
4258 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4259
4260 err = mgmt_cmd_complete(sk, hdev->id,
4261 MGMT_OP_SET_EXP_FEATURE, 0,
4262 &rp, sizeof(rp));
4263
4264 if (changed)
4265 exp_ll_privacy_feature_changed(val, hdev, sk);
4266
4267 return err;
4268 }
4269
4270 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4271 struct mgmt_cp_set_exp_feature *cp,
4272 u16 data_len)
4273 {
4274 struct mgmt_rp_set_exp_feature rp;
4275 bool val, changed;
4276 int err;
4277
4278 /* Command requires to use a valid controller index */
4279 if (!hdev)
4280 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4281 MGMT_OP_SET_EXP_FEATURE,
4282 MGMT_STATUS_INVALID_INDEX);
4283
4284 /* Parameters are limited to a single octet */
4285 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4286 return mgmt_cmd_status(sk, hdev->id,
4287 MGMT_OP_SET_EXP_FEATURE,
4288 MGMT_STATUS_INVALID_PARAMS);
4289
4290 /* Only boolean on/off is supported */
4291 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4292 return mgmt_cmd_status(sk, hdev->id,
4293 MGMT_OP_SET_EXP_FEATURE,
4294 MGMT_STATUS_INVALID_PARAMS);
4295
4296 hci_req_sync_lock(hdev);
4297
4298 val = !!cp->param[0];
4299 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4300
4301 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4302 err = mgmt_cmd_status(sk, hdev->id,
4303 MGMT_OP_SET_EXP_FEATURE,
4304 MGMT_STATUS_NOT_SUPPORTED);
4305 goto unlock_quality_report;
4306 }
4307
4308 if (changed) {
4309 if (hdev->set_quality_report)
4310 err = hdev->set_quality_report(hdev, val);
4311 else
4312 err = aosp_set_quality_report(hdev, val);
4313
4314 if (err) {
4315 err = mgmt_cmd_status(sk, hdev->id,
4316 MGMT_OP_SET_EXP_FEATURE,
4317 MGMT_STATUS_FAILED);
4318 goto unlock_quality_report;
4319 }
4320
4321 if (val)
4322 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4323 else
4324 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4325 }
4326
4327 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4328
4329 memcpy(rp.uuid, quality_report_uuid, 16);
4330 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4331 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4332
4333 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4334 &rp, sizeof(rp));
4335
4336 if (changed)
4337 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4338
4339 unlock_quality_report:
4340 hci_req_sync_unlock(hdev);
4341 return err;
4342 }
4343
4344 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4345 struct mgmt_cp_set_exp_feature *cp,
4346 u16 data_len)
4347 {
4348 bool val, changed;
4349 int err;
4350 struct mgmt_rp_set_exp_feature rp;
4351
4352 /* Command requires to use a valid controller index */
4353 if (!hdev)
4354 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4355 MGMT_OP_SET_EXP_FEATURE,
4356 MGMT_STATUS_INVALID_INDEX);
4357
4358 /* Parameters are limited to a single octet */
4359 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4360 return mgmt_cmd_status(sk, hdev->id,
4361 MGMT_OP_SET_EXP_FEATURE,
4362 MGMT_STATUS_INVALID_PARAMS);
4363
4364 /* Only boolean on/off is supported */
4365 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4366 return mgmt_cmd_status(sk, hdev->id,
4367 MGMT_OP_SET_EXP_FEATURE,
4368 MGMT_STATUS_INVALID_PARAMS);
4369
4370 val = !!cp->param[0];
4371 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4372
4373 if (!hdev->get_data_path_id) {
4374 return mgmt_cmd_status(sk, hdev->id,
4375 MGMT_OP_SET_EXP_FEATURE,
4376 MGMT_STATUS_NOT_SUPPORTED);
4377 }
4378
4379 if (changed) {
4380 if (val)
4381 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4382 else
4383 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4384 }
4385
4386 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4387 val, changed);
4388
4389 memcpy(rp.uuid, offload_codecs_uuid, 16);
4390 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4391 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4392 err = mgmt_cmd_complete(sk, hdev->id,
4393 MGMT_OP_SET_EXP_FEATURE, 0,
4394 &rp, sizeof(rp));
4395
4396 if (changed)
4397 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4398
4399 return err;
4400 }
4401
4402 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4403 struct mgmt_cp_set_exp_feature *cp,
4404 u16 data_len)
4405 {
4406 bool val, changed;
4407 int err;
4408 struct mgmt_rp_set_exp_feature rp;
4409
4410 /* Command requires a valid controller index */
4411 if (!hdev)
4412 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4413 MGMT_OP_SET_EXP_FEATURE,
4414 MGMT_STATUS_INVALID_INDEX);
4415
4416
4417 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4418 return mgmt_cmd_status(sk, hdev->id,
4419 MGMT_OP_SET_EXP_FEATURE,
4420 MGMT_STATUS_INVALID_PARAMS);
4421
4422
4423 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4424 return mgmt_cmd_status(sk, hdev->id,
4425 MGMT_OP_SET_EXP_FEATURE,
4426 MGMT_STATUS_INVALID_PARAMS);
4427
4428 val = !!cp->param[0];
4429 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4430
4431 if (!hci_dev_le_state_simultaneous(hdev)) {
4432 return mgmt_cmd_status(sk, hdev->id,
4433 MGMT_OP_SET_EXP_FEATURE,
4434 MGMT_STATUS_NOT_SUPPORTED);
4435 }
4436
4437 if (changed) {
4438 if (val)
4439 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4440 else
4441 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4442 }
4443
4444 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4445 val, changed);
4446
4447 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4448 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4449 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4450 err = mgmt_cmd_complete(sk, hdev->id,
4451 MGMT_OP_SET_EXP_FEATURE, 0,
4452 &rp, sizeof(rp));
4453
4454 if (changed)
4455 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4456
4457 return err;
4458 }
4459
4460 #ifdef CONFIG_BT_LE
4461 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4462 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4463 {
4464 struct mgmt_rp_set_exp_feature rp;
4465 bool val, changed = false;
4466 int err;
4467
4468 /* Command is only accepted without a controller index */
4469 if (hdev)
4470 return mgmt_cmd_status(sk, hdev->id,
4471 MGMT_OP_SET_EXP_FEATURE,
4472 MGMT_STATUS_INVALID_INDEX);
4473
4474
4475 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4476 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4477 MGMT_OP_SET_EXP_FEATURE,
4478 MGMT_STATUS_INVALID_PARAMS);
4479
4480
4481 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4482 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4483 MGMT_OP_SET_EXP_FEATURE,
4484 MGMT_STATUS_INVALID_PARAMS);
4485
4486 val = cp->param[0] ? true : false;
4487 if (val)
4488 err = iso_init();
4489 else
4490 err = iso_exit();
4491
4492 if (!err)
4493 changed = true;
4494
4495 memcpy(rp.uuid, iso_socket_uuid, 16);
4496 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4497
4498 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4499
4500 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4501 MGMT_OP_SET_EXP_FEATURE, 0,
4502 &rp, sizeof(rp));
4503
4504 if (changed)
4505 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4506
4507 return err;
4508 }
4509 #endif
4510
4511 static const struct mgmt_exp_feature {
4512 const u8 *uuid;
4513 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4514 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4515 } exp_features[] = {
4516 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4517 #ifdef CONFIG_BT_FEATURE_DEBUG
4518 EXP_FEAT(debug_uuid, set_debug_func),
4519 #endif
4520 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4521 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4522 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4523 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4524 #ifdef CONFIG_BT_LE
4525 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4526 #endif
4527
4528 /* end the table with a null feature */
4529 EXP_FEAT(NULL, NULL)
4530 };
4531
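/* Dispatch a MGMT_OP_SET_EXP_FEATURE request to the handler in the
 * exp_features[] table whose 16-byte UUID matches the one supplied by
 * user space; unknown UUIDs are rejected with MGMT_STATUS_NOT_SUPPORTED.
 */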
4532 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4533 void *data, u16 data_len)
4534 {
4535 struct mgmt_cp_set_exp_feature *cp = data;
4536 size_t i = 0;
4537
4538 bt_dev_dbg(hdev, "sock %p", sk);
4539
4540 for (i = 0; exp_features[i].uuid; i++) {
4541 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4542 return exp_features[i].set_func(sk, hdev, cp, data_len);
4543 }
4544
4545 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4546 MGMT_OP_SET_EXP_FEATURE,
4547 MGMT_STATUS_NOT_SUPPORTED);
4548 }
4549
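/* Compute the connection flags that may be reported as supported for a
 * given set of connection parameters, starting from the controller-wide
 * hdev->conn_flags.
 */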
4550 static u32 get_params_flags(struct hci_dev *hdev,
4551 struct hci_conn_params *params)
4552 {
4553 u32 flags = hdev->conn_flags;
4554
4555 /* Devices using RPAs can only be programmed into the accept list if
4556  * LL Privacy is enabled; otherwise they cannot use
4557  * HCI_CONN_FLAG_REMOTE_WAKEUP, so the flag is masked out here.
4558  */
4559 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
4560 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
4561 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
4562
4563 return flags;
4564 }
4565
4566 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4567 u16 data_len)
4568 {
4569 struct mgmt_cp_get_device_flags *cp = data;
4570 struct mgmt_rp_get_device_flags rp;
4571 struct bdaddr_list_with_flags *br_params;
4572 struct hci_conn_params *params;
4573 u32 supported_flags;
4574 u32 current_flags = 0;
4575 u8 status = MGMT_STATUS_INVALID_PARAMS;
4576
4577 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4578 &cp->addr.bdaddr, cp->addr.type);
4579
4580 hci_dev_lock(hdev);
4581
4582 supported_flags = hdev->conn_flags;
4583
4584 memset(&rp, 0, sizeof(rp));
4585
4586 if (cp->addr.type == BDADDR_BREDR) {
4587 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4588 &cp->addr.bdaddr,
4589 cp->addr.type);
4590 if (!br_params)
4591 goto done;
4592
4593 current_flags = br_params->flags;
4594 } else {
4595 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4596 le_addr_type(cp->addr.type));
4597 if (!params)
4598 goto done;
4599
4600 supported_flags = get_params_flags(hdev, params);
4601 current_flags = params->flags;
4602 }
4603
4604 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4605 rp.addr.type = cp->addr.type;
4606 rp.supported_flags = cpu_to_le32(supported_flags);
4607 rp.current_flags = cpu_to_le32(current_flags);
4608
4609 status = MGMT_STATUS_SUCCESS;
4610
4611 done:
4612 hci_dev_unlock(hdev);
4613
4614 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4615 &rp, sizeof(rp));
4616 }
4617
4618 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4619 bdaddr_t *bdaddr, u8 bdaddr_type,
4620 u32 supported_flags, u32 current_flags)
4621 {
4622 struct mgmt_ev_device_flags_changed ev;
4623
4624 bacpy(&ev.addr.bdaddr, bdaddr);
4625 ev.addr.type = bdaddr_type;
4626 ev.supported_flags = cpu_to_le32(supported_flags);
4627 ev.current_flags = cpu_to_le32(current_flags);
4628
4629 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4630 }
4631
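/* Handle MGMT_OP_SET_DEVICE_FLAGS: reject flags outside the supported set,
 * store the new value either in the BR/EDR accept-list entry or in the LE
 * connection parameters, and emit MGMT_EV_DEVICE_FLAGS_CHANGED on success.
 */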
4632 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4633 u16 len)
4634 {
4635 struct mgmt_cp_set_device_flags *cp = data;
4636 struct bdaddr_list_with_flags *br_params;
4637 struct hci_conn_params *params;
4638 u8 status = MGMT_STATUS_INVALID_PARAMS;
4639 u32 supported_flags;
4640 u32 current_flags = __le32_to_cpu(cp->current_flags);
4641
4642 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4643 &cp->addr.bdaddr, cp->addr.type, current_flags);
4644
4645 // We should take hci_dev_lock() early; conn_flags can change
4646 supported_flags = hdev->conn_flags;
4647
4648 if ((supported_flags | current_flags) != supported_flags) {
4649 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4650 current_flags, supported_flags);
4651 goto done;
4652 }
4653
4654 hci_dev_lock(hdev);
4655
4656 if (cp->addr.type == BDADDR_BREDR) {
4657 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4658 &cp->addr.bdaddr,
4659 cp->addr.type);
4660
4661 if (br_params) {
4662 br_params->flags = current_flags;
4663 status = MGMT_STATUS_SUCCESS;
4664 } else {
4665 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4666 &cp->addr.bdaddr, cp->addr.type);
4667 }
4668
4669 goto unlock;
4670 }
4671
4672 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4673 le_addr_type(cp->addr.type));
4674 if (!params) {
4675 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4676 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
4677 goto unlock;
4678 }
4679
4680 supported_flags = get_params_flags(hdev, params);
4681
4682 if ((supported_flags | current_flags) != supported_flags) {
4683 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4684 current_flags, supported_flags);
4685 goto unlock;
4686 }
4687
4688 params->flags = current_flags;
4689 status = MGMT_STATUS_SUCCESS;
4690
4691 /* Update the passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4692  * has been set.
4693  */
4694 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
4695 hci_update_passive_scan(hdev);
4696
4697 unlock:
4698 hci_dev_unlock(hdev);
4699
4700 done:
4701 if (status == MGMT_STATUS_SUCCESS)
4702 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4703 supported_flags, current_flags);
4704
4705 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4706 &cp->addr, sizeof(cp->addr));
4707 }
4708
4709 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4710 u16 handle)
4711 {
4712 struct mgmt_ev_adv_monitor_added ev;
4713
4714 ev.monitor_handle = cpu_to_le16(handle);
4715
4716 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4717 }
4718
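/* Emit MGMT_EV_ADV_MONITOR_REMOVED for @handle. When the removal was
 * requested for a specific handle via a pending MGMT_OP_REMOVE_ADV_MONITOR
 * command, the issuing socket is skipped since it receives a command reply
 * instead of the event.
 */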
4719 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4720 {
4721 struct mgmt_ev_adv_monitor_removed ev;
4722 struct mgmt_pending_cmd *cmd;
4723 struct sock *sk_skip = NULL;
4724 struct mgmt_cp_remove_adv_monitor *cp;
4725
4726 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4727 if (cmd) {
4728 cp = cmd->param;
4729
4730 if (cp->monitor_handle)
4731 sk_skip = cmd->sk;
4732 }
4733
4734 ev.monitor_handle = cpu_to_le16(handle);
4735
4736 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4737 }
4738
4739 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4740 void *data, u16 len)
4741 {
4742 struct adv_monitor *monitor = NULL;
4743 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4744 int handle, err;
4745 size_t rp_size = 0;
4746 __u32 supported = 0;
4747 __u32 enabled = 0;
4748 __u16 num_handles = 0;
4749 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4750
4751 BT_DBG("request for %s", hdev->name);
4752
4753 hci_dev_lock(hdev);
4754
4755 if (msft_monitor_supported(hdev))
4756 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4757
4758 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4759 handles[num_handles++] = monitor->handle;
4760
4761 hci_dev_unlock(hdev);
4762
4763 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4764 rp = kmalloc(rp_size, GFP_KERNEL);
4765 if (!rp)
4766 return -ENOMEM;
4767
4768 /* Once controller-based monitoring is in place, enabled_features should reflect its use */
4769 enabled = supported;
4770
4771 rp->supported_features = cpu_to_le32(supported);
4772 rp->enabled_features = cpu_to_le32(enabled);
4773 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4774 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4775 rp->num_handles = cpu_to_le16(num_handles);
4776 if (num_handles)
4777 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4778
4779 err = mgmt_cmd_complete(sk, hdev->id,
4780 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4781 MGMT_STATUS_SUCCESS, rp, rp_size);
4782
4783 kfree(rp);
4784
4785 return err;
4786 }
4787
4788 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
4789 void *data, int status)
4790 {
4791 struct mgmt_rp_add_adv_patterns_monitor rp;
4792 struct mgmt_pending_cmd *cmd = data;
4793 struct adv_monitor *monitor = cmd->user_data;
4794
4795 hci_dev_lock(hdev);
4796
4797 rp.monitor_handle = cpu_to_le16(monitor->handle);
4798
4799 if (!status) {
4800 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4801 hdev->adv_monitors_cnt++;
4802 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4803 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4804 hci_update_passive_scan(hdev);
4805 }
4806
4807 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4808 mgmt_status(status), &rp, sizeof(rp));
4809 mgmt_pending_remove(cmd);
4810
4811 hci_dev_unlock(hdev);
4812 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
4813 rp.monitor_handle, status);
4814 }
4815
4816 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
4817 {
4818 struct mgmt_pending_cmd *cmd = data;
4819 struct adv_monitor *monitor = cmd->user_data;
4820
4821 return hci_add_adv_monitor(hdev, monitor);
4822 }
4823
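/* Common helper for ADD_ADV_PATTERNS_MONITOR and
 * ADD_ADV_PATTERNS_MONITOR_RSSI. Takes ownership of the monitor @m: on any
 * failure (earlier parse error, another pending monitor command, queueing
 * failure) the monitor is freed and a command status is returned; otherwise
 * registration is queued through hci_cmd_sync_queue().
 */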
4824 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4825 struct adv_monitor *m, u8 status,
4826 void *data, u16 len, u16 op)
4827 {
4828 struct mgmt_pending_cmd *cmd;
4829 int err;
4830
4831 hci_dev_lock(hdev);
4832
4833 if (status)
4834 goto unlock;
4835
4836 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4837 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4838 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4839 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4840 status = MGMT_STATUS_BUSY;
4841 goto unlock;
4842 }
4843
4844 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4845 if (!cmd) {
4846 status = MGMT_STATUS_NO_RESOURCES;
4847 goto unlock;
4848 }
4849
4850 cmd->user_data = m;
4851 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
4852 mgmt_add_adv_patterns_monitor_complete);
4853 if (err) {
4854 if (err == -ENOMEM)
4855 status = MGMT_STATUS_NO_RESOURCES;
4856 else
4857 status = MGMT_STATUS_FAILED;
4858
4859 goto unlock;
4860 }
4861
4862 hci_dev_unlock(hdev);
4863
4864 return 0;
4865
4866 unlock:
4867 hci_free_adv_monitor(hdev, m);
4868 hci_dev_unlock(hdev);
4869 return mgmt_cmd_status(sk, hdev->id, op, status);
4870 }
4871
4872 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4873 struct mgmt_adv_rssi_thresholds *rssi)
4874 {
4875 if (rssi) {
4876 m->rssi.low_threshold = rssi->low_threshold;
4877 m->rssi.low_threshold_timeout =
4878 __le16_to_cpu(rssi->low_threshold_timeout);
4879 m->rssi.high_threshold = rssi->high_threshold;
4880 m->rssi.high_threshold_timeout =
4881 __le16_to_cpu(rssi->high_threshold_timeout);
4882 m->rssi.sampling_period = rssi->sampling_period;
4883 } else {
4884 /* Default values: use the least restrictive thresholds so the
4885  * monitor behaves as if no RSSI parameters had been supplied.
4886  */
4889 m->rssi.low_threshold = -127;
4890 m->rssi.low_threshold_timeout = 60;
4891 m->rssi.high_threshold = -127;
4892 m->rssi.high_threshold_timeout = 0;
4893 m->rssi.sampling_period = 0;
4894 }
4895 }
4896
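/* Copy the advertising patterns supplied by user space into the monitor.
 * Each offset/length pair is validated against HCI_MAX_AD_LENGTH before a
 * struct adv_pattern is allocated and linked into m->patterns.
 */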
4897 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4898 struct mgmt_adv_pattern *patterns)
4899 {
4900 u8 offset = 0, length = 0;
4901 struct adv_pattern *p = NULL;
4902 int i;
4903
4904 for (i = 0; i < pattern_count; i++) {
4905 offset = patterns[i].offset;
4906 length = patterns[i].length;
4907 if (offset >= HCI_MAX_AD_LENGTH ||
4908 length > HCI_MAX_AD_LENGTH ||
4909 (offset + length) > HCI_MAX_AD_LENGTH)
4910 return MGMT_STATUS_INVALID_PARAMS;
4911
4912 p = kmalloc(sizeof(*p), GFP_KERNEL);
4913 if (!p)
4914 return MGMT_STATUS_NO_RESOURCES;
4915
4916 p->ad_type = patterns[i].ad_type;
4917 p->offset = patterns[i].offset;
4918 p->length = patterns[i].length;
4919 memcpy(p->value, patterns[i].value, p->length);
4920
4921 INIT_LIST_HEAD(&p->list);
4922 list_add(&p->list, &m->patterns);
4923 }
4924
4925 return MGMT_STATUS_SUCCESS;
4926 }
4927
4928 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4929 void *data, u16 len)
4930 {
4931 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4932 struct adv_monitor *m = NULL;
4933 u8 status = MGMT_STATUS_SUCCESS;
4934 size_t expected_size = sizeof(*cp);
4935
4936 BT_DBG("request for %s", hdev->name);
4937
4938 if (len <= sizeof(*cp)) {
4939 status = MGMT_STATUS_INVALID_PARAMS;
4940 goto done;
4941 }
4942
4943 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4944 if (len != expected_size) {
4945 status = MGMT_STATUS_INVALID_PARAMS;
4946 goto done;
4947 }
4948
4949 m = kzalloc(sizeof(*m), GFP_KERNEL);
4950 if (!m) {
4951 status = MGMT_STATUS_NO_RESOURCES;
4952 goto done;
4953 }
4954
4955 INIT_LIST_HEAD(&m->patterns);
4956
4957 parse_adv_monitor_rssi(m, NULL);
4958 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4959
4960 done:
4961 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4962 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4963 }
4964
4965 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4966 void *data, u16 len)
4967 {
4968 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4969 struct adv_monitor *m = NULL;
4970 u8 status = MGMT_STATUS_SUCCESS;
4971 size_t expected_size = sizeof(*cp);
4972
4973 BT_DBG("request for %s", hdev->name);
4974
4975 if (len <= sizeof(*cp)) {
4976 status = MGMT_STATUS_INVALID_PARAMS;
4977 goto done;
4978 }
4979
4980 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4981 if (len != expected_size) {
4982 status = MGMT_STATUS_INVALID_PARAMS;
4983 goto done;
4984 }
4985
4986 m = kzalloc(sizeof(*m), GFP_KERNEL);
4987 if (!m) {
4988 status = MGMT_STATUS_NO_RESOURCES;
4989 goto done;
4990 }
4991
4992 INIT_LIST_HEAD(&m->patterns);
4993
4994 parse_adv_monitor_rssi(m, &cp->rssi);
4995 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4996
4997 done:
4998 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4999 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5000 }
5001
5002 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5003 void *data, int status)
5004 {
5005 struct mgmt_rp_remove_adv_monitor rp;
5006 struct mgmt_pending_cmd *cmd = data;
5007 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5008
5009 hci_dev_lock(hdev);
5010
5011 rp.monitor_handle = cp->monitor_handle;
5012
5013 if (!status)
5014 hci_update_passive_scan(hdev);
5015
5016 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5017 mgmt_status(status), &rp, sizeof(rp));
5018 mgmt_pending_remove(cmd);
5019
5020 hci_dev_unlock(hdev);
5021 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5022 rp.monitor_handle, status);
5023 }
5024
5025 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5026 {
5027 struct mgmt_pending_cmd *cmd = data;
5028 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5029 u16 handle = __le16_to_cpu(cp->monitor_handle);
5030
5031 if (!handle)
5032 return hci_remove_all_adv_monitor(hdev);
5033
5034 return hci_remove_single_adv_monitor(hdev, handle);
5035 }
5036
5037 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5038 void *data, u16 len)
5039 {
5040 struct mgmt_pending_cmd *cmd;
5041 int err, status;
5042
5043 hci_dev_lock(hdev);
5044
5045 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5046 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5047 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5048 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5049 status = MGMT_STATUS_BUSY;
5050 goto unlock;
5051 }
5052
5053 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5054 if (!cmd) {
5055 status = MGMT_STATUS_NO_RESOURCES;
5056 goto unlock;
5057 }
5058
5059 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5060 mgmt_remove_adv_monitor_complete);
5061
5062 if (err) {
5063 mgmt_pending_remove(cmd);
5064
5065 if (err == -ENOMEM)
5066 status = MGMT_STATUS_NO_RESOURCES;
5067 else
5068 status = MGMT_STATUS_FAILED;
5069
5070 goto unlock;
5071 }
5072
5073 hci_dev_unlock(hdev);
5074
5075 return 0;
5076
5077 unlock:
5078 hci_dev_unlock(hdev);
5079 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5080 status);
5081 }
5082
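/* Completion handler for MGMT_OP_READ_LOCAL_OOB_DATA. Depending on whether
 * BR/EDR Secure Connections is enabled, the controller reply carries either
 * only the P-192 hash/randomizer or both the P-192 and P-256 values, and
 * the management response is sized accordingly.
 */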
5083 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5084 {
5085 struct mgmt_rp_read_local_oob_data mgmt_rp;
5086 size_t rp_size = sizeof(mgmt_rp);
5087 struct mgmt_pending_cmd *cmd = data;
5088 struct sk_buff *skb = cmd->skb;
5089 u8 status = mgmt_status(err);
5090
5091 if (!status) {
5092 if (!skb)
5093 status = MGMT_STATUS_FAILED;
5094 else if (IS_ERR(skb))
5095 status = mgmt_status(PTR_ERR(skb));
5096 else
5097 status = mgmt_status(skb->data[0]);
5098 }
5099
5100 bt_dev_dbg(hdev, "status %d", status);
5101
5102 if (status) {
5103 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5104 goto remove;
5105 }
5106
5107 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5108
5109 if (!bredr_sc_enabled(hdev)) {
5110 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5111
5112 if (skb->len < sizeof(*rp)) {
5113 mgmt_cmd_status(cmd->sk, hdev->id,
5114 MGMT_OP_READ_LOCAL_OOB_DATA,
5115 MGMT_STATUS_FAILED);
5116 goto remove;
5117 }
5118
5119 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5120 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5121
5122 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5123 } else {
5124 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5125
5126 if (skb->len < sizeof(*rp)) {
5127 mgmt_cmd_status(cmd->sk, hdev->id,
5128 MGMT_OP_READ_LOCAL_OOB_DATA,
5129 MGMT_STATUS_FAILED);
5130 goto remove;
5131 }
5132
5133 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5134 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5135
5136 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5137 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5138 }
5139
5140 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5141 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5142
5143 remove:
5144 if (skb && !IS_ERR(skb))
5145 kfree_skb(skb);
5146
5147 mgmt_pending_free(cmd);
5148 }
5149
5150 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5151 {
5152 struct mgmt_pending_cmd *cmd = data;
5153
5154 if (bredr_sc_enabled(hdev))
5155 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5156 else
5157 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5158
5159 if (IS_ERR(cmd->skb))
5160 return PTR_ERR(cmd->skb);
5161 else
5162 return 0;
5163 }
5164
5165 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5166 void *data, u16 data_len)
5167 {
5168 struct mgmt_pending_cmd *cmd;
5169 int err;
5170
5171 bt_dev_dbg(hdev, "sock %p", sk);
5172
5173 hci_dev_lock(hdev);
5174
5175 if (!hdev_is_powered(hdev)) {
5176 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5177 MGMT_STATUS_NOT_POWERED);
5178 goto unlock;
5179 }
5180
5181 if (!lmp_ssp_capable(hdev)) {
5182 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5183 MGMT_STATUS_NOT_SUPPORTED);
5184 goto unlock;
5185 }
5186
5187 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5188 if (!cmd)
5189 err = -ENOMEM;
5190 else
5191 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5192 read_local_oob_data_complete);
5193
5194 if (err < 0) {
5195 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5196 MGMT_STATUS_FAILED);
5197
5198 if (cmd)
5199 mgmt_pending_free(cmd);
5200 }
5201
5202 unlock:
5203 hci_dev_unlock(hdev);
5204 return err;
5205 }
5206
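/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA. Two command sizes are accepted: the
 * legacy form carries only P-192 values and is restricted to BR/EDR
 * addresses, while the extended form may also carry P-256 values; all-zero
 * value pairs are treated as "not provided".
 */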
5207 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5208 void *data, u16 len)
5209 {
5210 struct mgmt_addr_info *addr = data;
5211 int err;
5212
5213 bt_dev_dbg(hdev, "sock %p", sk);
5214
5215 if (!bdaddr_type_is_valid(addr->type))
5216 return mgmt_cmd_complete(sk, hdev->id,
5217 MGMT_OP_ADD_REMOTE_OOB_DATA,
5218 MGMT_STATUS_INVALID_PARAMS,
5219 addr, sizeof(*addr));
5220
5221 hci_dev_lock(hdev);
5222
5223 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5224 struct mgmt_cp_add_remote_oob_data *cp = data;
5225 u8 status;
5226
5227 if (cp->addr.type != BDADDR_BREDR) {
5228 err = mgmt_cmd_complete(sk, hdev->id,
5229 MGMT_OP_ADD_REMOTE_OOB_DATA,
5230 MGMT_STATUS_INVALID_PARAMS,
5231 &cp->addr, sizeof(cp->addr));
5232 goto unlock;
5233 }
5234
5235 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5236 cp->addr.type, cp->hash,
5237 cp->rand, NULL, NULL);
5238 if (err < 0)
5239 status = MGMT_STATUS_FAILED;
5240 else
5241 status = MGMT_STATUS_SUCCESS;
5242
5243 err = mgmt_cmd_complete(sk, hdev->id,
5244 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5245 &cp->addr, sizeof(cp->addr));
5246 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5247 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5248 u8 *rand192, *hash192, *rand256, *hash256;
5249 u8 status;
5250
5251 if (bdaddr_type_is_le(cp->addr.type)) {
5252 /* Enforce zero-valued 192-bit parameters, as
5253  * mandated by the specification for LE addresses.
5254  */
5255 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5256 memcmp(cp->hash192, ZERO_KEY, 16)) {
5257 err = mgmt_cmd_complete(sk, hdev->id,
5258 MGMT_OP_ADD_REMOTE_OOB_DATA,
5259 MGMT_STATUS_INVALID_PARAMS,
5260 addr, sizeof(*addr));
5261 goto unlock;
5262 }
5263
5264 rand192 = NULL;
5265 hash192 = NULL;
5266 } else {
5267 /* If either of the P-192 values is set to zero,
5268  * then just disable OOB data for P-192.
5269  */
5270 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5271 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5272 rand192 = NULL;
5273 hash192 = NULL;
5274 } else {
5275 rand192 = cp->rand192;
5276 hash192 = cp->hash192;
5277 }
5278 }
5279
5280 /* If either of the P-256 values is set to zero,
5281  * then just disable OOB data for P-256.
5282  */
5283 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5284 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5285 rand256 = NULL;
5286 hash256 = NULL;
5287 } else {
5288 rand256 = cp->rand256;
5289 hash256 = cp->hash256;
5290 }
5291
5292 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5293 cp->addr.type, hash192, rand192,
5294 hash256, rand256);
5295 if (err < 0)
5296 status = MGMT_STATUS_FAILED;
5297 else
5298 status = MGMT_STATUS_SUCCESS;
5299
5300 err = mgmt_cmd_complete(sk, hdev->id,
5301 MGMT_OP_ADD_REMOTE_OOB_DATA,
5302 status, &cp->addr, sizeof(cp->addr));
5303 } else {
5304 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5305 len);
5306 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5307 MGMT_STATUS_INVALID_PARAMS);
5308 }
5309
5310 unlock:
5311 hci_dev_unlock(hdev);
5312 return err;
5313 }
5314
5315 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5316 void *data, u16 len)
5317 {
5318 struct mgmt_cp_remove_remote_oob_data *cp = data;
5319 u8 status;
5320 int err;
5321
5322 bt_dev_dbg(hdev, "sock %p", sk);
5323
5324 if (cp->addr.type != BDADDR_BREDR)
5325 return mgmt_cmd_complete(sk, hdev->id,
5326 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5327 MGMT_STATUS_INVALID_PARAMS,
5328 &cp->addr, sizeof(cp->addr));
5329
5330 hci_dev_lock(hdev);
5331
5332 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5333 hci_remote_oob_data_clear(hdev);
5334 status = MGMT_STATUS_SUCCESS;
5335 goto done;
5336 }
5337
5338 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5339 if (err < 0)
5340 status = MGMT_STATUS_INVALID_PARAMS;
5341 else
5342 status = MGMT_STATUS_SUCCESS;
5343
5344 done:
5345 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5346 status, &cp->addr, sizeof(cp->addr));
5347
5348 hci_dev_unlock(hdev);
5349 return err;
5350 }
5351
5352 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5353 {
5354 struct mgmt_pending_cmd *cmd;
5355
5356 bt_dev_dbg(hdev, "status %u", status);
5357
5358 hci_dev_lock(hdev);
5359
5360 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5361 if (!cmd)
5362 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5363
5364 if (!cmd)
5365 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5366
5367 if (cmd) {
5368 cmd->cmd_complete(cmd, mgmt_status(status));
5369 mgmt_pending_remove(cmd);
5370 }
5371
5372 hci_dev_unlock(hdev);
5373 }
5374
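/* Check that the requested discovery type is usable on this controller:
 * LE discovery needs LE support, interleaved discovery needs both LE and
 * BR/EDR, and BR/EDR discovery needs BR/EDR support. On failure the
 * matching management status code is returned through @mgmt_status.
 */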
5375 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5376 uint8_t *mgmt_status)
5377 {
5378 switch (type) {
5379 case DISCOV_TYPE_LE:
5380 *mgmt_status = mgmt_le_support(hdev);
5381 if (*mgmt_status)
5382 return false;
5383 break;
5384 case DISCOV_TYPE_INTERLEAVED:
5385 *mgmt_status = mgmt_le_support(hdev);
5386 if (*mgmt_status)
5387 return false;
5388 fallthrough;
5389 case DISCOV_TYPE_BREDR:
5390 *mgmt_status = mgmt_bredr_support(hdev);
5391 if (*mgmt_status)
5392 return false;
5393 break;
5394 default:
5395 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5396 return false;
5397 }
5398
5399 return true;
5400 }
5401
5402 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5403 {
5404 struct mgmt_pending_cmd *cmd = data;
5405
5406 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5407 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5408 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5409 return;
5410
5411 bt_dev_dbg(hdev, "err %d", err);
5412
5413 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5414 cmd->param, 1);
5415 mgmt_pending_remove(cmd);
5416
5417 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5418 DISCOVERY_FINDING);
5419 }
5420
5421 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5422 {
5423 return hci_start_discovery_sync(hdev);
5424 }
5425
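/* Shared implementation behind MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: checks power state, busy or paused
 * discovery and the requested type before queueing
 * hci_start_discovery_sync() and moving to DISCOVERY_STARTING.
 */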
5426 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5427 u16 op, void *data, u16 len)
5428 {
5429 struct mgmt_cp_start_discovery *cp = data;
5430 struct mgmt_pending_cmd *cmd;
5431 u8 status;
5432 int err;
5433
5434 bt_dev_dbg(hdev, "sock %p", sk);
5435
5436 hci_dev_lock(hdev);
5437
5438 if (!hdev_is_powered(hdev)) {
5439 err = mgmt_cmd_complete(sk, hdev->id, op,
5440 MGMT_STATUS_NOT_POWERED,
5441 &cp->type, sizeof(cp->type));
5442 goto failed;
5443 }
5444
5445 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5446 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5447 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5448 &cp->type, sizeof(cp->type));
5449 goto failed;
5450 }
5451
5452 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5453 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5454 &cp->type, sizeof(cp->type));
5455 goto failed;
5456 }
5457
5458 /* Discovery cannot be started while it is paused */
5459 if (hdev->discovery_paused) {
5460 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5461 &cp->type, sizeof(cp->type));
5462 goto failed;
5463 }
5464
5465 /* Clear the discovery filter first to free any previously
5466  * allocated memory for the UUID list.
5467  */
5468 hci_discovery_filter_clear(hdev);
5469
5470 hdev->discovery.type = cp->type;
5471 hdev->discovery.report_invalid_rssi = false;
5472 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5473 hdev->discovery.limited = true;
5474 else
5475 hdev->discovery.limited = false;
5476
5477 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5478 if (!cmd) {
5479 err = -ENOMEM;
5480 goto failed;
5481 }
5482
5483 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5484 start_discovery_complete);
5485 if (err < 0) {
5486 mgmt_pending_remove(cmd);
5487 goto failed;
5488 }
5489
5490 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5491
5492 failed:
5493 hci_dev_unlock(hdev);
5494 return err;
5495 }
5496
5497 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5498 void *data, u16 len)
5499 {
5500 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5501 data, len);
5502 }
5503
5504 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5505 void *data, u16 len)
5506 {
5507 return start_discovery_internal(sk, hdev,
5508 MGMT_OP_START_LIMITED_DISCOVERY,
5509 data, len);
5510 }
5511
5512 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5513 void *data, u16 len)
5514 {
5515 struct mgmt_cp_start_service_discovery *cp = data;
5516 struct mgmt_pending_cmd *cmd;
5517 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5518 u16 uuid_count, expected_len;
5519 u8 status;
5520 int err;
5521
5522 bt_dev_dbg(hdev, "sock %p", sk);
5523
5524 hci_dev_lock(hdev);
5525
5526 if (!hdev_is_powered(hdev)) {
5527 err = mgmt_cmd_complete(sk, hdev->id,
5528 MGMT_OP_START_SERVICE_DISCOVERY,
5529 MGMT_STATUS_NOT_POWERED,
5530 &cp->type, sizeof(cp->type));
5531 goto failed;
5532 }
5533
5534 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5535 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5536 err = mgmt_cmd_complete(sk, hdev->id,
5537 MGMT_OP_START_SERVICE_DISCOVERY,
5538 MGMT_STATUS_BUSY, &cp->type,
5539 sizeof(cp->type));
5540 goto failed;
5541 }
5542
5543 if (hdev->discovery_paused) {
5544 err = mgmt_cmd_complete(sk, hdev->id,
5545 MGMT_OP_START_SERVICE_DISCOVERY,
5546 MGMT_STATUS_BUSY, &cp->type,
5547 sizeof(cp->type));
5548 goto failed;
5549 }
5550
5551 uuid_count = __le16_to_cpu(cp->uuid_count);
5552 if (uuid_count > max_uuid_count) {
5553 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5554 uuid_count);
5555 err = mgmt_cmd_complete(sk, hdev->id,
5556 MGMT_OP_START_SERVICE_DISCOVERY,
5557 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5558 sizeof(cp->type));
5559 goto failed;
5560 }
5561
5562 expected_len = sizeof(*cp) + uuid_count * 16;
5563 if (expected_len != len) {
5564 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5565 expected_len, len);
5566 err = mgmt_cmd_complete(sk, hdev->id,
5567 MGMT_OP_START_SERVICE_DISCOVERY,
5568 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5569 sizeof(cp->type));
5570 goto failed;
5571 }
5572
5573 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5574 err = mgmt_cmd_complete(sk, hdev->id,
5575 MGMT_OP_START_SERVICE_DISCOVERY,
5576 status, &cp->type, sizeof(cp->type));
5577 goto failed;
5578 }
5579
5580 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5581 hdev, data, len);
5582 if (!cmd) {
5583 err = -ENOMEM;
5584 goto failed;
5585 }
5586
5587 /* Clear the discovery filter first to free any previously
5588  * allocated memory for the UUID list.
5589  */
5590 hci_discovery_filter_clear(hdev);
5591
5592 hdev->discovery.result_filtering = true;
5593 hdev->discovery.type = cp->type;
5594 hdev->discovery.rssi = cp->rssi;
5595 hdev->discovery.uuid_count = uuid_count;
5596
5597 if (uuid_count > 0) {
5598 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5599 GFP_KERNEL);
5600 if (!hdev->discovery.uuids) {
5601 err = mgmt_cmd_complete(sk, hdev->id,
5602 MGMT_OP_START_SERVICE_DISCOVERY,
5603 MGMT_STATUS_FAILED,
5604 &cp->type, sizeof(cp->type));
5605 mgmt_pending_remove(cmd);
5606 goto failed;
5607 }
5608 }
5609
5610 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5611 start_discovery_complete);
5612 if (err < 0) {
5613 mgmt_pending_remove(cmd);
5614 goto failed;
5615 }
5616
5617 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5618
5619 failed:
5620 hci_dev_unlock(hdev);
5621 return err;
5622 }
5623
5624 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5625 {
5626 struct mgmt_pending_cmd *cmd;
5627
5628 bt_dev_dbg(hdev, "status %u", status);
5629
5630 hci_dev_lock(hdev);
5631
5632 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5633 if (cmd) {
5634 cmd->cmd_complete(cmd, mgmt_status(status));
5635 mgmt_pending_remove(cmd);
5636 }
5637
5638 hci_dev_unlock(hdev);
5639 }
5640
5641 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
5642 {
5643 struct mgmt_pending_cmd *cmd = data;
5644
5645 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
5646 return;
5647
5648 bt_dev_dbg(hdev, "err %d", err);
5649
5650 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5651 cmd->param, 1);
5652 mgmt_pending_remove(cmd);
5653
5654 if (!err)
5655 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5656 }
5657
5658 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
5659 {
5660 return hci_stop_discovery_sync(hdev);
5661 }
5662
5663 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5664 u16 len)
5665 {
5666 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5667 struct mgmt_pending_cmd *cmd;
5668 int err;
5669
5670 bt_dev_dbg(hdev, "sock %p", sk);
5671
5672 hci_dev_lock(hdev);
5673
5674 if (!hci_discovery_active(hdev)) {
5675 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5676 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5677 sizeof(mgmt_cp->type));
5678 goto unlock;
5679 }
5680
5681 if (hdev->discovery.type != mgmt_cp->type) {
5682 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5683 MGMT_STATUS_INVALID_PARAMS,
5684 &mgmt_cp->type, sizeof(mgmt_cp->type));
5685 goto unlock;
5686 }
5687
5688 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5689 if (!cmd) {
5690 err = -ENOMEM;
5691 goto unlock;
5692 }
5693
5694 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
5695 stop_discovery_complete);
5696 if (err < 0) {
5697 mgmt_pending_remove(cmd);
5698 goto unlock;
5699 }
5700
5701 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5702
5703 unlock:
5704 hci_dev_unlock(hdev);
5705 return err;
5706 }
5707
5708 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5709 u16 len)
5710 {
5711 struct mgmt_cp_confirm_name *cp = data;
5712 struct inquiry_entry *e;
5713 int err;
5714
5715 bt_dev_dbg(hdev, "sock %p", sk);
5716
5717 hci_dev_lock(hdev);
5718
5719 if (!hci_discovery_active(hdev)) {
5720 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5721 MGMT_STATUS_FAILED, &cp->addr,
5722 sizeof(cp->addr));
5723 goto failed;
5724 }
5725
5726 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5727 if (!e) {
5728 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5729 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5730 sizeof(cp->addr));
5731 goto failed;
5732 }
5733
5734 if (cp->name_known) {
5735 e->name_state = NAME_KNOWN;
5736 list_del(&e->list);
5737 } else {
5738 e->name_state = NAME_NEEDED;
5739 hci_inquiry_cache_update_resolve(hdev, e);
5740 }
5741
5742 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5743 &cp->addr, sizeof(cp->addr));
5744
5745 failed:
5746 hci_dev_unlock(hdev);
5747 return err;
5748 }
5749
5750 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5751 u16 len)
5752 {
5753 struct mgmt_cp_block_device *cp = data;
5754 u8 status;
5755 int err;
5756
5757 bt_dev_dbg(hdev, "sock %p", sk);
5758
5759 if (!bdaddr_type_is_valid(cp->addr.type))
5760 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5761 MGMT_STATUS_INVALID_PARAMS,
5762 &cp->addr, sizeof(cp->addr));
5763
5764 hci_dev_lock(hdev);
5765
5766 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5767 cp->addr.type);
5768 if (err < 0) {
5769 status = MGMT_STATUS_FAILED;
5770 goto done;
5771 }
5772
5773 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5774 sk);
5775 status = MGMT_STATUS_SUCCESS;
5776
5777 done:
5778 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5779 &cp->addr, sizeof(cp->addr));
5780
5781 hci_dev_unlock(hdev);
5782
5783 return err;
5784 }
5785
5786 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5787 u16 len)
5788 {
5789 struct mgmt_cp_unblock_device *cp = data;
5790 u8 status;
5791 int err;
5792
5793 bt_dev_dbg(hdev, "sock %p", sk);
5794
5795 if (!bdaddr_type_is_valid(cp->addr.type))
5796 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5797 MGMT_STATUS_INVALID_PARAMS,
5798 &cp->addr, sizeof(cp->addr));
5799
5800 hci_dev_lock(hdev);
5801
5802 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5803 cp->addr.type);
5804 if (err < 0) {
5805 status = MGMT_STATUS_INVALID_PARAMS;
5806 goto done;
5807 }
5808
5809 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5810 sk);
5811 status = MGMT_STATUS_SUCCESS;
5812
5813 done:
5814 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5815 &cp->addr, sizeof(cp->addr));
5816
5817 hci_dev_unlock(hdev);
5818
5819 return err;
5820 }
5821
5822 static int set_device_id_sync(struct hci_dev *hdev, void *data)
5823 {
5824 return hci_update_eir_sync(hdev);
5825 }
5826
5827 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5828 u16 len)
5829 {
5830 struct mgmt_cp_set_device_id *cp = data;
5831 int err;
5832 __u16 source;
5833
5834 bt_dev_dbg(hdev, "sock %p", sk);
5835
5836 source = __le16_to_cpu(cp->source);
5837
5838 if (source > 0x0002)
5839 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5840 MGMT_STATUS_INVALID_PARAMS);
5841
5842 hci_dev_lock(hdev);
5843
5844 hdev->devid_source = source;
5845 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5846 hdev->devid_product = __le16_to_cpu(cp->product);
5847 hdev->devid_version = __le16_to_cpu(cp->version);
5848
5849 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5850 NULL, 0);
5851
5852 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5853
5854 hci_dev_unlock(hdev);
5855
5856 return err;
5857 }
5858
5859 static void enable_advertising_instance(struct hci_dev *hdev, int err)
5860 {
5861 if (err)
5862 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
5863 else
5864 bt_dev_dbg(hdev, "status %d", err);
5865 }
5866
5867 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
5868 {
5869 struct cmd_lookup match = { NULL, hdev };
5870 u8 instance;
5871 struct adv_info *adv_instance;
5872 u8 status = mgmt_status(err);
5873
5874 if (status) {
5875 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5876 cmd_status_rsp, &status);
5877 return;
5878 }
5879
5880 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5881 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5882 else
5883 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5884
5885 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5886 &match);
5887
5888 new_settings(hdev, match.sk);
5889
5890 if (match.sk)
5891 sock_put(match.sk);
5892
5893 /* If "Set Advertising" was just disabled and instance advertising was
5894  * set up earlier, then re-enable multi-instance advertising.
5895  */
5896 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5897 list_empty(&hdev->adv_instances))
5898 return;
5899
5900 instance = hdev->cur_adv_instance;
5901 if (!instance) {
5902 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5903 struct adv_info, list);
5904 if (!adv_instance)
5905 return;
5906
5907 instance = adv_instance->instance;
5908 }
5909
5910 err = hci_schedule_adv_instance_sync(hdev, instance, true);
5911
5912 enable_advertising_instance(hdev, err);
5913 }
5914
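/* hci_cmd_sync callback for MGMT_OP_SET_ADVERTISING: update the
 * ADVERTISING_CONNECTABLE flag, cancel any pending instance timeout and
 * then enable (extended or legacy) or disable advertising to match the
 * requested value.
 */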
5915 static int set_adv_sync(struct hci_dev *hdev, void *data)
5916 {
5917 struct mgmt_pending_cmd *cmd = data;
5918 struct mgmt_mode *cp = cmd->param;
5919 u8 val = !!cp->val;
5920
5921 if (cp->val == 0x02)
5922 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5923 else
5924 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5925
5926 cancel_adv_timeout(hdev);
5927
5928 if (val) {
5929 /* Switch to instance "0" for the Set Advertising setting.
5930  * We cannot use update_[adv|scan_rsp]_data() here as the
5931  * HCI_ADVERTISING flag is not yet set.
5932  */
5933 hdev->cur_adv_instance = 0x00;
5934
5935 if (ext_adv_capable(hdev)) {
5936 hci_start_ext_adv_sync(hdev, 0x00);
5937 } else {
5938 hci_update_adv_data_sync(hdev, 0x00);
5939 hci_update_scan_rsp_data_sync(hdev, 0x00);
5940 hci_enable_advertising_sync(hdev);
5941 }
5942 } else {
5943 hci_disable_advertising_sync(hdev);
5944 }
5945
5946 return 0;
5947 }
5948
5949 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5950 u16 len)
5951 {
5952 struct mgmt_mode *cp = data;
5953 struct mgmt_pending_cmd *cmd;
5954 u8 val, status;
5955 int err;
5956
5957 bt_dev_dbg(hdev, "sock %p", sk);
5958
5959 status = mgmt_le_support(hdev);
5960 if (status)
5961 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5962 status);
5963
5964 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5965 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5966 MGMT_STATUS_INVALID_PARAMS);
5967
5968 if (hdev->advertising_paused)
5969 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5970 MGMT_STATUS_BUSY);
5971
5972 hci_dev_lock(hdev);
5973
5974 val = !!cp->val;
5975
5976 /* The following conditions are ones which mean that we should
5977  * not do any HCI communication but directly send a mgmt
5978  * response to user space (after toggling the flag if
5979  * necessary).
5980  */
5981 if (!hdev_is_powered(hdev) ||
5982 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5983 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5984 hci_conn_num(hdev, LE_LINK) > 0 ||
5985 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5986 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5987 bool changed;
5988
5989 if (cp->val) {
5990 hdev->cur_adv_instance = 0x00;
5991 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5992 if (cp->val == 0x02)
5993 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5994 else
5995 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5996 } else {
5997 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5998 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5999 }
6000
6001 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6002 if (err < 0)
6003 goto unlock;
6004
6005 if (changed)
6006 err = new_settings(hdev, sk);
6007
6008 goto unlock;
6009 }
6010
6011 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6012 pending_find(MGMT_OP_SET_LE, hdev)) {
6013 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6014 MGMT_STATUS_BUSY);
6015 goto unlock;
6016 }
6017
6018 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6019 if (!cmd)
6020 err = -ENOMEM;
6021 else
6022 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6023 set_advertising_complete);
6024
6025 if (err < 0 && cmd)
6026 mgmt_pending_remove(cmd);
6027
6028 unlock:
6029 hci_dev_unlock(hdev);
6030 return err;
6031 }
6032
6033 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6034 void *data, u16 len)
6035 {
6036 struct mgmt_cp_set_static_address *cp = data;
6037 int err;
6038
6039 bt_dev_dbg(hdev, "sock %p", sk);
6040
6041 if (!lmp_le_capable(hdev))
6042 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6043 MGMT_STATUS_NOT_SUPPORTED);
6044
6045 if (hdev_is_powered(hdev))
6046 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6047 MGMT_STATUS_REJECTED);
6048
6049 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6050 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6051 return mgmt_cmd_status(sk, hdev->id,
6052 MGMT_OP_SET_STATIC_ADDRESS,
6053 MGMT_STATUS_INVALID_PARAMS);
6054
6055 /* The two most significant bits of a static address shall be set */
6056 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6057 return mgmt_cmd_status(sk, hdev->id,
6058 MGMT_OP_SET_STATIC_ADDRESS,
6059 MGMT_STATUS_INVALID_PARAMS);
6060 }
6061
6062 hci_dev_lock(hdev);
6063
6064 bacpy(&hdev->static_addr, &cp->bdaddr);
6065
6066 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6067 if (err < 0)
6068 goto unlock;
6069
6070 err = new_settings(hdev, sk);
6071
6072 unlock:
6073 hci_dev_unlock(hdev);
6074 return err;
6075 }
6076
6077 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6078 void *data, u16 len)
6079 {
6080 struct mgmt_cp_set_scan_params *cp = data;
6081 __u16 interval, window;
6082 int err;
6083
6084 bt_dev_dbg(hdev, "sock %p", sk);
6085
6086 if (!lmp_le_capable(hdev))
6087 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6088 MGMT_STATUS_NOT_SUPPORTED);
6089
6090 interval = __le16_to_cpu(cp->interval);
6091
6092 if (interval < 0x0004 || interval > 0x4000)
6093 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6094 MGMT_STATUS_INVALID_PARAMS);
6095
6096 window = __le16_to_cpu(cp->window);
6097
6098 if (window < 0x0004 || window > 0x4000)
6099 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6100 MGMT_STATUS_INVALID_PARAMS);
6101
6102 if (window > interval)
6103 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6104 MGMT_STATUS_INVALID_PARAMS);
6105
6106 hci_dev_lock(hdev);
6107
6108 hdev->le_scan_interval = interval;
6109 hdev->le_scan_window = window;
6110
6111 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6112 NULL, 0);
6113
6114 /* If background scan is running, restart it so the new parameters are
6115  * loaded.
6116  */
6117 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6118 hdev->discovery.state == DISCOVERY_STOPPED)
6119 hci_update_passive_scan(hdev);
6120
6121 hci_dev_unlock(hdev);
6122
6123 return err;
6124 }
6125
6126 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6127 {
6128 struct mgmt_pending_cmd *cmd = data;
6129
6130 bt_dev_dbg(hdev, "err %d", err);
6131
6132 if (err) {
6133 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6134 mgmt_status(err));
6135 } else {
6136 struct mgmt_mode *cp = cmd->param;
6137
6138 if (cp->val)
6139 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6140 else
6141 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6142
6143 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6144 new_settings(hdev, cmd->sk);
6145 }
6146
6147 mgmt_pending_free(cmd);
6148 }
6149
6150 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6151 {
6152 struct mgmt_pending_cmd *cmd = data;
6153 struct mgmt_mode *cp = cmd->param;
6154
6155 return hci_write_fast_connectable_sync(hdev, cp->val);
6156 }
6157
6158 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6159 void *data, u16 len)
6160 {
6161 struct mgmt_mode *cp = data;
6162 struct mgmt_pending_cmd *cmd;
6163 int err;
6164
6165 bt_dev_dbg(hdev, "sock %p", sk);
6166
6167 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6168 hdev->hci_ver < BLUETOOTH_VER_1_2)
6169 return mgmt_cmd_status(sk, hdev->id,
6170 MGMT_OP_SET_FAST_CONNECTABLE,
6171 MGMT_STATUS_NOT_SUPPORTED);
6172
6173 if (cp->val != 0x00 && cp->val != 0x01)
6174 return mgmt_cmd_status(sk, hdev->id,
6175 MGMT_OP_SET_FAST_CONNECTABLE,
6176 MGMT_STATUS_INVALID_PARAMS);
6177
6178 hci_dev_lock(hdev);
6179
6180 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6181 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6182 goto unlock;
6183 }
6184
6185 if (!hdev_is_powered(hdev)) {
6186 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6187 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6188 new_settings(hdev, sk);
6189 goto unlock;
6190 }
6191
6192 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6193 len);
6194 if (!cmd)
6195 err = -ENOMEM;
6196 else
6197 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6198 fast_connectable_complete);
6199
6200 if (err < 0) {
6201 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6202 MGMT_STATUS_FAILED);
6203
6204 if (cmd)
6205 mgmt_pending_free(cmd);
6206 }
6207
6208 unlock:
6209 hci_dev_unlock(hdev);
6210
6211 return err;
6212 }
6213
6214 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6215 {
6216 struct mgmt_pending_cmd *cmd = data;
6217
6218 bt_dev_dbg(hdev, "err %d", err);
6219
6220 if (err) {
6221 u8 mgmt_err = mgmt_status(err);
6222
6223 /* HCI_BREDR_ENABLED was set optimistically before queueing the HCI
6224  * commands; clear it again since they failed.
6225  */
6226 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6227
6228 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6229 } else {
6230 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6231 new_settings(hdev, cmd->sk);
6232 }
6233
6234 mgmt_pending_free(cmd);
6235 }
6236
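/* hci_cmd_sync callback for MGMT_OP_SET_BREDR: disable fast connectable
 * mode, refresh the scan mode and regenerate the advertising data, whose
 * flags field depends on whether BR/EDR is enabled.
 */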
6237 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6238 {
6239 int status;
6240
6241 status = hci_write_fast_connectable_sync(hdev, false);
6242
6243 if (!status)
6244 status = hci_update_scan_sync(hdev);
6245
6246 /* Since only the advertising data flags will change, there
6247  * is no need to update the scan response data.
6248  */
6249 if (!status)
6250 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6251
6252 return status;
6253 }
6254
6255 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6256 {
6257 struct mgmt_mode *cp = data;
6258 struct mgmt_pending_cmd *cmd;
6259 int err;
6260
6261 bt_dev_dbg(hdev, "sock %p", sk);
6262
6263 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6264 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6265 MGMT_STATUS_NOT_SUPPORTED);
6266
6267 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6268 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6269 MGMT_STATUS_REJECTED);
6270
6271 if (cp->val != 0x00 && cp->val != 0x01)
6272 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6273 MGMT_STATUS_INVALID_PARAMS);
6274
6275 hci_dev_lock(hdev);
6276
6277 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6278 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6279 goto unlock;
6280 }
6281
6282 if (!hdev_is_powered(hdev)) {
6283 if (!cp->val) {
6284 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6285 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6286 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6287 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6288 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6289 }
6290
6291 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6292
6293 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6294 if (err < 0)
6295 goto unlock;
6296
6297 err = new_settings(hdev, sk);
6298 goto unlock;
6299 }
6300
6301 /* Reject disabling BR/EDR while powered on */
6302 if (!cp->val) {
6303 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6304 MGMT_STATUS_REJECTED);
6305 goto unlock;
6306 } else {
6307 /* When configuring a dual-mode controller to operate with LE only
6308  * and using a static address, then switching BR/EDR back on is
6309  * not allowed.
6310  *
6311  * Dual-mode controllers shall operate with the public address as
6312  * their identity address for both BR/EDR and LE, so reject the
6313  * attempt to create an invalid configuration.
6314  *
6315  * The same restriction applies when Secure Connections has been
6316  * enabled: for BR/EDR this is a controller feature while for LE it
6317  * is a host stack feature, so switching BR/EDR back on when Secure
6318  * Connections is enabled is not a supported scenario.
6319  */
6321 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6322 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6323 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6324 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6325 MGMT_STATUS_REJECTED);
6326 goto unlock;
6327 }
6328 }
6329
6330 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6331 if (!cmd)
6332 err = -ENOMEM;
6333 else
6334 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6335 set_bredr_complete);
6336
6337 if (err < 0) {
6338 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6339 MGMT_STATUS_FAILED);
6340 if (cmd)
6341 mgmt_pending_free(cmd);
6342
6343 goto unlock;
6344 }
6345
6346 /* We need to flip the bit already here so that
6347  * hci_update_adv_data_sync() generates the correct flags.
6348  */
6349 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6350
6351 unlock:
6352 hci_dev_unlock(hdev);
6353 return err;
6354 }
6355
6356 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6357 {
6358 struct mgmt_pending_cmd *cmd = data;
6359 struct mgmt_mode *cp;
6360
6361 bt_dev_dbg(hdev, "err %d", err);
6362
6363 if (err) {
6364 u8 mgmt_err = mgmt_status(err);
6365
6366 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6367 goto done;
6368 }
6369
6370 cp = cmd->param;
6371
6372 switch (cp->val) {
6373 case 0x00:
6374 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6375 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6376 break;
6377 case 0x01:
6378 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6379 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6380 break;
6381 case 0x02:
6382 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6383 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6384 break;
6385 }
6386
6387 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6388 new_settings(hdev, cmd->sk);
6389
6390 done:
6391 mgmt_pending_free(cmd);
6392 }
6393
6394 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6395 {
6396 struct mgmt_pending_cmd *cmd = data;
6397 struct mgmt_mode *cp = cmd->param;
6398 u8 val = !!cp->val;
6399
6400 /* Force write of val */
6401 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6402
6403 return hci_write_sc_support_sync(hdev, val);
6404 }
6405
6406 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6407 void *data, u16 len)
6408 {
6409 struct mgmt_mode *cp = data;
6410 struct mgmt_pending_cmd *cmd;
6411 u8 val;
6412 int err;
6413
6414 bt_dev_dbg(hdev, "sock %p", sk);
6415
6416 if (!lmp_sc_capable(hdev) &&
6417 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6418 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6419 MGMT_STATUS_NOT_SUPPORTED);
6420
6421 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6422 lmp_sc_capable(hdev) &&
6423 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6424 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6425 MGMT_STATUS_REJECTED);
6426
6427 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6428 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6429 MGMT_STATUS_INVALID_PARAMS);
6430
6431 hci_dev_lock(hdev);
6432
6433 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6434 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6435 bool changed;
6436
6437 if (cp->val) {
6438 changed = !hci_dev_test_and_set_flag(hdev,
6439 HCI_SC_ENABLED);
6440 if (cp->val == 0x02)
6441 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6442 else
6443 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6444 } else {
6445 changed = hci_dev_test_and_clear_flag(hdev,
6446 HCI_SC_ENABLED);
6447 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6448 }
6449
6450 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6451 if (err < 0)
6452 goto failed;
6453
6454 if (changed)
6455 err = new_settings(hdev, sk);
6456
6457 goto failed;
6458 }
6459
6460 val = !!cp->val;
6461
6462 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6463 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6464 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6465 goto failed;
6466 }
6467
6468 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6469 if (!cmd)
6470 err = -ENOMEM;
6471 else
6472 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6473 set_secure_conn_complete);
6474
6475 if (err < 0) {
6476 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6477 MGMT_STATUS_FAILED);
6478 if (cmd)
6479 mgmt_pending_free(cmd);
6480 }
6481
6482 failed:
6483 hci_dev_unlock(hdev);
6484 return err;
6485 }
6486
6487 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6488 void *data, u16 len)
6489 {
6490 struct mgmt_mode *cp = data;
6491 bool changed, use_changed;
6492 int err;
6493
6494 bt_dev_dbg(hdev, "sock %p", sk);
6495
6496 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6497 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6498 MGMT_STATUS_INVALID_PARAMS);
6499
6500 hci_dev_lock(hdev);
6501
6502 if (cp->val)
6503 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6504 else
6505 changed = hci_dev_test_and_clear_flag(hdev,
6506 HCI_KEEP_DEBUG_KEYS);
6507
6508 if (cp->val == 0x02)
6509 use_changed = !hci_dev_test_and_set_flag(hdev,
6510 HCI_USE_DEBUG_KEYS);
6511 else
6512 use_changed = hci_dev_test_and_clear_flag(hdev,
6513 HCI_USE_DEBUG_KEYS);
6514
6515 if (hdev_is_powered(hdev) && use_changed &&
6516 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6517 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6518 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6519 sizeof(mode), &mode);
6520 }
6521
6522 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6523 if (err < 0)
6524 goto unlock;
6525
6526 if (changed)
6527 err = new_settings(hdev, sk);
6528
6529 unlock:
6530 hci_dev_unlock(hdev);
6531 return err;
6532 }
6533
6534 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6535 u16 len)
6536 {
6537 struct mgmt_cp_set_privacy *cp = cp_data;
6538 bool changed;
6539 int err;
6540
6541 bt_dev_dbg(hdev, "sock %p", sk);
6542
6543 if (!lmp_le_capable(hdev))
6544 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6545 MGMT_STATUS_NOT_SUPPORTED);
6546
6547 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6548 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6549 MGMT_STATUS_INVALID_PARAMS);
6550
6551 if (hdev_is_powered(hdev))
6552 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6553 MGMT_STATUS_REJECTED);
6554
6555 hci_dev_lock(hdev);
6556
6557 /* If user space supports this command it is also expected to
6558  * handle IRKs properly, so set the RPA resolving flag.
6559  */
6560 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6561
6562 if (cp->privacy) {
6563 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6564 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6565 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6566 hci_adv_instances_set_rpa_expired(hdev, true);
6567 if (cp->privacy == 0x02)
6568 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6569 else
6570 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6571 } else {
6572 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6573 memset(hdev->irk, 0, sizeof(hdev->irk));
6574 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6575 hci_adv_instances_set_rpa_expired(hdev, false);
6576 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6577 }
6578
6579 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6580 if (err < 0)
6581 goto unlock;
6582
6583 if (changed)
6584 err = new_settings(hdev, sk);
6585
6586 unlock:
6587 hci_dev_unlock(hdev);
6588 return err;
6589 }
6590
6591 static bool irk_is_valid(struct mgmt_irk_info *irk)
6592 {
6593 switch (irk->addr.type) {
6594 case BDADDR_LE_PUBLIC:
6595 return true;
6596
6597 case BDADDR_LE_RANDOM:
6598 /* Two most significant bits of a static address shall be set */
6599 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6600 return false;
6601 return true;
6602 }
6603
6604 return false;
6605 }
6606
6607 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6608 u16 len)
6609 {
6610 struct mgmt_cp_load_irks *cp = cp_data;
6611 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6612 sizeof(struct mgmt_irk_info));
6613 u16 irk_count, expected_len;
6614 int i, err;
6615
6616 bt_dev_dbg(hdev, "sock %p", sk);
6617
6618 if (!lmp_le_capable(hdev))
6619 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6620 MGMT_STATUS_NOT_SUPPORTED);
6621
6622 irk_count = __le16_to_cpu(cp->irk_count);
6623 if (irk_count > max_irk_count) {
6624 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6625 irk_count);
6626 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6627 MGMT_STATUS_INVALID_PARAMS);
6628 }
6629
6630 expected_len = struct_size(cp, irks, irk_count);
6631 if (expected_len != len) {
6632 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6633 expected_len, len);
6634 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6635 MGMT_STATUS_INVALID_PARAMS);
6636 }
6637
6638 bt_dev_dbg(hdev, "irk_count %u", irk_count);
6639
6640 for (i = 0; i < irk_count; i++) {
6641 struct mgmt_irk_info *key = &cp->irks[i];
6642
6643 if (!irk_is_valid(key))
6644 return mgmt_cmd_status(sk, hdev->id,
6645 MGMT_OP_LOAD_IRKS,
6646 MGMT_STATUS_INVALID_PARAMS);
6647 }
6648
6649 hci_dev_lock(hdev);
6650
6651 hci_smp_irks_clear(hdev);
6652
6653 for (i = 0; i < irk_count; i++) {
6654 struct mgmt_irk_info *irk = &cp->irks[i];
6655
6656 if (hci_is_blocked_key(hdev,
6657 HCI_BLOCKED_KEY_TYPE_IRK,
6658 irk->val)) {
6659 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6660 &irk->addr.bdaddr);
6661 continue;
6662 }
6663
6664 hci_add_irk(hdev, &irk->addr.bdaddr,
6665 le_addr_type(irk->addr.type), irk->val,
6666 BDADDR_ANY);
6667 }
6668
6669 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6670
6671 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6672
6673 hci_dev_unlock(hdev);
6674
6675 return err;
6676 }
6677
6678 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6679 {
6680 if (key->initiator != 0x00 && key->initiator != 0x01)
6681 return false;
6682
6683 switch (key->addr.type) {
6684 case BDADDR_LE_PUBLIC:
6685 return true;
6686
6687 case BDADDR_LE_RANDOM:
6688 /* Two most significant bits of a static address shall be set */
6689 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6690 return false;
6691 return true;
6692 }
6693
6694 return false;
6695 }
6696
6697 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6698 void *cp_data, u16 len)
6699 {
6700 struct mgmt_cp_load_long_term_keys *cp = cp_data;
6701 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6702 sizeof(struct mgmt_ltk_info));
6703 u16 key_count, expected_len;
6704 int i, err;
6705
6706 bt_dev_dbg(hdev, "sock %p", sk);
6707
6708 if (!lmp_le_capable(hdev))
6709 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6710 MGMT_STATUS_NOT_SUPPORTED);
6711
6712 key_count = __le16_to_cpu(cp->key_count);
6713 if (key_count > max_key_count) {
6714 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6715 key_count);
6716 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6717 MGMT_STATUS_INVALID_PARAMS);
6718 }
6719
6720 expected_len = struct_size(cp, keys, key_count);
6721 if (expected_len != len) {
6722 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6723 expected_len, len);
6724 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6725 MGMT_STATUS_INVALID_PARAMS);
6726 }
6727
6728 bt_dev_dbg(hdev, "key_count %u", key_count);
6729
6730 for (i = 0; i < key_count; i++) {
6731 struct mgmt_ltk_info *key = &cp->keys[i];
6732
6733 if (!ltk_is_valid(key))
6734 return mgmt_cmd_status(sk, hdev->id,
6735 MGMT_OP_LOAD_LONG_TERM_KEYS,
6736 MGMT_STATUS_INVALID_PARAMS);
6737 }
6738
6739 hci_dev_lock(hdev);
6740
6741 hci_smp_ltks_clear(hdev);
6742
6743 for (i = 0; i < key_count; i++) {
6744 struct mgmt_ltk_info *key = &cp->keys[i];
6745 u8 type, authenticated;
6746
6747 if (hci_is_blocked_key(hdev,
6748 HCI_BLOCKED_KEY_TYPE_LTK,
6749 key->val)) {
6750 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
6751 &key->addr.bdaddr);
6752 continue;
6753 }
6754
6755 switch (key->type) {
6756 case MGMT_LTK_UNAUTHENTICATED:
6757 authenticated = 0x00;
6758 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6759 break;
6760 case MGMT_LTK_AUTHENTICATED:
6761 authenticated = 0x01;
6762 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6763 break;
6764 case MGMT_LTK_P256_UNAUTH:
6765 authenticated = 0x00;
6766 type = SMP_LTK_P256;
6767 break;
6768 case MGMT_LTK_P256_AUTH:
6769 authenticated = 0x01;
6770 type = SMP_LTK_P256;
6771 break;
6772 case MGMT_LTK_P256_DEBUG:
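/* Debug keys are deliberately never stored: fall through to the
 * default case below so this entry is skipped.
 */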
6773 authenticated = 0x00;
6774 type = SMP_LTK_P256_DEBUG;
6775 fallthrough;
6776 default:
6777 continue;
6778 }
6779
6780 hci_add_ltk(hdev, &key->addr.bdaddr,
6781 le_addr_type(key->addr.type), type, authenticated,
6782 key->val, key->enc_size, key->ediv, key->rand);
6783 }
6784
6785 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6786 NULL, 0);
6787
6788 hci_dev_unlock(hdev);
6789
6790 return err;
6791 }
6792
6793 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
6794 {
6795 struct mgmt_pending_cmd *cmd = data;
6796 struct hci_conn *conn = cmd->user_data;
6797 struct mgmt_cp_get_conn_info *cp = cmd->param;
6798 struct mgmt_rp_get_conn_info rp;
6799 u8 status;
6800
6801 bt_dev_dbg(hdev, "err %d", err);
6802
6803 memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
6804
6805 status = mgmt_status(err);
6806 if (status == MGMT_STATUS_SUCCESS) {
6807 rp.rssi = conn->rssi;
6808 rp.tx_power = conn->tx_power;
6809 rp.max_tx_power = conn->max_tx_power;
6810 } else {
6811 rp.rssi = HCI_RSSI_INVALID;
6812 rp.tx_power = HCI_TX_POWER_INVALID;
6813 rp.max_tx_power = HCI_TX_POWER_INVALID;
6814 }
6815
6816 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
6817 &rp, sizeof(rp));
6818
6819 mgmt_pending_free(cmd);
6820 }
6821
6822 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
6823 {
6824 struct mgmt_pending_cmd *cmd = data;
6825 struct mgmt_cp_get_conn_info *cp = cmd->param;
6826 struct hci_conn *conn;
6827 int err;
6828 __le16 handle;
6829
6830 /* Make sure we are still connected */
6831 if (cp->addr.type == BDADDR_BREDR)
6832 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6833 &cp->addr.bdaddr);
6834 else
6835 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6836
6837 if (!conn || conn->state != BT_CONNECTED)
6838 return MGMT_STATUS_NOT_CONNECTED;
6839
6840 cmd->user_data = conn;
6841 handle = cpu_to_le16(conn->handle);
6842
6843 /* Refresh RSSI each time */
6844 err = hci_read_rssi_sync(hdev, handle);
6845
6846 /* For LE links TX power does not change, thus we don't need to
6847  * query for it once the value is known.
6848  */
6849 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
6850 conn->tx_power == HCI_TX_POWER_INVALID))
6851 err = hci_read_tx_power_sync(hdev, handle, 0x00);
6852
6853 /* Max TX power needs to be read only once per connection */
6854 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
6855 err = hci_read_tx_power_sync(hdev, handle, 0x01);
6856
6857 return err;
6858 }
6859
6860 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6861 u16 len)
6862 {
6863 struct mgmt_cp_get_conn_info *cp = data;
6864 struct mgmt_rp_get_conn_info rp;
6865 struct hci_conn *conn;
6866 unsigned long conn_info_age;
6867 int err = 0;
6868
6869 bt_dev_dbg(hdev, "sock %p", sk);
6870
6871 memset(&rp, 0, sizeof(rp));
6872 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6873 rp.addr.type = cp->addr.type;
6874
6875 if (!bdaddr_type_is_valid(cp->addr.type))
6876 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6877 MGMT_STATUS_INVALID_PARAMS,
6878 &rp, sizeof(rp));
6879
6880 hci_dev_lock(hdev);
6881
6882 if (!hdev_is_powered(hdev)) {
6883 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6884 MGMT_STATUS_NOT_POWERED, &rp,
6885 sizeof(rp));
6886 goto unlock;
6887 }
6888
6889 if (cp->addr.type == BDADDR_BREDR)
6890 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6891 &cp->addr.bdaddr);
6892 else
6893 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6894
6895 if (!conn || conn->state != BT_CONNECTED) {
6896 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6897 MGMT_STATUS_NOT_CONNECTED, &rp,
6898 sizeof(rp));
6899 goto unlock;
6900 }
6901
6902 /* To avoid the client trying to guess when to poll again for
6903  * information, the conn info age is a random value between min/max.
6904  */
6905 conn_info_age = hdev->conn_info_min_age +
6906 prandom_u32_max(hdev->conn_info_max_age -
6907 hdev->conn_info_min_age);
6908
6909 /* Query the controller to refresh the cached values if they are
6910  * too old or have never been read.
6911  */
6912 if (time_after(jiffies, conn->conn_info_timestamp +
6913 msecs_to_jiffies(conn_info_age)) ||
6914 !conn->conn_info_timestamp) {
6915 struct mgmt_pending_cmd *cmd;
6916
6917 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
6918 len);
6919 if (!cmd) {
6920 err = -ENOMEM;
6921 } else {
6922 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
6923 cmd, get_conn_info_complete);
6924 }
6925
6926 if (err < 0) {
6927 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6928 MGMT_STATUS_FAILED, &rp, sizeof(rp));
6929
6930 if (cmd)
6931 mgmt_pending_free(cmd);
6932
6933 goto unlock;
6934 }
6935
6936 conn->conn_info_timestamp = jiffies;
6937 } else {
6938 /* Cache is still valid, just reply with the cached values */
6939 rp.rssi = conn->rssi;
6940 rp.tx_power = conn->tx_power;
6941 rp.max_tx_power = conn->max_tx_power;
6942
6943 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6944 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6945 }
6946
6947 unlock:
6948 hci_dev_unlock(hdev);
6949 return err;
6950 }
6951
6952 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
6953 {
6954 struct mgmt_pending_cmd *cmd = data;
6955 struct mgmt_cp_get_clock_info *cp = cmd->param;
6956 struct mgmt_rp_get_clock_info rp;
6957 struct hci_conn *conn = cmd->user_data;
6958 u8 status = mgmt_status(err);
6959
6960 bt_dev_dbg(hdev, "err %d", err);
6961
6962 memset(&rp, 0, sizeof(rp));
6963 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6964 rp.addr.type = cp->addr.type;
6965
6966 if (err)
6967 goto complete;
6968
6969 rp.local_clock = cpu_to_le32(hdev->clock);
6970
6971 if (conn) {
6972 rp.piconet_clock = cpu_to_le32(conn->clock);
6973 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6974 }
6975
6976 complete:
6977 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6978 sizeof(rp));
6979
6980 mgmt_pending_free(cmd);
6981 }
6982
6983 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
6984 {
6985 struct mgmt_pending_cmd *cmd = data;
6986 struct mgmt_cp_get_clock_info *cp = cmd->param;
6987 struct hci_cp_read_clock hci_cp;
6988 struct hci_conn *conn;
6989
6990 memset(&hci_cp, 0, sizeof(hci_cp));
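/* First read the local clock; handle and which are left at zero */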
6991 hci_read_clock_sync(hdev, &hci_cp);
6992
6993 /* Make sure the connection still exists */
6994 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
6995 if (!conn || conn->state != BT_CONNECTED)
6996 return MGMT_STATUS_NOT_CONNECTED;
6997
6998 cmd->user_data = conn;
6999 hci_cp.handle = cpu_to_le16(conn->handle);
7000 hci_cp.which = 0x01;
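/* which = 0x01 selects the piconet clock of this connection
 * (0x00 would be the local clock read above).
 */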
7001
7002 return hci_read_clock_sync(hdev, &hci_cp);
7003 }
7004
7005 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7006 u16 len)
7007 {
7008 struct mgmt_cp_get_clock_info *cp = data;
7009 struct mgmt_rp_get_clock_info rp;
7010 struct mgmt_pending_cmd *cmd;
7011 struct hci_conn *conn;
7012 int err;
7013
7014 bt_dev_dbg(hdev, "sock %p", sk);
7015
7016 memset(&rp, 0, sizeof(rp));
7017 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7018 rp.addr.type = cp->addr.type;
7019
7020 if (cp->addr.type != BDADDR_BREDR)
7021 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7022 MGMT_STATUS_INVALID_PARAMS,
7023 &rp, sizeof(rp));
7024
7025 hci_dev_lock(hdev);
7026
7027 if (!hdev_is_powered(hdev)) {
7028 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7029 MGMT_STATUS_NOT_POWERED, &rp,
7030 sizeof(rp));
7031 goto unlock;
7032 }
7033
7034 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7035 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7036 &cp->addr.bdaddr);
7037 if (!conn || conn->state != BT_CONNECTED) {
7038 err = mgmt_cmd_complete(sk, hdev->id,
7039 MGMT_OP_GET_CLOCK_INFO,
7040 MGMT_STATUS_NOT_CONNECTED,
7041 &rp, sizeof(rp));
7042 goto unlock;
7043 }
7044 } else {
7045 conn = NULL;
7046 }
7047
7048 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7049 if (!cmd)
7050 err = -ENOMEM;
7051 else
7052 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7053 get_clock_info_complete);
7054
7055 if (err < 0) {
7056 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7057 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7058
7059 if (cmd)
7060 mgmt_pending_free(cmd);
7061 }
7062
7063
7064 unlock:
7065 hci_dev_unlock(hdev);
7066 return err;
7067 }
7068
7069 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7070 {
7071 struct hci_conn *conn;
7072
7073 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7074 if (!conn)
7075 return false;
7076
7077 if (conn->dst_type != type)
7078 return false;
7079
7080 if (conn->state != BT_CONNECTED)
7081 return false;
7082
7083 return true;
7084 }
7085
7086 /* This function requires the caller holds hdev->lock */
7087 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7088 u8 addr_type, u8 auto_connect)
7089 {
7090 struct hci_conn_params *params;
7091
7092 params = hci_conn_params_add(hdev, addr, addr_type);
7093 if (!params)
7094 return -EIO;
7095
7096 if (params->auto_connect == auto_connect)
7097 return 0;
7098
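/* Drop the entry from whichever action list it is currently on;
 * it is re-added below according to the new auto_connect value.
 */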
7099 list_del_init(&params->action);
7100
7101 switch (auto_connect) {
7102 case HCI_AUTO_CONN_DISABLED:
7103 case HCI_AUTO_CONN_LINK_LOSS:
7104 /* If auto connect is being disabled when we're trying to
7105  * connect to a device, keep connecting.
7106  */
7107 if (params->explicit_connect)
7108 list_add(&params->action, &hdev->pend_le_conns);
7109 break;
7110 case HCI_AUTO_CONN_REPORT:
7111 if (params->explicit_connect)
7112 list_add(&params->action, &hdev->pend_le_conns);
7113 else
7114 list_add(&params->action, &hdev->pend_le_reports);
7115 break;
7116 case HCI_AUTO_CONN_DIRECT:
7117 case HCI_AUTO_CONN_ALWAYS:
7118 if (!is_connected(hdev, addr, addr_type))
7119 list_add(&params->action, &hdev->pend_le_conns);
7120 break;
7121 }
7122
7123 params->auto_connect = auto_connect;
7124
7125 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7126 addr, addr_type, auto_connect);
7127
7128 return 0;
7129 }
7130
7131 static void device_added(struct sock *sk, struct hci_dev *hdev,
7132 bdaddr_t *bdaddr, u8 type, u8 action)
7133 {
7134 struct mgmt_ev_device_added ev;
7135
7136 bacpy(&ev.addr.bdaddr, bdaddr);
7137 ev.addr.type = type;
7138 ev.action = action;
7139
7140 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7141 }
7142
7143 static int add_device_sync(struct hci_dev *hdev, void *data)
7144 {
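/* Re-program background/passive scanning so the newly added entry
 * is taken into account.
 */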
7145 return hci_update_passive_scan_sync(hdev);
7146 }
7147
7148 static int add_device(struct sock *sk, struct hci_dev *hdev,
7149 void *data, u16 len)
7150 {
7151 struct mgmt_cp_add_device *cp = data;
7152 u8 auto_conn, addr_type;
7153 struct hci_conn_params *params;
7154 int err;
7155 u32 current_flags = 0;
7156 u32 supported_flags;
7157
7158 bt_dev_dbg(hdev, "sock %p", sk);
7159
7160 if (!bdaddr_type_is_valid(cp->addr.type) ||
7161 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7162 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7163 MGMT_STATUS_INVALID_PARAMS,
7164 &cp->addr, sizeof(cp->addr));
7165
7166 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7167 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7168 MGMT_STATUS_INVALID_PARAMS,
7169 &cp->addr, sizeof(cp->addr));
7170
7171 hci_dev_lock(hdev);
7172
7173 if (cp->addr.type == BDADDR_BREDR) {
7174 /* Only incoming connections action is supported for now */
7175 if (cp->action != 0x01) {
7176 err = mgmt_cmd_complete(sk, hdev->id,
7177 MGMT_OP_ADD_DEVICE,
7178 MGMT_STATUS_INVALID_PARAMS,
7179 &cp->addr, sizeof(cp->addr));
7180 goto unlock;
7181 }
7182
7183 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7184 &cp->addr.bdaddr,
7185 cp->addr.type, 0);
7186 if (err)
7187 goto unlock;
7188
7189 hci_update_scan(hdev);
7190
7191 goto added;
7192 }
7193
7194 addr_type = le_addr_type(cp->addr.type);
7195
7196 if (cp->action == 0x02)
7197 auto_conn = HCI_AUTO_CONN_ALWAYS;
7198 else if (cp->action == 0x01)
7199 auto_conn = HCI_AUTO_CONN_DIRECT;
7200 else
7201 auto_conn = HCI_AUTO_CONN_REPORT;
7202
7203 /* Kernel internally uses conn_params with resolvable private
7204  * address, but Add Device allows only identity addresses.
7205  * Make sure it is enforced before calling
7206  * hci_conn_params_lookup.
7207  */
7208 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7209 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7210 MGMT_STATUS_INVALID_PARAMS,
7211 &cp->addr, sizeof(cp->addr));
7212 goto unlock;
7213 }
7214
7215 /* If the connection parameters don't exist for this device,
7216  * they will be created and configured with defaults.
7217  */
7218 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7219 auto_conn) < 0) {
7220 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7221 MGMT_STATUS_FAILED, &cp->addr,
7222 sizeof(cp->addr));
7223 goto unlock;
7224 } else {
7225 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7226 addr_type);
7227 if (params)
7228 current_flags = params->flags;
7229 }
7230
7231 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7232 if (err < 0)
7233 goto unlock;
7234
7235 added:
7236 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7237 supported_flags = hdev->conn_flags;
7238 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7239 supported_flags, current_flags);
7240
7241 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7242 MGMT_STATUS_SUCCESS, &cp->addr,
7243 sizeof(cp->addr));
7244
7245 unlock:
7246 hci_dev_unlock(hdev);
7247 return err;
7248 }
7249
7250 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7251 bdaddr_t *bdaddr, u8 type)
7252 {
7253 struct mgmt_ev_device_removed ev;
7254
7255 bacpy(&ev.addr.bdaddr, bdaddr);
7256 ev.addr.type = type;
7257
7258 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7259 }
7260
7261 static int remove_device_sync(struct hci_dev *hdev, void *data)
7262 {
7263 return hci_update_passive_scan_sync(hdev);
7264 }
7265
7266 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7267 void *data, u16 len)
7268 {
7269 struct mgmt_cp_remove_device *cp = data;
7270 int err;
7271
7272 bt_dev_dbg(hdev, "sock %p", sk);
7273
7274 hci_dev_lock(hdev);
7275
7276 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7277 struct hci_conn_params *params;
7278 u8 addr_type;
7279
7280 if (!bdaddr_type_is_valid(cp->addr.type)) {
7281 err = mgmt_cmd_complete(sk, hdev->id,
7282 MGMT_OP_REMOVE_DEVICE,
7283 MGMT_STATUS_INVALID_PARAMS,
7284 &cp->addr, sizeof(cp->addr));
7285 goto unlock;
7286 }
7287
7288 if (cp->addr.type == BDADDR_BREDR) {
7289 err = hci_bdaddr_list_del(&hdev->accept_list,
7290 &cp->addr.bdaddr,
7291 cp->addr.type);
7292 if (err) {
7293 err = mgmt_cmd_complete(sk, hdev->id,
7294 MGMT_OP_REMOVE_DEVICE,
7295 MGMT_STATUS_INVALID_PARAMS,
7296 &cp->addr,
7297 sizeof(cp->addr));
7298 goto unlock;
7299 }
7300
7301 hci_update_scan(hdev);
7302
7303 device_removed(sk, hdev, &cp->addr.bdaddr,
7304 cp->addr.type);
7305 goto complete;
7306 }
7307
7308 addr_type = le_addr_type(cp->addr.type);
7309
7310 /* Kernel internally uses conn_params with resolvable private
7311  * address, but Remove Device allows only identity addresses.
7312  * Make sure it is enforced before calling
7313  * hci_conn_params_lookup.
7314  */
7315 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7316 err = mgmt_cmd_complete(sk, hdev->id,
7317 MGMT_OP_REMOVE_DEVICE,
7318 MGMT_STATUS_INVALID_PARAMS,
7319 &cp->addr, sizeof(cp->addr));
7320 goto unlock;
7321 }
7322
7323 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7324 addr_type);
7325 if (!params) {
7326 err = mgmt_cmd_complete(sk, hdev->id,
7327 MGMT_OP_REMOVE_DEVICE,
7328 MGMT_STATUS_INVALID_PARAMS,
7329 &cp->addr, sizeof(cp->addr));
7330 goto unlock;
7331 }
7332
7333 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7334 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7335 err = mgmt_cmd_complete(sk, hdev->id,
7336 MGMT_OP_REMOVE_DEVICE,
7337 MGMT_STATUS_INVALID_PARAMS,
7338 &cp->addr, sizeof(cp->addr));
7339 goto unlock;
7340 }
7341
7342 list_del(&params->action);
7343 list_del(&params->list);
7344 kfree(params);
7345
7346 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7347 } else {
7348 struct hci_conn_params *p, *tmp;
7349 struct bdaddr_list *b, *btmp;
7350
7351 if (cp->addr.type) {
7352 err = mgmt_cmd_complete(sk, hdev->id,
7353 MGMT_OP_REMOVE_DEVICE,
7354 MGMT_STATUS_INVALID_PARAMS,
7355 &cp->addr, sizeof(cp->addr));
7356 goto unlock;
7357 }
7358
7359 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7360 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7361 list_del(&b->list);
7362 kfree(b);
7363 }
7364
7365 hci_update_scan(hdev);
7366
7367 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7368 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7369 continue;
7370 device_removed(sk, hdev, &p->addr, p->addr_type);
7371 if (p->explicit_connect) {
7372 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7373 continue;
7374 }
7375 list_del(&p->action);
7376 list_del(&p->list);
7377 kfree(p);
7378 }
7379
7380 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7381 }
7382
7383 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7384
7385 complete:
7386 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7387 MGMT_STATUS_SUCCESS, &cp->addr,
7388 sizeof(cp->addr));
7389 unlock:
7390 hci_dev_unlock(hdev);
7391 return err;
7392 }
7393
7394 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7395 u16 len)
7396 {
7397 struct mgmt_cp_load_conn_param *cp = data;
7398 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7399 sizeof(struct mgmt_conn_param));
7400 u16 param_count, expected_len;
7401 int i;
7402
7403 if (!lmp_le_capable(hdev))
7404 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7405 MGMT_STATUS_NOT_SUPPORTED);
7406
7407 param_count = __le16_to_cpu(cp->param_count);
7408 if (param_count > max_param_count) {
7409 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7410 param_count);
7411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7412 MGMT_STATUS_INVALID_PARAMS);
7413 }
7414
7415 expected_len = struct_size(cp, params, param_count);
7416 if (expected_len != len) {
7417 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7418 expected_len, len);
7419 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7420 MGMT_STATUS_INVALID_PARAMS);
7421 }
7422
7423 bt_dev_dbg(hdev, "param_count %u", param_count);
7424
7425 hci_dev_lock(hdev);
7426
7427 hci_conn_params_clear_disabled(hdev);
7428
7429 for (i = 0; i < param_count; i++) {
7430 struct mgmt_conn_param *param = &cp->params[i];
7431 struct hci_conn_params *hci_param;
7432 u16 min, max, latency, timeout;
7433 u8 addr_type;
7434
7435 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7436 param->addr.type);
7437
7438 if (param->addr.type == BDADDR_LE_PUBLIC) {
7439 addr_type = ADDR_LE_DEV_PUBLIC;
7440 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7441 addr_type = ADDR_LE_DEV_RANDOM;
7442 } else {
7443 bt_dev_err(hdev, "ignoring invalid connection parameters");
7444 continue;
7445 }
7446
7447 min = le16_to_cpu(param->min_interval);
7448 max = le16_to_cpu(param->max_interval);
7449 latency = le16_to_cpu(param->latency);
7450 timeout = le16_to_cpu(param->timeout);
7451
7452 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7453 min, max, latency, timeout);
7454
7455 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7456 bt_dev_err(hdev, "ignoring invalid connection parameters");
7457 continue;
7458 }
7459
7460 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7461 addr_type);
7462 if (!hci_param) {
7463 bt_dev_err(hdev, "failed to add connection parameters");
7464 continue;
7465 }
7466
7467 hci_param->conn_min_interval = min;
7468 hci_param->conn_max_interval = max;
7469 hci_param->conn_latency = latency;
7470 hci_param->supervision_timeout = timeout;
7471 }
7472
7473 hci_dev_unlock(hdev);
7474
7475 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7476 NULL, 0);
7477 }
7478
7479 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7480 void *data, u16 len)
7481 {
7482 struct mgmt_cp_set_external_config *cp = data;
7483 bool changed;
7484 int err;
7485
7486 bt_dev_dbg(hdev, "sock %p", sk);
7487
7488 if (hdev_is_powered(hdev))
7489 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7490 MGMT_STATUS_REJECTED);
7491
7492 if (cp->config != 0x00 && cp->config != 0x01)
7493 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7494 MGMT_STATUS_INVALID_PARAMS);
7495
7496 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7497 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7498 MGMT_STATUS_NOT_SUPPORTED);
7499
7500 hci_dev_lock(hdev);
7501
7502 if (cp->config)
7503 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7504 else
7505 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7506
7507 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7508 if (err < 0)
7509 goto unlock;
7510
7511 if (!changed)
7512 goto unlock;
7513
7514 err = new_options(hdev, sk);
7515
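/* If the unconfigured flag no longer matches the actual configuration
 * state, the controller switches index lists: remove it from the
 * current list and announce it again as configured or unconfigured.
 */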
7516 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7517 mgmt_index_removed(hdev);
7518
7519 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7520 hci_dev_set_flag(hdev, HCI_CONFIG);
7521 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7522
7523 queue_work(hdev->req_workqueue, &hdev->power_on);
7524 } else {
7525 set_bit(HCI_RAW, &hdev->flags);
7526 mgmt_index_added(hdev);
7527 }
7528 }
7529
7530 unlock:
7531 hci_dev_unlock(hdev);
7532 return err;
7533 }
7534
7535 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7536 void *data, u16 len)
7537 {
7538 struct mgmt_cp_set_public_address *cp = data;
7539 bool changed;
7540 int err;
7541
7542 bt_dev_dbg(hdev, "sock %p", sk);
7543
7544 if (hdev_is_powered(hdev))
7545 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7546 MGMT_STATUS_REJECTED);
7547
7548 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7549 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7550 MGMT_STATUS_INVALID_PARAMS);
7551
7552 if (!hdev->set_bdaddr)
7553 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7554 MGMT_STATUS_NOT_SUPPORTED);
7555
7556 hci_dev_lock(hdev);
7557
7558 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7559 bacpy(&hdev->public_addr, &cp->bdaddr);
7560
7561 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7562 if (err < 0)
7563 goto unlock;
7564
7565 if (!changed)
7566 goto unlock;
7567
7568 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7569 err = new_options(hdev, sk);
7570
7571 if (is_configured(hdev)) {
7572 mgmt_index_removed(hdev);
7573
7574 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7575
7576 hci_dev_set_flag(hdev, HCI_CONFIG);
7577 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7578
7579 queue_work(hdev->req_workqueue, &hdev->power_on);
7580 }
7581
7582 unlock:
7583 hci_dev_unlock(hdev);
7584 return err;
7585 }
7586
7587 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
7588 int err)
7589 {
7590 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7591 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7592 u8 *h192, *r192, *h256, *r256;
7593 struct mgmt_pending_cmd *cmd = data;
7594 struct sk_buff *skb = cmd->skb;
7595 u8 status = mgmt_status(err);
7596 u16 eir_len;
7597
7598 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
7599 return;
7600
7601 if (!status) {
7602 if (!skb)
7603 status = MGMT_STATUS_FAILED;
7604 else if (IS_ERR(skb))
7605 status = mgmt_status(PTR_ERR(skb));
7606 else
7607 status = mgmt_status(skb->data[0]);
7608 }
7609
7610 bt_dev_dbg(hdev, "status %u", status);
7611
7612 mgmt_cp = cmd->param;
7613
7614 if (status) {
7615 status = mgmt_status(status);
7616 eir_len = 0;
7617
7618 h192 = NULL;
7619 r192 = NULL;
7620 h256 = NULL;
7621 r256 = NULL;
7622 } else if (!bredr_sc_enabled(hdev)) {
7623 struct hci_rp_read_local_oob_data *rp;
7624
7625 if (skb->len != sizeof(*rp)) {
7626 status = MGMT_STATUS_FAILED;
7627 eir_len = 0;
7628 } else {
7629 status = MGMT_STATUS_SUCCESS;
7630 rp = (void *)skb->data;
7631
7632 eir_len = 5 + 18 + 18;
7633 h192 = rp->hash;
7634 r192 = rp->rand;
7635 h256 = NULL;
7636 r256 = NULL;
7637 }
7638 } else {
7639 struct hci_rp_read_local_oob_ext_data *rp;
7640
7641 if (skb->len != sizeof(*rp)) {
7642 status = MGMT_STATUS_FAILED;
7643 eir_len = 0;
7644 } else {
7645 status = MGMT_STATUS_SUCCESS;
7646 rp = (void *)skb->data;
7647
7648 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7649 eir_len = 5 + 18 + 18;
7650 h192 = NULL;
7651 r192 = NULL;
7652 } else {
7653 eir_len = 5 + 18 + 18 + 18 + 18;
7654 h192 = rp->hash192;
7655 r192 = rp->rand192;
7656 }
7657
7658 h256 = rp->hash256;
7659 r256 = rp->rand256;
7660 }
7661 }
7662
7663 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7664 if (!mgmt_rp)
7665 goto done;
7666
7667 if (eir_len == 0)
7668 goto send_rsp;
7669
7670 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7671 hdev->dev_class, 3);
7672
7673 if (h192 && r192) {
7674 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7675 EIR_SSP_HASH_C192, h192, 16);
7676 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7677 EIR_SSP_RAND_R192, r192, 16);
7678 }
7679
7680 if (h256 && r256) {
7681 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7682 EIR_SSP_HASH_C256, h256, 16);
7683 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7684 EIR_SSP_RAND_R256, r256, 16);
7685 }
7686
7687 send_rsp:
7688 mgmt_rp->type = mgmt_cp->type;
7689 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7690
7691 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7692 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7693 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7694 if (err < 0 || status)
7695 goto done;
7696
7697 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7698
7699 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7700 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7701 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7702 done:
7703 if (skb && !IS_ERR(skb))
7704 kfree_skb(skb);
7705
7706 kfree(mgmt_rp);
7707 mgmt_pending_remove(cmd);
7708 }
7709
7710 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7711 struct mgmt_cp_read_local_oob_ext_data *cp)
7712 {
7713 struct mgmt_pending_cmd *cmd;
7714 int err;
7715
7716 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7717 cp, sizeof(*cp));
7718 if (!cmd)
7719 return -ENOMEM;
7720
7721 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7722 read_local_oob_ext_data_complete);
7723
7724 if (err < 0) {
7725 mgmt_pending_remove(cmd);
7726 return err;
7727 }
7728
7729 return 0;
7730 }
7731
7732 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7733 void *data, u16 data_len)
7734 {
7735 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7736 struct mgmt_rp_read_local_oob_ext_data *rp;
7737 size_t rp_len;
7738 u16 eir_len;
7739 u8 status, flags, role, addr[7], hash[16], rand[16];
7740 int err;
7741
7742 bt_dev_dbg(hdev, "sock %p", sk);
7743
7744 if (hdev_is_powered(hdev)) {
7745 switch (cp->type) {
7746 case BIT(BDADDR_BREDR):
7747 status = mgmt_bredr_support(hdev);
7748 if (status)
7749 eir_len = 0;
7750 else
7751 eir_len = 5;
7752 break;
7753 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7754 status = mgmt_le_support(hdev);
7755 if (status)
7756 eir_len = 0;
7757 else
7758 eir_len = 9 + 3 + 18 + 18 + 3;
7759 break;
7760 default:
7761 status = MGMT_STATUS_INVALID_PARAMS;
7762 eir_len = 0;
7763 break;
7764 }
7765 } else {
7766 status = MGMT_STATUS_NOT_POWERED;
7767 eir_len = 0;
7768 }
7769
7770 rp_len = sizeof(*rp) + eir_len;
7771 rp = kmalloc(rp_len, GFP_ATOMIC);
7772 if (!rp)
7773 return -ENOMEM;
7774
7775 if (!status && !lmp_ssp_capable(hdev)) {
7776 status = MGMT_STATUS_NOT_SUPPORTED;
7777 eir_len = 0;
7778 }
7779
7780 if (status)
7781 goto complete;
7782
7783 hci_dev_lock(hdev);
7784
7785 eir_len = 0;
7786 switch (cp->type) {
7787 case BIT(BDADDR_BREDR):
7788 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7789 err = read_local_ssp_oob_req(hdev, sk, cp);
7790 hci_dev_unlock(hdev);
7791 if (!err)
7792 goto done;
7793
7794 status = MGMT_STATUS_FAILED;
7795 goto complete;
7796 } else {
7797 eir_len = eir_append_data(rp->eir, eir_len,
7798 EIR_CLASS_OF_DEV,
7799 hdev->dev_class, 3);
7800 }
7801 break;
7802 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7803 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7804 smp_generate_oob(hdev, hash, rand) < 0) {
7805 hci_dev_unlock(hdev);
7806 status = MGMT_STATUS_FAILED;
7807 goto complete;
7808 }
7809
7810 /* This should ideally return the active resolvable private
7811  * address (RPA), but the RPA is only generated on demand and so
7812  * there is no stable value to place into the out-of-band data.
7813  *
7814  * Returning the identity address would not help either, since
7815  * pairing happens before the identity resolving key of this
7816  * device has been distributed, i.e. the connection will be
7817  * established based on the RPA. So simply reject the request
7818  * while privacy is enabled.
7819  */
7820 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7821 hci_dev_unlock(hdev);
7822 status = MGMT_STATUS_REJECTED;
7823 goto complete;
7824 }
7825
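/* Advertise the static random address when it is forced, when no
 * public address is available, or when BR/EDR is disabled and a
 * static address has been configured. The trailing byte of the
 * OOB address field encodes the type: 0x01 random, 0x00 public.
 */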
7826 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7827 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7828 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7829 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7830 memcpy(addr, &hdev->static_addr, 6);
7831 addr[6] = 0x01;
7832 } else {
7833 memcpy(addr, &hdev->bdaddr, 6);
7834 addr[6] = 0x00;
7835 }
7836
7837 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7838 addr, sizeof(addr));
7839
7840 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7841 role = 0x02;
7842 else
7843 role = 0x01;
7844
7845 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7846 &role, sizeof(role));
7847
7848 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7849 eir_len = eir_append_data(rp->eir, eir_len,
7850 EIR_LE_SC_CONFIRM,
7851 hash, sizeof(hash));
7852
7853 eir_len = eir_append_data(rp->eir, eir_len,
7854 EIR_LE_SC_RANDOM,
7855 rand, sizeof(rand));
7856 }
7857
7858 flags = mgmt_get_adv_discov_flags(hdev);
7859
7860 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7861 flags |= LE_AD_NO_BREDR;
7862
7863 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7864 &flags, sizeof(flags));
7865 break;
7866 }
7867
7868 hci_dev_unlock(hdev);
7869
7870 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7871
7872 status = MGMT_STATUS_SUCCESS;
7873
7874 complete:
7875 rp->type = cp->type;
7876 rp->eir_len = cpu_to_le16(eir_len);
7877
7878 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7879 status, rp, sizeof(*rp) + eir_len);
7880 if (err < 0 || status)
7881 goto done;
7882
7883 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7884 rp, sizeof(*rp) + eir_len,
7885 HCI_MGMT_OOB_DATA_EVENTS, sk);
7886
7887 done:
7888 kfree(rp);
7889
7890 return err;
7891 }
7892
7893 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7894 {
7895 u32 flags = 0;
7896
7897 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7898 flags |= MGMT_ADV_FLAG_DISCOV;
7899 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7900 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7901 flags |= MGMT_ADV_FLAG_APPEARANCE;
7902 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7903 flags |= MGMT_ADV_PARAM_DURATION;
7904 flags |= MGMT_ADV_PARAM_TIMEOUT;
7905 flags |= MGMT_ADV_PARAM_INTERVALS;
7906 flags |= MGMT_ADV_PARAM_TX_POWER;
7907 flags |= MGMT_ADV_PARAM_SCAN_RSP;
7908
7909 /* In extended advertising the TX power returned from setting the
7910  * advertising parameters will always be valid.
7911  */
7912 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7913 ext_adv_capable(hdev))
7914 flags |= MGMT_ADV_FLAG_TX_POWER;
7915
7916 if (ext_adv_capable(hdev)) {
7917 flags |= MGMT_ADV_FLAG_SEC_1M;
7918 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7919 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7920
7921 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7922 flags |= MGMT_ADV_FLAG_SEC_2M;
7923
7924 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7925 flags |= MGMT_ADV_FLAG_SEC_CODED;
7926 }
7927
7928 return flags;
7929 }
7930
7931 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7932 void *data, u16 data_len)
7933 {
7934 struct mgmt_rp_read_adv_features *rp;
7935 size_t rp_len;
7936 int err;
7937 struct adv_info *adv_instance;
7938 u32 supported_flags;
7939 u8 *instance;
7940
7941 bt_dev_dbg(hdev, "sock %p", sk);
7942
7943 if (!lmp_le_capable(hdev))
7944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7945 MGMT_STATUS_REJECTED);
7946
7947 hci_dev_lock(hdev);
7948
7949 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7950 rp = kmalloc(rp_len, GFP_ATOMIC);
7951 if (!rp) {
7952 hci_dev_unlock(hdev);
7953 return -ENOMEM;
7954 }
7955
7956 supported_flags = get_supported_adv_flags(hdev);
7957
7958 rp->supported_flags = cpu_to_le32(supported_flags);
7959 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7960 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7961 rp->max_instances = hdev->le_num_of_adv_sets;
7962 rp->num_instances = hdev->adv_instance_cnt;
7963
7964 instance = rp->instance;
7965 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7966 *instance = adv_instance->instance;
7967 instance++;
7968 }
7969
7970 hci_dev_unlock(hdev);
7971
7972 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7973 MGMT_STATUS_SUCCESS, rp, rp_len);
7974
7975 kfree(rp);
7976
7977 return err;
7978 }
7979
7980 static u8 calculate_name_len(struct hci_dev *hdev)
7981 {
7982 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7983
7984 return eir_append_local_name(hdev, buf, 0);
7985 }
7986
7987 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7988 bool is_adv_data)
7989 {
7990 u8 max_len = HCI_MAX_AD_LENGTH;
7991
7992 if (is_adv_data) {
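/* Kernel-managed flags occupy a 3 byte AD field (length, type,
 * flags value) in the advertising data.
 */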
7993 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7994 MGMT_ADV_FLAG_LIMITED_DISCOV |
7995 MGMT_ADV_FLAG_MANAGED_FLAGS))
7996 max_len -= 3;
7997
7998 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7999 max_len -= 3;
8000 } else {
8001 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8002 max_len -= calculate_name_len(hdev);
8003
8004 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8005 max_len -= 4;
8006 }
8007
8008 return max_len;
8009 }
8010
8011 static bool flags_managed(u32 adv_flags)
8012 {
8013 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8014 MGMT_ADV_FLAG_LIMITED_DISCOV |
8015 MGMT_ADV_FLAG_MANAGED_FLAGS);
8016 }
8017
8018 static bool tx_power_managed(u32 adv_flags)
8019 {
8020 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8021 }
8022
8023 static bool name_managed(u32 adv_flags)
8024 {
8025 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8026 }
8027
8028 static bool appearance_managed(u32 adv_flags)
8029 {
8030 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8031 }
8032
8033 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8034 u8 len, bool is_adv_data)
8035 {
8036 int i, cur_len;
8037 u8 max_len;
8038
8039 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8040
8041 if (len > max_len)
8042 return false;
8043
8044
8045 for (i = 0; i < len; i += (cur_len + 1)) {
8046 cur_len = data[i];
8047
8048 if (!cur_len)
8049 continue;
8050
8051 if (data[i + 1] == EIR_FLAGS &&
8052 (!is_adv_data || flags_managed(adv_flags)))
8053 return false;
8054
8055 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8056 return false;
8057
8058 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8059 return false;
8060
8061 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8062 return false;
8063
8064 if (data[i + 1] == EIR_APPEARANCE &&
8065 appearance_managed(adv_flags))
8066 return false;
8067
8068 /* If the current field length would exceed the total data
8069  * length, then it's invalid.
8070  */
8071 if (i + cur_len >= len)
8072 return false;
8073 }
8074
8075 return true;
8076 }
8077
8078 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8079 {
8080 u32 supported_flags, phy_flags;
8081
8082 /* The current implementation only supports a subset of the specified
8083  * flags. Also, only one secondary PHY flag may be set at a time.
8084  */
8085 supported_flags = get_supported_adv_flags(hdev);
8086 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8087 if (adv_flags & ~supported_flags ||
8088 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8089 return false;
8090
8091 return true;
8092 }
8093
8094 static bool adv_busy(struct hci_dev *hdev)
8095 {
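/* Advertising instances cannot be changed while a Set LE command
 * is still being processed.
 */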
8096 return pending_find(MGMT_OP_SET_LE, hdev);
8097 }
8098
8099 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8100 int err)
8101 {
8102 struct adv_info *adv, *n;
8103
8104 bt_dev_dbg(hdev, "err %d", err);
8105
8106 hci_dev_lock(hdev);
8107
8108 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8109 u8 instance;
8110
8111 if (!adv->pending)
8112 continue;
8113
8114 if (!err) {
8115 adv->pending = false;
8116 continue;
8117 }
8118
8119 instance = adv->instance;
8120
8121 if (hdev->cur_adv_instance == instance)
8122 cancel_adv_timeout(hdev);
8123
8124 hci_remove_adv_instance(hdev, instance);
8125 mgmt_advertising_removed(sk, hdev, instance);
8126 }
8127
8128 hci_dev_unlock(hdev);
8129 }
8130
8131 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8132 {
8133 struct mgmt_pending_cmd *cmd = data;
8134 struct mgmt_cp_add_advertising *cp = cmd->param;
8135 struct mgmt_rp_add_advertising rp;
8136
8137 memset(&rp, 0, sizeof(rp));
8138
8139 rp.instance = cp->instance;
8140
8141 if (err)
8142 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8143 mgmt_status(err));
8144 else
8145 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8146 mgmt_status(err), &rp, sizeof(rp));
8147
8148 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8149
8150 mgmt_pending_free(cmd);
8151 }
8152
8153 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8154 {
8155 struct mgmt_pending_cmd *cmd = data;
8156 struct mgmt_cp_add_advertising *cp = cmd->param;
8157
8158 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8159 }
8160
8161 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8162 void *data, u16 data_len)
8163 {
8164 struct mgmt_cp_add_advertising *cp = data;
8165 struct mgmt_rp_add_advertising rp;
8166 u32 flags;
8167 u8 status;
8168 u16 timeout, duration;
8169 unsigned int prev_instance_cnt;
8170 u8 schedule_instance = 0;
8171 struct adv_info *adv, *next_instance;
8172 int err;
8173 struct mgmt_pending_cmd *cmd;
8174
8175 bt_dev_dbg(hdev, "sock %p", sk);
8176
8177 status = mgmt_le_support(hdev);
8178 if (status)
8179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8180 status);
8181
8182 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8183 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8184 MGMT_STATUS_INVALID_PARAMS);
8185
8186 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8187 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8188 MGMT_STATUS_INVALID_PARAMS);
8189
8190 flags = __le32_to_cpu(cp->flags);
8191 timeout = __le16_to_cpu(cp->timeout);
8192 duration = __le16_to_cpu(cp->duration);
8193
8194 if (!requested_adv_flags_are_valid(hdev, flags))
8195 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8196 MGMT_STATUS_INVALID_PARAMS);
8197
8198 hci_dev_lock(hdev);
8199
8200 if (timeout && !hdev_is_powered(hdev)) {
8201 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8202 MGMT_STATUS_REJECTED);
8203 goto unlock;
8204 }
8205
8206 if (adv_busy(hdev)) {
8207 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8208 MGMT_STATUS_BUSY);
8209 goto unlock;
8210 }
8211
8212 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8213 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8214 cp->scan_rsp_len, false)) {
8215 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8216 MGMT_STATUS_INVALID_PARAMS);
8217 goto unlock;
8218 }
8219
8220 prev_instance_cnt = hdev->adv_instance_cnt;
8221
8222 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8223 cp->adv_data_len, cp->data,
8224 cp->scan_rsp_len,
8225 cp->data + cp->adv_data_len,
8226 timeout, duration,
8227 HCI_ADV_TX_POWER_NO_PREFERENCE,
8228 hdev->le_adv_min_interval,
8229 hdev->le_adv_max_interval);
8230 if (IS_ERR(adv)) {
8231 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8232 MGMT_STATUS_FAILED);
8233 goto unlock;
8234 }
8235
8236 /* Only trigger an advertising added event if a new instance was
8237  * actually added.
8238  */
8239 if (hdev->adv_instance_cnt > prev_instance_cnt)
8240 mgmt_advertising_added(sk, hdev, cp->instance);
8241
8242 if (hdev->cur_adv_instance == cp->instance) {
8243 /* If the currently advertised instance is being changed then
8244  * cancel the current advertising and schedule the next
8245  * instance. If there is only one instance then the overridden
8246  * advertising data will be visible right away.
8247  */
8248 cancel_adv_timeout(hdev);
8249
8250 next_instance = hci_get_next_instance(hdev, cp->instance);
8251 if (next_instance)
8252 schedule_instance = next_instance->instance;
8253 } else if (!hdev->adv_instance_timeout) {
8254 /* Immediately advertise the new instance if there is no other
8255  * instance currently being advertised.
8256  */
8257 schedule_instance = cp->instance;
8258 }
8259
8260 /* If the HCI_ADVERTISING flag is set or the device isn't
8261  * powered then we have no HCI communication to make.
8262  * Simply return.
8263  */
8264 if (!hdev_is_powered(hdev) ||
8265 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8266 !schedule_instance) {
8267 rp.instance = cp->instance;
8268 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8269 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8270 goto unlock;
8271 }
8272
8273 /* We're good to go, update advertising data, parameters, and start
8274  * advertising.
8275  */
8276 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8277 data_len);
8278 if (!cmd) {
8279 err = -ENOMEM;
8280 goto unlock;
8281 }
8282
8283 cp->instance = schedule_instance;
8284
8285 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8286 add_advertising_complete);
8287 if (err < 0)
8288 mgmt_pending_free(cmd);
8289
8290 unlock:
8291 hci_dev_unlock(hdev);
8292
8293 return err;
8294 }
8295
8296 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8297 int err)
8298 {
8299 struct mgmt_pending_cmd *cmd = data;
8300 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8301 struct mgmt_rp_add_ext_adv_params rp;
8302 struct adv_info *adv;
8303 u32 flags;
8304
8305 BT_DBG("%s", hdev->name);
8306
8307 hci_dev_lock(hdev);
8308
8309 adv = hci_find_adv_instance(hdev, cp->instance);
8310 if (!adv)
8311 goto unlock;
8312
8313 rp.instance = cp->instance;
8314 rp.tx_power = adv->tx_power;
8315
8316 /* While we're at it, inform userspace of the available space for
8317  * this instance.
8318  */
8319 flags = __le32_to_cpu(cp->flags);
8320 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8321 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8322
8323 if (err) {
8324 /* If this advertisement was previously advertised and we
8325  * failed to update it, signal that it has been removed and
8326  * delete its structure.
8327  */
8328 if (!adv->pending)
8329 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8330
8331 hci_remove_adv_instance(hdev, cp->instance);
8332
8333 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8334 mgmt_status(err));
8335 } else {
8336 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8337 mgmt_status(err), &rp, sizeof(rp));
8338 }
8339
8340 unlock:
8341 if (cmd)
8342 mgmt_pending_free(cmd);
8343
8344 hci_dev_unlock(hdev);
8345 }
8346
8347 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8348 {
8349 struct mgmt_pending_cmd *cmd = data;
8350 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8351
8352 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8353 }
8354
8355 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8356 void *data, u16 data_len)
8357 {
8358 struct mgmt_cp_add_ext_adv_params *cp = data;
8359 struct mgmt_rp_add_ext_adv_params rp;
8360 struct mgmt_pending_cmd *cmd = NULL;
8361 struct adv_info *adv;
8362 u32 flags, min_interval, max_interval;
8363 u16 timeout, duration;
8364 u8 status;
8365 s8 tx_power;
8366 int err;
8367
8368 BT_DBG("%s", hdev->name);
8369
8370 status = mgmt_le_support(hdev);
8371 if (status)
8372 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8373 status);
8374
8375 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8376 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8377 MGMT_STATUS_INVALID_PARAMS);
8378
8379 /* The purpose of breaking add_advertising into two separate MGMT calls
8380  * for params and data is to allow more parameters to be added to this
8381  * structure in the future. For this reason, we verify that we have the
8382  * bare minimum structure we know of when the interface was defined. Any
8383  * extra parameters we don't know about will be ignored in this request.
8384  */
8385 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8386 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8387 MGMT_STATUS_INVALID_PARAMS);
8388
8389 flags = __le32_to_cpu(cp->flags);
8390
8391 if (!requested_adv_flags_are_valid(hdev, flags))
8392 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8393 MGMT_STATUS_INVALID_PARAMS);
8394
8395 hci_dev_lock(hdev);
8396
8397
8398 if (!hdev_is_powered(hdev)) {
8399 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8400 MGMT_STATUS_REJECTED);
8401 goto unlock;
8402 }
8403
8404 if (adv_busy(hdev)) {
8405 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8406 MGMT_STATUS_BUSY);
8407 goto unlock;
8408 }
8409
8410
8411 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8412 __le16_to_cpu(cp->timeout) : 0;
8413
8414 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8415 __le16_to_cpu(cp->duration) :
8416 hdev->def_multi_adv_rotation_duration;
8417
8418 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8419 __le32_to_cpu(cp->min_interval) :
8420 hdev->le_adv_min_interval;
8421
8422 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8423 __le32_to_cpu(cp->max_interval) :
8424 hdev->le_adv_max_interval;
8425
8426 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8427 cp->tx_power :
8428 HCI_ADV_TX_POWER_NO_PREFERENCE;
8429
8430
8431 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8432 timeout, duration, tx_power, min_interval,
8433 max_interval);
8434
8435 if (IS_ERR(adv)) {
8436 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8437 MGMT_STATUS_FAILED);
8438 goto unlock;
8439 }
8440
8441
8442 if (ext_adv_capable(hdev)) {
8443 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8444 data, data_len);
8445 if (!cmd) {
8446 err = -ENOMEM;
8447 hci_remove_adv_instance(hdev, cp->instance);
8448 goto unlock;
8449 }
8450
8451 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8452 add_ext_adv_params_complete);
8453 if (err < 0)
8454 mgmt_pending_free(cmd);
8455 } else {
8456 rp.instance = cp->instance;
8457 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8458 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8459 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8460 err = mgmt_cmd_complete(sk, hdev->id,
8461 MGMT_OP_ADD_EXT_ADV_PARAMS,
8462 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8463 }
8464
8465 unlock:
8466 hci_dev_unlock(hdev);
8467
8468 return err;
8469 }
8470
8471 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8472 {
8473 struct mgmt_pending_cmd *cmd = data;
8474 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8475 struct mgmt_rp_add_advertising rp;
8476
8477 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8478
8479 memset(&rp, 0, sizeof(rp));
8480
8481 rp.instance = cp->instance;
8482
8483 if (err)
8484 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8485 mgmt_status(err));
8486 else
8487 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8488 mgmt_status(err), &rp, sizeof(rp));
8489
8490 mgmt_pending_free(cmd);
8491 }
8492
8493 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8494 {
8495 struct mgmt_pending_cmd *cmd = data;
8496 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8497 int err;
8498
8499 if (ext_adv_capable(hdev)) {
8500 err = hci_update_adv_data_sync(hdev, cp->instance);
8501 if (err)
8502 return err;
8503
8504 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8505 if (err)
8506 return err;
8507
8508 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8509 }
8510
8511 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8512 }
8513
8514 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8515 u16 data_len)
8516 {
8517 struct mgmt_cp_add_ext_adv_data *cp = data;
8518 struct mgmt_rp_add_ext_adv_data rp;
8519 u8 schedule_instance = 0;
8520 struct adv_info *next_instance;
8521 struct adv_info *adv_instance;
8522 int err = 0;
8523 struct mgmt_pending_cmd *cmd;
8524
8525 BT_DBG("%s", hdev->name);
8526
8527 hci_dev_lock(hdev);
8528
8529 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8530
8531 if (!adv_instance) {
8532 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8533 MGMT_STATUS_INVALID_PARAMS);
8534 goto unlock;
8535 }
8536
8537
8538 if (!hdev_is_powered(hdev)) {
8539 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8540 MGMT_STATUS_REJECTED);
8541 goto clear_new_instance;
8542 }
8543
8544 if (adv_busy(hdev)) {
8545 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8546 MGMT_STATUS_BUSY);
8547 goto clear_new_instance;
8548 }
8549
8550
8551 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8552 cp->adv_data_len, true) ||
8553 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8554 cp->adv_data_len, cp->scan_rsp_len, false)) {
8555 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8556 MGMT_STATUS_INVALID_PARAMS);
8557 goto clear_new_instance;
8558 }
8559
8560
8561 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8562 cp->data, cp->scan_rsp_len,
8563 cp->data + cp->adv_data_len);
8564
8565 /* If using software rotation, determine the next instance to use */
8566 if (hdev->cur_adv_instance == cp->instance) {
8567 /* If the currently advertised instance is being changed
8568  * then cancel the current advertising and schedule the
8569  * next instance. If there is only one instance then the
8570  * overridden advertising data will be visible right
8571  * away.
8572  */
8573 cancel_adv_timeout(hdev);
8574
8575 next_instance = hci_get_next_instance(hdev, cp->instance);
8576 if (next_instance)
8577 schedule_instance = next_instance->instance;
8578 } else if (!hdev->adv_instance_timeout) {
8579 /* Immediately advertise the new instance if there is no other
8580  * instance currently being advertised.
8581  */
8582 schedule_instance = cp->instance;
8583 }
8584
8585 /* If the HCI_ADVERTISING flag is set or there is no instance
8586  * to be advertised then we have no HCI communication to make.
8587  * Simply return.
8588  */
8589 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
8590 if (adv_instance->pending) {
8591 mgmt_advertising_added(sk, hdev, cp->instance);
8592 adv_instance->pending = false;
8593 }
8594 rp.instance = cp->instance;
8595 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8596 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8597 goto unlock;
8598 }
8599
8600 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
8601 data_len);
8602 if (!cmd) {
8603 err = -ENOMEM;
8604 goto clear_new_instance;
8605 }
8606
8607 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
8608 add_ext_adv_data_complete);
8609 if (err < 0) {
8610 mgmt_pending_free(cmd);
8611 goto clear_new_instance;
8612 }
8613
8614 /* We were successful in updating data, so trigger advertising_added
8615  * event if this is an instance that wasn't previously advertising.
8616  * If a failure occurs in the requests we initiated, the instance is
8617  * removed again in add_ext_adv_data_complete.
8618  */
8619 if (adv_instance->pending)
8620 mgmt_advertising_added(sk, hdev, cp->instance);
8621
8622 goto unlock;
8623
8624 clear_new_instance:
8625 hci_remove_adv_instance(hdev, cp->instance);
8626
8627 unlock:
8628 hci_dev_unlock(hdev);
8629
8630 return err;
8631 }
8632
8633 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8634 int err)
8635 {
8636 struct mgmt_pending_cmd *cmd = data;
8637 struct mgmt_cp_remove_advertising *cp = cmd->param;
8638 struct mgmt_rp_remove_advertising rp;
8639
8640 bt_dev_dbg(hdev, "err %d", err);
8641
8642 memset(&rp, 0, sizeof(rp));
8643 rp.instance = cp->instance;
8644
8645 if (err)
8646 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8647 mgmt_status(err));
8648 else
8649 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8650 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8651
8652 mgmt_pending_free(cmd);
8653 }
8654
8655 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8656 {
8657 struct mgmt_pending_cmd *cmd = data;
8658 struct mgmt_cp_remove_advertising *cp = cmd->param;
8659 int err;
8660
8661 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8662 if (err)
8663 return err;
8664
8665 if (list_empty(&hdev->adv_instances))
8666 err = hci_disable_advertising_sync(hdev);
8667
8668 return err;
8669 }
8670
8671 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8672 void *data, u16 data_len)
8673 {
8674 struct mgmt_cp_remove_advertising *cp = data;
8675 struct mgmt_pending_cmd *cmd;
8676 int err;
8677
8678 bt_dev_dbg(hdev, "sock %p", sk);
8679
8680 hci_dev_lock(hdev);
8681
8682 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8683 err = mgmt_cmd_status(sk, hdev->id,
8684 MGMT_OP_REMOVE_ADVERTISING,
8685 MGMT_STATUS_INVALID_PARAMS);
8686 goto unlock;
8687 }
8688
8689 if (pending_find(MGMT_OP_SET_LE, hdev)) {
8690 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8691 MGMT_STATUS_BUSY);
8692 goto unlock;
8693 }
8694
8695 if (list_empty(&hdev->adv_instances)) {
8696 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8697 MGMT_STATUS_INVALID_PARAMS);
8698 goto unlock;
8699 }
8700
8701 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8702 data_len);
8703 if (!cmd) {
8704 err = -ENOMEM;
8705 goto unlock;
8706 }
8707
8708 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
8709 remove_advertising_complete);
8710 if (err < 0)
8711 mgmt_pending_free(cmd);
8712
8713 unlock:
8714 hci_dev_unlock(hdev);
8715
8716 return err;
8717 }
8718
8719 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8720 void *data, u16 data_len)
8721 {
8722 struct mgmt_cp_get_adv_size_info *cp = data;
8723 struct mgmt_rp_get_adv_size_info rp;
8724 u32 flags, supported_flags;
8725
8726 bt_dev_dbg(hdev, "sock %p", sk);
8727
8728 if (!lmp_le_capable(hdev))
8729 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8730 MGMT_STATUS_REJECTED);
8731
8732 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8733 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8734 MGMT_STATUS_INVALID_PARAMS);
8735
8736 flags = __le32_to_cpu(cp->flags);
8737
8738
8739
8740
8741 supported_flags = get_supported_adv_flags(hdev);
8742 if (flags & ~supported_flags)
8743 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8744 MGMT_STATUS_INVALID_PARAMS);
8745
8746 rp.instance = cp->instance;
8747 rp.flags = cp->flags;
8748 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8749 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8750
8751 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8752 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8753 }
8754
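/* Handler table for MGMT commands. It is indexed by opcode, so entry 0 is
 * a NULL placeholder (opcodes start at 1). Each entry gives the handler,
 * the expected parameter size and optional HCI_MGMT_* flags such as
 * HCI_MGMT_VAR_LEN, HCI_MGMT_NO_HDEV or HCI_MGMT_UNTRUSTED.
 */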
8755 static const struct hci_mgmt_handler mgmt_handlers[] = {
8756 { NULL },
8757 { read_version, MGMT_READ_VERSION_SIZE,
8758 HCI_MGMT_NO_HDEV |
8759 HCI_MGMT_UNTRUSTED },
8760 { read_commands, MGMT_READ_COMMANDS_SIZE,
8761 HCI_MGMT_NO_HDEV |
8762 HCI_MGMT_UNTRUSTED },
8763 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8764 HCI_MGMT_NO_HDEV |
8765 HCI_MGMT_UNTRUSTED },
8766 { read_controller_info, MGMT_READ_INFO_SIZE,
8767 HCI_MGMT_UNTRUSTED },
8768 { set_powered, MGMT_SETTING_SIZE },
8769 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8770 { set_connectable, MGMT_SETTING_SIZE },
8771 { set_fast_connectable, MGMT_SETTING_SIZE },
8772 { set_bondable, MGMT_SETTING_SIZE },
8773 { set_link_security, MGMT_SETTING_SIZE },
8774 { set_ssp, MGMT_SETTING_SIZE },
8775 { set_hs, MGMT_SETTING_SIZE },
8776 { set_le, MGMT_SETTING_SIZE },
8777 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8778 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8779 { add_uuid, MGMT_ADD_UUID_SIZE },
8780 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8781 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8782 HCI_MGMT_VAR_LEN },
8783 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8784 HCI_MGMT_VAR_LEN },
8785 { disconnect, MGMT_DISCONNECT_SIZE },
8786 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8787 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8788 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8789 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8790 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8791 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8792 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8793 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8794 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8795 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8796 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8797 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8798 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8799 HCI_MGMT_VAR_LEN },
8800 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8801 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8802 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8803 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8804 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8805 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8806 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8807 { set_advertising, MGMT_SETTING_SIZE },
8808 { set_bredr, MGMT_SETTING_SIZE },
8809 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8810 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8811 { set_secure_conn, MGMT_SETTING_SIZE },
8812 { set_debug_keys, MGMT_SETTING_SIZE },
8813 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8814 { load_irks, MGMT_LOAD_IRKS_SIZE,
8815 HCI_MGMT_VAR_LEN },
8816 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8817 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8818 { add_device, MGMT_ADD_DEVICE_SIZE },
8819 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8820 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8821 HCI_MGMT_VAR_LEN },
8822 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8823 HCI_MGMT_NO_HDEV |
8824 HCI_MGMT_UNTRUSTED },
8825 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8826 HCI_MGMT_UNCONFIGURED |
8827 HCI_MGMT_UNTRUSTED },
8828 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8829 HCI_MGMT_UNCONFIGURED },
8830 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8831 HCI_MGMT_UNCONFIGURED },
8832 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8833 HCI_MGMT_VAR_LEN },
8834 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8835 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8836 HCI_MGMT_NO_HDEV |
8837 HCI_MGMT_UNTRUSTED },
8838 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8839 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8840 HCI_MGMT_VAR_LEN },
8841 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8842 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8843 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8844 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8845 HCI_MGMT_UNTRUSTED },
8846 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8847 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8848 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8849 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8850 HCI_MGMT_VAR_LEN },
8851 { set_wideband_speech, MGMT_SETTING_SIZE },
8852 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
8853 HCI_MGMT_UNTRUSTED },
8854 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
8855 HCI_MGMT_UNTRUSTED |
8856 HCI_MGMT_HDEV_OPTIONAL },
8857 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
8858 HCI_MGMT_VAR_LEN |
8859 HCI_MGMT_HDEV_OPTIONAL },
8860 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8861 HCI_MGMT_UNTRUSTED },
8862 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8863 HCI_MGMT_VAR_LEN },
8864 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8865 HCI_MGMT_UNTRUSTED },
8866 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8867 HCI_MGMT_VAR_LEN },
8868 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
8869 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
8870 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8871 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8872 HCI_MGMT_VAR_LEN },
8873 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
8874 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8875 HCI_MGMT_VAR_LEN },
8876 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
8877 HCI_MGMT_VAR_LEN },
8878 { add_adv_patterns_monitor_rssi,
8879 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
8880 HCI_MGMT_VAR_LEN },
8881 };
8882
8883 void mgmt_index_added(struct hci_dev *hdev)
8884 {
8885 struct mgmt_ev_ext_index ev;
8886
8887 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8888 return;
8889
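/* Extended Index event types derived below: 0x00 = configured primary
 * controller, 0x01 = unconfigured primary controller, 0x02 = AMP
 * controller.
 */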
8890 switch (hdev->dev_type) {
8891 case HCI_PRIMARY:
8892 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8893 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8894 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8895 ev.type = 0x01;
8896 } else {
8897 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8898 HCI_MGMT_INDEX_EVENTS);
8899 ev.type = 0x00;
8900 }
8901 break;
8902 case HCI_AMP:
8903 ev.type = 0x02;
8904 break;
8905 default:
8906 return;
8907 }
8908
8909 ev.bus = hdev->bus;
8910
8911 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8912 HCI_MGMT_EXT_INDEX_EVENTS);
8913 }
8914
8915 void mgmt_index_removed(struct hci_dev *hdev)
8916 {
8917 struct mgmt_ev_ext_index ev;
8918 u8 status = MGMT_STATUS_INVALID_INDEX;
8919
8920 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8921 return;
8922
8923 switch (hdev->dev_type) {
8924 case HCI_PRIMARY:
8925 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8926
8927 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8928 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8929 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8930 ev.type = 0x01;
8931 } else {
8932 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8933 HCI_MGMT_INDEX_EVENTS);
8934 ev.type = 0x00;
8935 }
8936 break;
8937 case HCI_AMP:
8938 ev.type = 0x02;
8939 break;
8940 default:
8941 return;
8942 }
8943
8944 ev.bus = hdev->bus;
8945
8946 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8947 HCI_MGMT_EXT_INDEX_EVENTS);
8948
8949 /* Cancel any remaining timed work */
8950 if (!hci_dev_test_flag(hdev, HCI_MGMT))
8951 return;
8952 cancel_delayed_work_sync(&hdev->discov_off);
8953 cancel_delayed_work_sync(&hdev->service_cache);
8954 cancel_delayed_work_sync(&hdev->rpa_expired);
8955 }
8956
8957 void mgmt_power_on(struct hci_dev *hdev, int err)
8958 {
8959 struct cmd_lookup match = { NULL, hdev };
8960
8961 bt_dev_dbg(hdev, "err %d", err);
8962
8963 hci_dev_lock(hdev);
8964
8965 if (!err) {
8966 restart_le_actions(hdev);
8967 hci_update_passive_scan(hdev);
8968 }
8969
8970 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8971
8972 new_settings(hdev, match.sk);
8973
8974 if (match.sk)
8975 sock_put(match.sk);
8976
8977 hci_dev_unlock(hdev);
8978 }
8979
8980 void __mgmt_power_off(struct hci_dev *hdev)
8981 {
8982 struct cmd_lookup match = { NULL, hdev };
8983 u8 status, zero_cod[] = { 0, 0, 0 };
8984
8985 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8986
8987 /* If the power off is because of hdev unregistration use the
8988  * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
8989  * Both scenarios are covered here since later in
8990  * mgmt_index_removed() any hci_conn callbacks will have already
8991  * been triggered, potentially causing misleading DISCONNECTED
8992  * status responses.
8993  */
8994 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8995 status = MGMT_STATUS_INVALID_INDEX;
8996 else
8997 status = MGMT_STATUS_NOT_POWERED;
8998
8999 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9000
9001 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9002 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9003 zero_cod, sizeof(zero_cod),
9004 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9005 ext_info_changed(hdev, NULL);
9006 }
9007
9008 new_settings(hdev, match.sk);
9009
9010 if (match.sk)
9011 sock_put(match.sk);
9012 }
9013
9014 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9015 {
9016 struct mgmt_pending_cmd *cmd;
9017 u8 status;
9018
9019 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9020 if (!cmd)
9021 return;
9022
9023 if (err == -ERFKILL)
9024 status = MGMT_STATUS_RFKILLED;
9025 else
9026 status = MGMT_STATUS_FAILED;
9027
9028 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9029
9030 mgmt_pending_remove(cmd);
9031 }
9032
9033 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9034 bool persistent)
9035 {
9036 struct mgmt_ev_new_link_key ev;
9037
9038 memset(&ev, 0, sizeof(ev));
9039
9040 ev.store_hint = persistent;
9041 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9042 ev.key.addr.type = BDADDR_BREDR;
9043 ev.key.type = key->type;
9044 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9045 ev.key.pin_len = key->pin_len;
9046
9047 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9048 }
9049
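/* Map the internal SMP long term key type and its authentication state
 * to the key type values exposed over the management interface.
 */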
9050 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9051 {
9052 switch (ltk->type) {
9053 case SMP_LTK:
9054 case SMP_LTK_RESPONDER:
9055 if (ltk->authenticated)
9056 return MGMT_LTK_AUTHENTICATED;
9057 return MGMT_LTK_UNAUTHENTICATED;
9058 case SMP_LTK_P256:
9059 if (ltk->authenticated)
9060 return MGMT_LTK_P256_AUTH;
9061 return MGMT_LTK_P256_UNAUTH;
9062 case SMP_LTK_P256_DEBUG:
9063 return MGMT_LTK_P256_DEBUG;
9064 }
9065
9066 return MGMT_LTK_UNAUTHENTICATED;
9067 }
9068
9069 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9070 {
9071 struct mgmt_ev_new_long_term_key ev;
9072
9073 memset(&ev, 0, sizeof(ev));
9074
9075 /* Devices using resolvable or non-resolvable random addresses
9076  * without providing an identity resolving key don't require
9077  * storing long term keys. Their addresses will change the
9078  * next time around.
9079  *
9080  * Only when a remote device provides an identity address
9081  * should the long term key be stored. If the remote identity
9082  * is known, the long term keys are internally mapped to the
9083  * identity address, so allow static random and public
9084  * addresses here.
9085  */
9086 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9087 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9088 ev.store_hint = 0x00;
9089 else
9090 ev.store_hint = persistent;
9091
9092 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9093 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9094 ev.key.type = mgmt_ltk_type(key);
9095 ev.key.enc_size = key->enc_size;
9096 ev.key.ediv = key->ediv;
9097 ev.key.rand = key->rand;
9098
9099 if (key->type == SMP_LTK)
9100 ev.key.initiator = 1;
9101
9102 /* Make sure we copy only the significant bytes based on the
9103  * encryption key size, and zero out the remaining part.
9104  */
9105 memcpy(ev.key.val, key->val, key->enc_size);
9106 memset(ev.key.val + key->enc_size, 0,
9107 sizeof(ev.key.val) - key->enc_size);
9108
9109 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9110 }
9111
9112 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9113 {
9114 struct mgmt_ev_new_irk ev;
9115
9116 memset(&ev, 0, sizeof(ev));
9117
9118 ev.store_hint = persistent;
9119
9120 bacpy(&ev.rpa, &irk->rpa);
9121 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9122 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9123 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9124
9125 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9126 }
9127
9128 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9129 bool persistent)
9130 {
9131 struct mgmt_ev_new_csrk ev;
9132
9133 memset(&ev, 0, sizeof(ev));
9134
9135 /* Devices using resolvable or non-resolvable random addresses
9136  * without providing an identity resolving key don't require
9137  * storing signature resolving keys. Their addresses will
9138  * change the next time around.
9139  *
9140  * Only when a remote device provides an identity address
9141  * should the signature resolving key be stored, so allow
9142  * static random and public addresses here.
9143  */
9144 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9145 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9146 ev.store_hint = 0x00;
9147 else
9148 ev.store_hint = persistent;
9149
9150 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9151 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9152 ev.key.type = csrk->type;
9153 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9154
9155 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9156 }
9157
9158 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9159 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9160 u16 max_interval, u16 latency, u16 timeout)
9161 {
9162 struct mgmt_ev_new_conn_param ev;
9163
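/* Connection parameters are only stored and reported for identity
 * (public or static random) addresses.
 */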
9164 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9165 return;
9166
9167 memset(&ev, 0, sizeof(ev));
9168 bacpy(&ev.addr.bdaddr, bdaddr);
9169 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9170 ev.store_hint = store_hint;
9171 ev.min_interval = cpu_to_le16(min_interval);
9172 ev.max_interval = cpu_to_le16(max_interval);
9173 ev.latency = cpu_to_le16(latency);
9174 ev.timeout = cpu_to_le16(timeout);
9175
9176 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9177 }
9178
9179 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9180 u8 *name, u8 name_len)
9181 {
9182 struct sk_buff *skb;
9183 struct mgmt_ev_device_connected *ev;
9184 u16 eir_len = 0;
9185 u32 flags = 0;
9186
9187 /* Allocate room for either the LE advertising data or the BR/EDR name and class of device */
9188 if (conn->le_adv_data_len > 0)
9189 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9190 sizeof(*ev) + conn->le_adv_data_len);
9191 else
9192 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9193 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9194 eir_precalc_len(sizeof(conn->dev_class)));
9195
9196 ev = skb_put(skb, sizeof(*ev));
9197 bacpy(&ev->addr.bdaddr, &conn->dst);
9198 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9199
9200 if (conn->out)
9201 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9202
9203 ev->flags = __cpu_to_le32(flags);
9204
9205 /* We must ensure that the EIR Data fields are ordered and
9206  * unique. Keep it simple for now and avoid the problem by not
9207  * adding any BR/EDR data to the LE adv.
9208  */
9209 if (conn->le_adv_data_len > 0) {
9210 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9211 eir_len = conn->le_adv_data_len;
9212 } else {
9213 if (name)
9214 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9215
9216 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9217 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9218 conn->dev_class, sizeof(conn->dev_class));
9219 }
9220
9221 ev->eir_len = cpu_to_le16(eir_len);
9222
9223 mgmt_event_skb(skb, NULL);
9224 }
9225
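/* Completes a pending MGMT_OP_DISCONNECT command and records its socket
 * so the Device Disconnected event can be skipped for that socket (it
 * already receives the command response).
 */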
9226 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9227 {
9228 struct sock **sk = data;
9229
9230 cmd->cmd_complete(cmd, 0);
9231
9232 *sk = cmd->sk;
9233 sock_hold(*sk);
9234
9235 mgmt_pending_remove(cmd);
9236 }
9237
9238 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9239 {
9240 struct hci_dev *hdev = data;
9241 struct mgmt_cp_unpair_device *cp = cmd->param;
9242
9243 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9244
9245 cmd->cmd_complete(cmd, 0);
9246 mgmt_pending_remove(cmd);
9247 }
9248
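/* Returns true if a Set Powered (off) command is still pending, i.e. the
 * controller is in the middle of being powered down.
 */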
9249 bool mgmt_powering_down(struct hci_dev *hdev)
9250 {
9251 struct mgmt_pending_cmd *cmd;
9252 struct mgmt_mode *cp;
9253
9254 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9255 if (!cmd)
9256 return false;
9257
9258 cp = cmd->param;
9259 if (!cp->val)
9260 return true;
9261
9262 return false;
9263 }
9264
9265 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9266 u8 link_type, u8 addr_type, u8 reason,
9267 bool mgmt_connected)
9268 {
9269 struct mgmt_ev_device_disconnected ev;
9270 struct sock *sk = NULL;
9271
9272 /* The connection is still in hci_conn_hash so test for 1
9273  * instead of 0 to know if this is the last one.
9274  */
9275 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9276 cancel_delayed_work(&hdev->power_off);
9277 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9278 }
9279
9280 if (!mgmt_connected)
9281 return;
9282
9283 if (link_type != ACL_LINK && link_type != LE_LINK)
9284 return;
9285
9286 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9287
9288 bacpy(&ev.addr.bdaddr, bdaddr);
9289 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9290 ev.reason = reason;
9291
9292 /* Report disconnects during suspend as caused by local host suspend */
9293 if (hdev->suspended)
9294 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9295
9296 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9297
9298 if (sk)
9299 sock_put(sk);
9300
9301 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9302 hdev);
9303 }
9304
9305 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9306 u8 link_type, u8 addr_type, u8 status)
9307 {
9308 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9309 struct mgmt_cp_disconnect *cp;
9310 struct mgmt_pending_cmd *cmd;
9311
9312 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9313 hdev);
9314
9315 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9316 if (!cmd)
9317 return;
9318
9319 cp = cmd->param;
9320
9321 if (bacmp(bdaddr, &cp->addr.bdaddr))
9322 return;
9323
9324 if (cp->addr.type != bdaddr_type)
9325 return;
9326
9327 cmd->cmd_complete(cmd, mgmt_status(status));
9328 mgmt_pending_remove(cmd);
9329 }
9330
9331 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9332 u8 addr_type, u8 status)
9333 {
9334 struct mgmt_ev_connect_failed ev;
9335
9336 /* The connection is still in hci_conn_hash so test for 1
9337  * instead of 0 to know if this is the last one.
9338  */
9339 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9340 cancel_delayed_work(&hdev->power_off);
9341 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9342 }
9343
9344 bacpy(&ev.addr.bdaddr, bdaddr);
9345 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9346 ev.status = mgmt_status(status);
9347
9348 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9349 }
9350
9351 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9352 {
9353 struct mgmt_ev_pin_code_request ev;
9354
9355 bacpy(&ev.addr.bdaddr, bdaddr);
9356 ev.addr.type = BDADDR_BREDR;
9357 ev.secure = secure;
9358
9359 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9360 }
9361
9362 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9363 u8 status)
9364 {
9365 struct mgmt_pending_cmd *cmd;
9366
9367 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9368 if (!cmd)
9369 return;
9370
9371 cmd->cmd_complete(cmd, mgmt_status(status));
9372 mgmt_pending_remove(cmd);
9373 }
9374
9375 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9376 u8 status)
9377 {
9378 struct mgmt_pending_cmd *cmd;
9379
9380 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9381 if (!cmd)
9382 return;
9383
9384 cmd->cmd_complete(cmd, mgmt_status(status));
9385 mgmt_pending_remove(cmd);
9386 }
9387
9388 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9389 u8 link_type, u8 addr_type, u32 value,
9390 u8 confirm_hint)
9391 {
9392 struct mgmt_ev_user_confirm_request ev;
9393
9394 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9395
9396 bacpy(&ev.addr.bdaddr, bdaddr);
9397 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9398 ev.confirm_hint = confirm_hint;
9399 ev.value = cpu_to_le32(value);
9400
9401 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9402 NULL);
9403 }
9404
9405 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9406 u8 link_type, u8 addr_type)
9407 {
9408 struct mgmt_ev_user_passkey_request ev;
9409
9410 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9411
9412 bacpy(&ev.addr.bdaddr, bdaddr);
9413 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9414
9415 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9416 NULL);
9417 }
9418
9419 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9420 u8 link_type, u8 addr_type, u8 status,
9421 u8 opcode)
9422 {
9423 struct mgmt_pending_cmd *cmd;
9424
9425 cmd = pending_find(opcode, hdev);
9426 if (!cmd)
9427 return -ENOENT;
9428
9429 cmd->cmd_complete(cmd, mgmt_status(status));
9430 mgmt_pending_remove(cmd);
9431
9432 return 0;
9433 }
9434
9435 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9436 u8 link_type, u8 addr_type, u8 status)
9437 {
9438 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9439 status, MGMT_OP_USER_CONFIRM_REPLY);
9440 }
9441
9442 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9443 u8 link_type, u8 addr_type, u8 status)
9444 {
9445 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9446 status,
9447 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9448 }
9449
9450 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9451 u8 link_type, u8 addr_type, u8 status)
9452 {
9453 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9454 status, MGMT_OP_USER_PASSKEY_REPLY);
9455 }
9456
9457 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9458 u8 link_type, u8 addr_type, u8 status)
9459 {
9460 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9461 status,
9462 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9463 }
9464
9465 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9466 u8 link_type, u8 addr_type, u32 passkey,
9467 u8 entered)
9468 {
9469 struct mgmt_ev_passkey_notify ev;
9470
9471 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9472
9473 bacpy(&ev.addr.bdaddr, bdaddr);
9474 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9475 ev.passkey = __cpu_to_le32(passkey);
9476 ev.entered = entered;
9477
9478 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9479 }
9480
9481 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9482 {
9483 struct mgmt_ev_auth_failed ev;
9484 struct mgmt_pending_cmd *cmd;
9485 u8 status = mgmt_status(hci_status);
9486
9487 bacpy(&ev.addr.bdaddr, &conn->dst);
9488 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9489 ev.status = status;
9490
9491 cmd = find_pairing(conn);
9492
9493 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9494 cmd ? cmd->sk : NULL);
9495
9496 if (cmd) {
9497 cmd->cmd_complete(cmd, status);
9498 mgmt_pending_remove(cmd);
9499 }
9500 }
9501
9502 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9503 {
9504 struct cmd_lookup match = { NULL, hdev };
9505 bool changed;
9506
9507 if (status) {
9508 u8 mgmt_err = mgmt_status(status);
9509 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9510 cmd_status_rsp, &mgmt_err);
9511 return;
9512 }
9513
9514 if (test_bit(HCI_AUTH, &hdev->flags))
9515 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9516 else
9517 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9518
9519 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9520 &match);
9521
9522 if (changed)
9523 new_settings(hdev, match.sk);
9524
9525 if (match.sk)
9526 sock_put(match.sk);
9527 }
9528
9529 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9530 {
9531 struct cmd_lookup *match = data;
9532
9533 if (match->sk == NULL) {
9534 match->sk = cmd->sk;
9535 sock_hold(match->sk);
9536 }
9537 }
9538
9539 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9540 u8 status)
9541 {
9542 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9543
9544 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9545 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9546 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9547
9548 if (!status) {
9549 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9550 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9551 ext_info_changed(hdev, NULL);
9552 }
9553
9554 if (match.sk)
9555 sock_put(match.sk);
9556 }
9557
9558 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9559 {
9560 struct mgmt_cp_set_local_name ev;
9561 struct mgmt_pending_cmd *cmd;
9562
9563 if (status)
9564 return;
9565
9566 memset(&ev, 0, sizeof(ev));
9567 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9568 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9569
9570 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9571 if (!cmd) {
9572 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9573
9574 /* If the name change happened as part of powering the
9575  * controller on, don't send any mgmt signals for it.
9576  */
9577 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9578 return;
9579 }
9580
9581 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9582 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9583 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
9584 }
9585
9586 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9587 {
9588 int i;
9589
9590 for (i = 0; i < uuid_count; i++) {
9591 if (!memcmp(uuid, uuids[i], 16))
9592 return true;
9593 }
9594
9595 return false;
9596 }
9597
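/* Walk the EIR/advertising data and check whether any listed UUID
 * (16-bit and 32-bit values are expanded onto the Bluetooth Base UUID)
 * matches one of the UUIDs in the given filter list.
 */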
9598 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9599 {
9600 u16 parsed = 0;
9601
9602 while (parsed < eir_len) {
9603 u8 field_len = eir[0];
9604 u8 uuid[16];
9605 int i;
9606
9607 if (field_len == 0)
9608 break;
9609
9610 if (eir_len - parsed < field_len + 1)
9611 break;
9612
9613 switch (eir[1]) {
9614 case EIR_UUID16_ALL:
9615 case EIR_UUID16_SOME:
9616 for (i = 0; i + 3 <= field_len; i += 2) {
9617 memcpy(uuid, bluetooth_base_uuid, 16);
9618 uuid[13] = eir[i + 3];
9619 uuid[12] = eir[i + 2];
9620 if (has_uuid(uuid, uuid_count, uuids))
9621 return true;
9622 }
9623 break;
9624 case EIR_UUID32_ALL:
9625 case EIR_UUID32_SOME:
9626 for (i = 0; i + 5 <= field_len; i += 4) {
9627 memcpy(uuid, bluetooth_base_uuid, 16);
9628 uuid[15] = eir[i + 5];
9629 uuid[14] = eir[i + 4];
9630 uuid[13] = eir[i + 3];
9631 uuid[12] = eir[i + 2];
9632 if (has_uuid(uuid, uuid_count, uuids))
9633 return true;
9634 }
9635 break;
9636 case EIR_UUID128_ALL:
9637 case EIR_UUID128_SOME:
9638 for (i = 0; i + 17 <= field_len; i += 16) {
9639 memcpy(uuid, eir + i + 2, 16);
9640 if (has_uuid(uuid, uuid_count, uuids))
9641 return true;
9642 }
9643 break;
9644 }
9645
9646 parsed += field_len + 1;
9647 eir += field_len + 1;
9648 }
9649
9650 return false;
9651 }
9652
9653 static void restart_le_scan(struct hci_dev *hdev)
9654 {
9655 /* If the controller is not scanning we are done. */
9656 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9657 return;
9658
9659 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9660 hdev->discovery.scan_start +
9661 hdev->discovery.scan_duration))
9662 return;
9663
9664 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9665 DISCOV_LE_RESTART_DELAY);
9666 }
9667
9668 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9669 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9670 {
9671 /* If a RSSI threshold has been specified, and
9672  * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results
9673  * with a RSSI smaller than the RSSI threshold will be dropped. If
9674  * the quirk is set, let the result through for further processing,
9675  * as we might need to restart the scan.
9676  *
9677  * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9678  * the results are also dropped.
9679  */
9680 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9681 (rssi == HCI_RSSI_INVALID ||
9682 (rssi < hdev->discovery.rssi &&
9683 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9684 return false;
9685
9686 if (hdev->discovery.uuid_count != 0) {
9687 /* If a list of UUIDs is provided in the filter, results with no
9688  * matching UUID should be dropped.
9689  */
9690 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9691 hdev->discovery.uuids) &&
9692 !eir_has_uuids(scan_rsp, scan_rsp_len,
9693 hdev->discovery.uuid_count,
9694 hdev->discovery.uuids))
9695 return false;
9696 }
9697
9698 /* If duplicate filtering does not report RSSI changes, then
9699  * restart scanning to ensure updated results with updated RSSI
9700  * values.
9701  */
9701 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9702 restart_le_scan(hdev);
9703
9704 /* Validate the RSSI value against the RSSI threshold once more. */
9705 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9706 rssi < hdev->discovery.rssi)
9707 return false;
9708 }
9709
9710 return true;
9711 }
9712
9713 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
9714 bdaddr_t *bdaddr, u8 addr_type)
9715 {
9716 struct mgmt_ev_adv_monitor_device_lost ev;
9717
9718 ev.monitor_handle = cpu_to_le16(handle);
9719 bacpy(&ev.addr.bdaddr, bdaddr);
9720 ev.addr.type = addr_type;
9721
9722 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
9723 NULL);
9724 }
9725
9726 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
9727 struct sk_buff *skb,
9728 struct sock *skip_sk,
9729 u16 handle)
9730 {
9731 struct sk_buff *advmon_skb;
9732 size_t advmon_skb_len;
9733 __le16 *monitor_handle;
9734
9735 if (!skb)
9736 return;
9737
9738 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
9739 sizeof(struct mgmt_ev_device_found)) + skb->len;
9740 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
9741 advmon_skb_len);
9742 if (!advmon_skb)
9743 return;
9744
9745 /* ADV_MONITOR_DEVICE_FOUND is similar to the DEVICE_FOUND event
9746  * except that it also carries a 'monitor_handle'. Make a copy of
9747  * DEVICE_FOUND and prepend the handle of the matched monitor.
9748  */
9749 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
9750 *monitor_handle = cpu_to_le16(handle);
9751 skb_put_data(advmon_skb, skb->data, skb->len);
9752
9753 mgmt_event_skb(advmon_skb, skip_sk);
9754 }
9755
9756 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
9757 bdaddr_t *bdaddr, bool report_device,
9758 struct sk_buff *skb,
9759 struct sock *skip_sk)
9760 {
9761 struct monitored_device *dev, *tmp;
9762 bool matched = false;
9763 bool notified = false;
9764
9765 /* We have received the Advertisement Report because:
9766  * 1. the kernel has initiated active discovery
9767  * 2. if not, we have pend_le_reports > 0 in which case we are doing
9768  *    passive scanning
9769  * 3. if none of the above is true, we have one or more active
9770  *    Advertisement Monitors
9771  *
9772  * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
9773  * and report ONLY one advertisement per device for the matched Monitor
9774  * via the MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
9775  *
9776  * For case 3, since we are not active scanning and all advertisements
9777  * received are due to a matched Advertisement Monitor, report all
9778  * advertisements ONLY via the MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
9779  */
9780 if (report_device && !hdev->advmon_pend_notify) {
9781 mgmt_event_skb(skb, skip_sk);
9782 return;
9783 }
9784
9785 hdev->advmon_pend_notify = false;
9786
9787 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
9788 if (!bacmp(&dev->bdaddr, bdaddr)) {
9789 matched = true;
9790
9791 if (!dev->notified) {
9792 mgmt_send_adv_monitor_device_found(hdev, skb,
9793 skip_sk,
9794 dev->handle);
9795 notified = true;
9796 dev->notified = true;
9797 }
9798 }
9799
9800 if (!dev->notified)
9801 hdev->advmon_pend_notify = true;
9802 }
9803
9804 if (!report_device &&
9805 ((matched && !notified) || !msft_monitor_supported(hdev))) {
9806 /* Handle 0 indicates that we are not active scanning and this
9807  * is a subsequent advertisement report for an already matched
9808  * Advertisement Monitor, or that the controller offloading
9809  * support is not available.
9810  */
9811 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
9812 }
9813
9814 if (report_device)
9815 mgmt_event_skb(skb, skip_sk);
9816 else
9817 kfree_skb(skb);
9818 }
9819
9820 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9821 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9822 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9823 {
9824 struct sk_buff *skb;
9825 struct mgmt_ev_device_found *ev;
9826 bool report_device = hci_discovery_active(hdev);
9827
9828 /* Don't send events for a non-kernel initiated discovery,
9829  * except for LE reports that passive scanning (pend_le_reports)
9830  * or an active Advertisement Monitor is interested in.
9831  */
9832 if (!hci_discovery_active(hdev)) {
9833 if (link_type == ACL_LINK)
9834 return;
9835 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
9836 report_device = true;
9837 else if (!hci_is_adv_monitoring(hdev))
9838 return;
9839 }
9840
9841 if (hdev->discovery.result_filtering) {
9842 /* We are using service discovery, so apply the result filter */
9843 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9844 scan_rsp_len))
9845 return;
9846 }
9847
9848 if (hdev->discovery.limited) {
9849 /* Check for the limited discoverable bit */
9850 if (dev_class) {
9851 if (!(dev_class[1] & 0x20))
9852 return;
9853 } else {
9854 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9855 if (!flags || !(flags[0] & LE_AD_LIMITED))
9856 return;
9857 }
9858 }
9859
9860 /* Allocate skb. The 5 extra bytes are for the potential CoD field. */
9861 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9862 sizeof(*ev) + eir_len + scan_rsp_len + 5);
9863 if (!skb)
9864 return;
9865
9866 ev = skb_put(skb, sizeof(*ev));
9867
9868 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9869  * RSSI value was reported as 0 when not available. This behavior
9870  * is kept when using device discovery. This is required for full
9871  * backwards compatibility with the API.
9872  *
9873  * However when using service discovery, the value 127 will be
9874  * returned when the RSSI is not available.
9875  */
9876 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9877 link_type == ACL_LINK)
9878 rssi = 0;
9879
9880 bacpy(&ev->addr.bdaddr, bdaddr);
9881 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9882 ev->rssi = rssi;
9883 ev->flags = cpu_to_le32(flags);
9884
9885 if (eir_len > 0)
9886 /* Copy EIR or advertising data into the event */
9887 skb_put_data(skb, eir, eir_len);
9888
9889 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
9890 u8 eir_cod[5];
9891
9892 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
9893 dev_class, 3);
9894 skb_put_data(skb, eir_cod, sizeof(eir_cod));
9895 }
9896
9897 if (scan_rsp_len > 0)
9898 /* Append scan response data to the event */
9899 skb_put_data(skb, scan_rsp, scan_rsp_len);
9900
9901 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9902
9903 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
9904 }
9905
9906 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9907 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9908 {
9909 struct sk_buff *skb;
9910 struct mgmt_ev_device_found *ev;
9911 u16 eir_len = 0;
9912 u32 flags = 0;
9913
9914 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9915 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
9916
9917 ev = skb_put(skb, sizeof(*ev));
9918 bacpy(&ev->addr.bdaddr, bdaddr);
9919 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9920 ev->rssi = rssi;
9921
9922 if (name)
9923 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9924 else
9925 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9926
9927 ev->eir_len = cpu_to_le16(eir_len);
9928 ev->flags = cpu_to_le32(flags);
9929
9930 mgmt_event_skb(skb, NULL);
9931 }
9932
9933 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9934 {
9935 struct mgmt_ev_discovering ev;
9936
9937 bt_dev_dbg(hdev, "discovering %u", discovering);
9938
9939 memset(&ev, 0, sizeof(ev));
9940 ev.type = hdev->discovery.type;
9941 ev.discovering = discovering;
9942
9943 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9944 }
9945
9946 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9947 {
9948 struct mgmt_ev_controller_suspend ev;
9949
9950 ev.suspend_state = state;
9951 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9952 }
9953
9954 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9955 u8 addr_type)
9956 {
9957 struct mgmt_ev_controller_resume ev;
9958
9959 ev.wake_reason = reason;
9960 if (bdaddr) {
9961 bacpy(&ev.addr.bdaddr, bdaddr);
9962 ev.addr.type = addr_type;
9963 } else {
9964 memset(&ev.addr, 0, sizeof(ev.addr));
9965 }
9966
9967 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9968 }
9969
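/* Registration of the management command handlers on the HCI control
 * channel (HCI_CHANNEL_CONTROL).
 */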
9970 static struct hci_mgmt_chan chan = {
9971 .channel = HCI_CHANNEL_CONTROL,
9972 .handler_count = ARRAY_SIZE(mgmt_handlers),
9973 .handlers = mgmt_handlers,
9974 .hdev_init = mgmt_init_hdev,
9975 };
9976
9977 int mgmt_init(void)
9978 {
9979 return hci_mgmt_chan_register(&chan);
9980 }
9981
9982 void mgmt_exit(void)
9983 {
9984 hci_mgmt_chan_unregister(&chan);
9985 }