0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026 #include <linux/compat.h>
0027 #include <linux/export.h>
0028 #include <linux/utsname.h>
0029 #include <linux/sched.h>
0030 #include <asm/unaligned.h>
0031
0032 #include <net/bluetooth/bluetooth.h>
0033 #include <net/bluetooth/hci_core.h>
0034 #include <net/bluetooth/hci_mon.h>
0035 #include <net/bluetooth/mgmt.h>
0036
0037 #include "mgmt_util.h"
0038
/* Registered management channels (HCI_CHANNEL_CONTROL and above),
 * protected by mgmt_chan_list_lock.
 */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* IDA pool handing out per-socket cookies used for monitor tracing */
static DEFINE_IDA(sock_cookie_ida);

/* Number of bound monitor sockets; non-zero enables monitor tracing */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
0045
0046
0047
0048
/* Cast a struct sock to its HCI-specific private data.  Valid because
 * struct hci_pinfo starts with struct bt_sock, which embeds the sock.
 */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket state for an HCI socket */
struct hci_pinfo {
	struct bt_sock bt;		/* must be first for the hci_pi() cast */
	struct hci_dev *hdev;		/* bound controller, NULL if unbound */
	struct hci_filter filter;	/* RAW-channel packet/event filter */
	__u8 cmsg_mask;			/* which ancillary data to deliver */
	unsigned short channel;		/* HCI_CHANNEL_* this socket uses */
	unsigned long flags;		/* HCI_SOCK_* / HCI_MGMT_* flag bits */
	__u32 cookie;			/* monitor-tracing ID from sock_cookie_ida */
	char comm[TASK_COMM_LEN];	/* task name captured at cookie creation */
	__u16 mtu;			/* defaults to HCI_MAX_FRAME_SIZE at bind */
};
0062
0063 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
0064 {
0065 struct hci_dev *hdev = hci_pi(sk)->hdev;
0066
0067 if (!hdev)
0068 return ERR_PTR(-EBADFD);
0069 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
0070 return ERR_PTR(-EPIPE);
0071 return hdev;
0072 }
0073
0074 void hci_sock_set_flag(struct sock *sk, int nr)
0075 {
0076 set_bit(nr, &hci_pi(sk)->flags);
0077 }
0078
0079 void hci_sock_clear_flag(struct sock *sk, int nr)
0080 {
0081 clear_bit(nr, &hci_pi(sk)->flags);
0082 }
0083
0084 int hci_sock_test_flag(struct sock *sk, int nr)
0085 {
0086 return test_bit(nr, &hci_pi(sk)->flags);
0087 }
0088
0089 unsigned short hci_sock_get_channel(struct sock *sk)
0090 {
0091 return hci_pi(sk)->channel;
0092 }
0093
0094 u32 hci_sock_get_cookie(struct sock *sk)
0095 {
0096 return hci_pi(sk)->cookie;
0097 }
0098
/* Assign a unique non-zero cookie to @sk on first use; the cookie
 * identifies the socket in monitor traces.  Also snapshots the current
 * task name into hci_pi(sk)->comm.
 *
 * Returns true if a new cookie was generated, false if the socket
 * already had one.
 */
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;	/* sentinel when IDA allocation fails */

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}
0115
/* Release @sk's monitor cookie back to the IDA, if one was assigned.
 * The stored cookie is overwritten with the 0xffffffff sentinel first.
 *
 * NOTE(review): if hci_sock_gen_cookie() previously fell back to the
 * 0xffffffff sentinel (IDA failure), this reads it back as a negative
 * id and still calls ida_simple_remove() with it - confirm that path
 * is unreachable or harmless.
 */
static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}
0125
0126 static inline int hci_test_bit(int nr, const void *addr)
0127 {
0128 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
0129 }
0130
0131
/* Highest opcode group (OGF) index covered by the security filter */
#define HCI_SFLT_MAX_OGF 5

/* Security filter bitmasks: which packet types, events and per-OGF
 * command opcodes (OCF bits) are permitted.
 */
struct hci_sec_filter {
	__u32 type_mask;			/* allowed HCI packet types */
	__u32 event_mask[2];			/* allowed HCI events, 64 bits */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4]; /* allowed OCFs, indexed by OGF */
};
0139
/* Default whitelist applied to unprivileged sockets.  The ocf_mask rows
 * are indexed by command OGF (row 0 unused); mask values are presumably
 * hand-picked safe commands/events - confirm against the HCI spec.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF 0x01 */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF 0x02 */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF 0x03 */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF 0x04 */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF 0x05 */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
0160
/* Global list of all open HCI sockets, protected by its rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
0164
/* Apply @sk's RAW-channel filter to @skb.  Returns true when the packet
 * must NOT be delivered to this socket.  skb->data points at the packet
 * payload (the type byte lives in hci_skb_pkt_type()), so for events
 * skb->data[0] is the event code.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* Cmd Complete: opcode at offset 3 (evt hdr 2 bytes + ncmd byte) */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	/* Cmd Status: opcode at offset 4 (evt hdr + status + ncmd bytes) */
	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
0201
0202
/* Deliver @skb from @hdev to every matching raw/user channel socket.
 * A single private copy with the packet-type byte pushed in front is
 * created lazily and then cloned per receiver.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel only sees incoming traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with 1 byte headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
0266
0267
0268 static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
0269 int flag, struct sock *skip_sk)
0270 {
0271 struct sock *sk;
0272
0273 BT_DBG("channel %u len %d", channel, skb->len);
0274
0275 sk_for_each(sk, &hci_sk_list.head) {
0276 struct sk_buff *nskb;
0277
0278
0279 if (!hci_sock_test_flag(sk, flag))
0280 continue;
0281
0282
0283 if (sk == skip_sk)
0284 continue;
0285
0286 if (sk->sk_state != BT_BOUND)
0287 continue;
0288
0289 if (hci_pi(sk)->channel != channel)
0290 continue;
0291
0292 nskb = skb_clone(skb, GFP_ATOMIC);
0293 if (!nskb)
0294 continue;
0295
0296 if (sock_queue_rcv_skb(sk, nskb))
0297 kfree_skb(nskb);
0298 }
0299
0300 }
0301
/* Locked wrapper around __hci_send_to_channel(); use this variant when
 * hci_sk_list.lock is not already held.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
0309
0310
/* Send frame to monitor socket(s), prefixed with a hci_mon_hdr that
 * encodes packet type and direction.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Fast path: no monitor socket is bound */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction) to a monitor opcode */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		/* Unknown packet types are not traced */
		return;
	}

	/* Create a private copy with headroom for the monitor header */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
0369
/* For every control-channel socket with @flag set (except @skip_sk),
 * build a HCI_MON_CTRL_EVENT message tagged with that socket's cookie
 * and broadcast it to the monitor channel.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		/* Payload: cookie (4), event code (2), event data */
		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		/* hci_sk_list.lock is already held - use the __ variant */
		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
0423
/* Build the monitor message announcing device state change @event on
 * @hdev.  Returns NULL when the event is not traced or on allocation
 * failure; the caller owns the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* 0xffff presumably means the manufacturer is still
		 * unknown - nothing useful to announce yet.
		 */
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
0501
/* Build the HCI_MON_CTRL_OPEN message describing @sk for the monitor.
 * Payload: cookie (4) + format (2) + version (3) + flags (4) + comm
 * length byte = 14 bytes, plus TASK_COMM_LEN bytes of task name.
 * Returns NULL for untraced channels or when no cookie is assigned.
 */
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported formats */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
0559
0560 static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
0561 {
0562 struct hci_mon_hdr *hdr;
0563 struct sk_buff *skb;
0564
0565
0566 if (!hci_pi(sk)->cookie)
0567 return NULL;
0568
0569 switch (hci_pi(sk)->channel) {
0570 case HCI_CHANNEL_RAW:
0571 case HCI_CHANNEL_USER:
0572 case HCI_CHANNEL_CONTROL:
0573 break;
0574 default:
0575
0576 return NULL;
0577 }
0578
0579 skb = bt_skb_alloc(4, GFP_ATOMIC);
0580 if (!skb)
0581 return NULL;
0582
0583 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
0584
0585 __net_timestamp(skb);
0586
0587 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
0588 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
0589 if (hci_pi(sk)->hdev)
0590 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
0591 else
0592 hdr->index = cpu_to_le16(HCI_DEV_NONE);
0593 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
0594
0595 return skb;
0596 }
0597
0598 static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
0599 u16 opcode, u16 len,
0600 const void *buf)
0601 {
0602 struct hci_mon_hdr *hdr;
0603 struct sk_buff *skb;
0604
0605 skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
0606 if (!skb)
0607 return NULL;
0608
0609 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
0610 put_unaligned_le16(opcode, skb_put(skb, 2));
0611
0612 if (buf)
0613 skb_put_data(skb, buf, len);
0614
0615 __net_timestamp(skb);
0616
0617 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
0618 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
0619 hdr->index = cpu_to_le16(index);
0620 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
0621
0622 return skb;
0623 }
0624
/* Queue a free-form NUL-terminated text note on monitor socket @sk. */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	/* First pass only measures the formatted length */
	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	/* Second pass formats into the skb.  vsprintf's trailing NUL
	 * lands in the one extra tailroom byte, which is then claimed
	 * and rewritten explicitly.
	 */
	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
0656
/* Replay the current state of every registered controller to a freshly
 * bound monitor socket: registration, open and up/setup notifications,
 * in the order a live trace would have produced them.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Closed devices produce only the registration event */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Powered-up devices report index info; devices still in
		 * setup report the setup event; otherwise nothing more.
		 */
		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
0698
0699 static void send_monitor_control_replay(struct sock *mon_sk)
0700 {
0701 struct sock *sk;
0702
0703 read_lock(&hci_sk_list.lock);
0704
0705 sk_for_each(sk, &hci_sk_list.head) {
0706 struct sk_buff *skb;
0707
0708 skb = create_monitor_ctrl_open(sk);
0709 if (!skb)
0710 continue;
0711
0712 if (sock_queue_rcv_skb(mon_sk, skb))
0713 kfree_skb(skb);
0714 }
0715
0716 read_unlock(&hci_sk_list.lock);
0717 }
0718
0719
0720 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
0721 {
0722 struct hci_event_hdr *hdr;
0723 struct hci_ev_stack_internal *ev;
0724 struct sk_buff *skb;
0725
0726 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
0727 if (!skb)
0728 return;
0729
0730 hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
0731 hdr->evt = HCI_EV_STACK_INTERNAL;
0732 hdr->plen = sizeof(*ev) + dlen;
0733
0734 ev = skb_put(skb, sizeof(*ev) + dlen);
0735 ev->type = type;
0736 memcpy(ev->data, data, dlen);
0737
0738 bt_cb(skb)->incoming = 1;
0739 __net_timestamp(skb);
0740
0741 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
0742 hci_send_to_sock(hdev, skb);
0743 kfree_skb(skb);
0744 }
0745
/* Notify interested sockets about device state change @event on @hdev. */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
0785
0786 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
0787 {
0788 struct hci_mgmt_chan *c;
0789
0790 list_for_each_entry(c, &mgmt_chan_list, list) {
0791 if (c->channel == channel)
0792 return c;
0793 }
0794
0795 return NULL;
0796 }
0797
0798 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
0799 {
0800 struct hci_mgmt_chan *c;
0801
0802 mutex_lock(&mgmt_chan_list_lock);
0803 c = __hci_mgmt_chan_find(channel);
0804 mutex_unlock(&mgmt_chan_list_lock);
0805
0806 return c;
0807 }
0808
0809 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
0810 {
0811 if (c->channel < HCI_CHANNEL_CONTROL)
0812 return -EINVAL;
0813
0814 mutex_lock(&mgmt_chan_list_lock);
0815 if (__hci_mgmt_chan_find(c->channel)) {
0816 mutex_unlock(&mgmt_chan_list_lock);
0817 return -EALREADY;
0818 }
0819
0820 list_add_tail(&c->list, &mgmt_chan_list);
0821
0822 mutex_unlock(&mgmt_chan_list_lock);
0823
0824 return 0;
0825 }
0826 EXPORT_SYMBOL(hci_mgmt_chan_register);
0827
/* Remove mgmt channel @c from the registered list. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
0835
/* Release an HCI socket: announce the close to the monitor, drop the
 * bound device (restoring it to normal operation if this was a user
 * channel) and tear the socket down.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing user channel exclusive access,
			 * close the controller directly so the exclusive
			 * access is really given up, then hand the device
			 * back to the management interface.
			 *
			 * Skip all of this when the device is already being
			 * unregistered, since hci_unregister_dev() performs
			 * the same cleanup itself.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			hci_register_suspend_notifier(hdev);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
0903
0904 static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
0905 {
0906 bdaddr_t bdaddr;
0907 int err;
0908
0909 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
0910 return -EFAULT;
0911
0912 hci_dev_lock(hdev);
0913
0914 err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
0915
0916 hci_dev_unlock(hdev);
0917
0918 return err;
0919 }
0920
0921 static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
0922 {
0923 bdaddr_t bdaddr;
0924 int err;
0925
0926 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
0927 return -EFAULT;
0928
0929 hci_dev_lock(hdev);
0930
0931 err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
0932
0933 hci_dev_unlock(hdev);
0934
0935 return err;
0936 }
0937
0938
/* Ioctls that need the socket's bound hdev; called with the socket
 * lock held from hci_sock_ioctl().
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	/* Device is exclusively owned by a user channel socket */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	/* Unconfigured controllers cannot service these ioctls */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	/* Only primary controllers are supported here */
	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* Raw mode is intentionally not supported anymore */
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
0981
/* Top-level ioctl handler for RAW channel sockets.  Device-list and
 * device-control commands do their own locking and are dispatched with
 * the socket lock dropped; everything else falls through to
 * hci_sock_bound_ioctl() under the socket lock.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, make sure the
	 * monitor gets informed: assign a cookie so later events can be
	 * attributed, and announce the socket as open.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* The commands below perform their own locking */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Remaining commands need a bound device and the socket lock */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
1075
#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl entry: index-based commands carry a plain device
 * index and pass through unchanged; all other commands carry a user
 * pointer that must be translated with compat_ptr().
 */
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	default:
		return hci_sock_ioctl(sock, cmd,
				      (unsigned long)compat_ptr(arg));
	}
}
#endif
1091
/* Bind an HCI socket to a channel (and, for raw/user channels, to a
 * controller).  Handles trust marking, monitor open/close tracing and
 * exclusive user-channel acquisition.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from a dead device, so the caller can re-bind
	 * this socket in response to an HCI_DEV_UNREG notification
	 * instead of having to close it.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* A cookie already exists, meaning an ioctl on the
			 * unbound socket has already triggered an open
			 * notification.  Send a close notification first so
			 * the monitor sees a clean transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires a concrete device */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse devices that are initializing, in setup/config,
		 * or already up (unless only kept up by HCI_AUTO_OFF).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user channel per device */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);
		hci_unregister_suspend_notifier(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* The transport is already up and running;
				 * clear the error.  This can happen when
				 * the HCI_AUTO_OFF grace period is still
				 * active.
				 */
				err = 0;
			} else {
				/* Undo the exclusive acquisition */
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				hci_register_suspend_notifier(hdev);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* A cookie already exists, so this socket is
			 * transitioning from raw to user channel; send a
			 * close notification first for a clean transition.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN and
		 * with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW and
		 * with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay system notes and the current state of all
		 * controllers and sockets to the new monitor.
		 */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		/* Remaining channel numbers must be registered mgmt
		 * channels and never bind to a specific device.
		 */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN get access to all management
		 * commands and events; untrusted users get a restricted
		 * interface and only untrusted events.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* Control sockets start with the generic event flags set;
		 * they may be cleared again later, but enabling them at
		 * bind preserves the historical default behaviour.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* A cookie already exists: this socket is
				 * transitioning from raw to control; send
				 * a close notification first for a clean
				 * transition.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to maximum frame size if not set yet */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1393
1394 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1395 int peer)
1396 {
1397 struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1398 struct sock *sk = sock->sk;
1399 struct hci_dev *hdev;
1400 int err = 0;
1401
1402 BT_DBG("sock %p sk %p", sock, sk);
1403
1404 if (peer)
1405 return -EOPNOTSUPP;
1406
1407 lock_sock(sk);
1408
1409 hdev = hci_hdev_from_sock(sk);
1410 if (IS_ERR(hdev)) {
1411 err = PTR_ERR(hdev);
1412 goto done;
1413 }
1414
1415 haddr->hci_family = AF_BLUETOOTH;
1416 haddr->hci_dev = hdev->id;
1417 haddr->hci_channel= hci_pi(sk)->channel;
1418 err = sizeof(*haddr);
1419
1420 done:
1421 release_sock(sk);
1422 return err;
1423 }
1424
/* Attach the ancillary data requested via cmsg_mask (packet direction
 * and/or receive timestamp) to @msg for a packet received on a RAW
 * channel socket.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks not using 64-bit time expect the old
		 * 32-bit timeval layout instead.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
1461
/* Receive one queued packet from an HCI socket. Raw-channel sockets get
 * HCI ancillary data attached (see hci_sock_cmsg()); user, monitor and
 * management channels get a standard receive timestamp. With MSG_TRUNC
 * the full packet length is reported even if the caller's buffer was
 * smaller. The logging channel is write-only and cannot be read.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* HCI_CHANNEL_LOGGING only accepts outgoing log frames */
	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	/* Remember the full length for MSG_TRUNC reporting below */
	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		/* Management channels also get receive timestamps */
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
1516
/* Process one management command received on a management channel.
 * Validates the mgmt_hdr length field, mirrors control-channel commands
 * to the monitor channel, enforces opcode/trust/index constraints and
 * finally dispatches to the channel's handler table.
 *
 * Returns the number of consumed bytes on success or a negative errno.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must match the actual payload size */
	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send a copy of the command to the monitor channel */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only issue handlers explicitly marked
	 * as available to untrusted users.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed by a user
		 * channel, are not addressable via management commands.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept handlers that have
		 * been marked as supporting them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Unless the handler accepts both cases, the presence of a device
	 * index must match what the handler declared.
	 */
	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Variable-length commands need at least the declared size;
	 * fixed-length commands must match it exactly.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	return err;
}
1631
/* Validate a user-supplied logging frame (HCI_CHANNEL_LOGGING) and, if
 * well-formed, forward it to the monitor channel with its opcode
 * rewritten to HCI_MON_USER_LOGGING. Returns the frame length on
 * success or a negative errno.
 */
static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one NUL
	 * terminator byte; anything shorter is invalid.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only priorities 0-7 are valid. The payload after the
		 * priority byte is an ident length byte followed by a NUL
		 * terminated ident string and a NUL terminated message.
		 *
		 * Note the order of the checks: ident_len is bounded
		 * against the frame size *before* it is used as an index,
		 * so the short-circuit evaluation prevents any
		 * out-of-bounds read.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		/* Only opcode 0x0000 frames are accepted from userspace */
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	/* A specific controller index must refer to an existing device;
	 * MGMT_INDEX_NONE is allowed for device-less log messages.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
1697
/* Send one frame on an HCI socket. Depending on the bound channel the
 * frame is queued towards the controller (raw/user channel), handled as
 * a logging frame, or dispatched as a management command. The first
 * payload byte of raw/user frames selects the HCI packet type.
 *
 * Returns the number of bytes consumed or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Minimum of one packet-type byte plus a 3-byte header; upper
	 * bound is the per-socket MTU.
	 */
	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel is read-only */
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		/* Management channels are looked up under the channel
		 * list lock and handled by hci_mgmt_cmd().
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto drop;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	/* First payload byte is the HCI packet type */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel since
		 * that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter require
		 * CAP_NET_RAW on the raw channel.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			/* Vendor-specific commands bypass the command queue */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw-channel data packets always require CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1834
1835 static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
1836 sockptr_t optval, unsigned int len)
1837 {
1838 struct hci_ufilter uf = { .opcode = 0 };
1839 struct sock *sk = sock->sk;
1840 int err = 0, opt = 0;
1841
1842 BT_DBG("sk %p, opt %d", sk, optname);
1843
1844 lock_sock(sk);
1845
1846 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1847 err = -EBADFD;
1848 goto done;
1849 }
1850
1851 switch (optname) {
1852 case HCI_DATA_DIR:
1853 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1854 err = -EFAULT;
1855 break;
1856 }
1857
1858 if (opt)
1859 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1860 else
1861 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1862 break;
1863
1864 case HCI_TIME_STAMP:
1865 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1866 err = -EFAULT;
1867 break;
1868 }
1869
1870 if (opt)
1871 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1872 else
1873 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1874 break;
1875
1876 case HCI_FILTER:
1877 {
1878 struct hci_filter *f = &hci_pi(sk)->filter;
1879
1880 uf.type_mask = f->type_mask;
1881 uf.opcode = f->opcode;
1882 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1883 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1884 }
1885
1886 len = min_t(unsigned int, len, sizeof(uf));
1887 if (copy_from_sockptr(&uf, optval, len)) {
1888 err = -EFAULT;
1889 break;
1890 }
1891
1892 if (!capable(CAP_NET_RAW)) {
1893 uf.type_mask &= hci_sec_filter.type_mask;
1894 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1895 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1896 }
1897
1898 {
1899 struct hci_filter *f = &hci_pi(sk)->filter;
1900
1901 f->type_mask = uf.type_mask;
1902 f->opcode = uf.opcode;
1903 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1904 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1905 }
1906 break;
1907
1908 default:
1909 err = -ENOPROTOOPT;
1910 break;
1911 }
1912
1913 done:
1914 release_sock(sk);
1915 return err;
1916 }
1917
1918 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1919 sockptr_t optval, unsigned int len)
1920 {
1921 struct sock *sk = sock->sk;
1922 int err = 0;
1923 u16 opt;
1924
1925 BT_DBG("sk %p, opt %d", sk, optname);
1926
1927 if (level == SOL_HCI)
1928 return hci_sock_setsockopt_old(sock, level, optname, optval,
1929 len);
1930
1931 if (level != SOL_BLUETOOTH)
1932 return -ENOPROTOOPT;
1933
1934 lock_sock(sk);
1935
1936 switch (optname) {
1937 case BT_SNDMTU:
1938 case BT_RCVMTU:
1939 switch (hci_pi(sk)->channel) {
1940
1941
1942
1943 case HCI_CHANNEL_RAW:
1944 case HCI_CHANNEL_USER:
1945 err = -ENOPROTOOPT;
1946 goto done;
1947 }
1948
1949 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1950 err = -EFAULT;
1951 break;
1952 }
1953
1954 hci_pi(sk)->mtu = opt;
1955 break;
1956
1957 default:
1958 err = -ENOPROTOOPT;
1959 break;
1960 }
1961
1962 done:
1963 release_sock(sk);
1964 return err;
1965 }
1966
/* Handle the legacy SOL_HCI getsockopt options (raw channel only):
 * report the direction/timestamp cmsg settings and the current event
 * filter. Returns 0 on success or a negative errno.
 */
static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			/* Zero first so no kernel stack bytes leak through
			 * any padding in hci_ufilter.
			 */
			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		/* Copy at most the caller-provided length */
		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
2032
2033 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
2034 char __user *optval, int __user *optlen)
2035 {
2036 struct sock *sk = sock->sk;
2037 int err = 0;
2038
2039 BT_DBG("sk %p, opt %d", sk, optname);
2040
2041 if (level == SOL_HCI)
2042 return hci_sock_getsockopt_old(sock, level, optname, optval,
2043 optlen);
2044
2045 if (level != SOL_BLUETOOTH)
2046 return -ENOPROTOOPT;
2047
2048 lock_sock(sk);
2049
2050 switch (optname) {
2051 case BT_SNDMTU:
2052 case BT_RCVMTU:
2053 if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
2054 err = -EFAULT;
2055 break;
2056
2057 default:
2058 err = -ENOPROTOOPT;
2059 break;
2060 }
2061
2062 release_sock(sk);
2063 return err;
2064 }
2065
/* Socket destructor: drop any packets still queued on the socket */
static void hci_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
2071
/* Socket operations for HCI sockets. Connection-oriented operations
 * (listen/connect/accept/...) are stubbed out since HCI sockets are
 * datagram-style raw sockets.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
2094
/* Protocol definition; obj_size makes sk_alloc() allocate the larger
 * hci_pinfo structure that embeds the bt_sock/sock.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
2100
2101 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2102 int kern)
2103 {
2104 struct sock *sk;
2105
2106 BT_DBG("sock %p", sock);
2107
2108 if (sock->type != SOCK_RAW)
2109 return -ESOCKTNOSUPPORT;
2110
2111 sock->ops = &hci_sock_ops;
2112
2113 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2114 if (!sk)
2115 return -ENOMEM;
2116
2117 sock_init_data(sock, sk);
2118
2119 sock_reset_flag(sk, SOCK_ZAPPED);
2120
2121 sk->sk_protocol = protocol;
2122
2123 sock->state = SS_UNCONNECTED;
2124 sk->sk_state = BT_OPEN;
2125 sk->sk_destruct = hci_sock_destruct;
2126
2127 bt_sock_link(&hci_sk_list, sk);
2128 return 0;
2129 }
2130
/* Family operations used to register BTPROTO_HCI socket creation */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
2136
/* Register the HCI socket layer: the "HCI" proto, the BTPROTO_HCI
 * socket family handler and the /proc/net/bluetooth "hci" entry.
 * On failure everything registered so far is rolled back.
 */
int __init hci_sock_init(void)
{
	int err;

	/* sockaddr_hci must fit in the generic sockaddr storage */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* Undo the family registration before the common rollback */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
2168
/* Tear down everything hci_sock_init() registered, in reverse order */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}