cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hci_sock.c (47964B)


      1/*
      2   BlueZ - Bluetooth protocol stack for Linux
      3   Copyright (C) 2000-2001 Qualcomm Incorporated
      4
      5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
      6
      7   This program is free software; you can redistribute it and/or modify
      8   it under the terms of the GNU General Public License version 2 as
      9   published by the Free Software Foundation;
     10
     11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
     14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
     15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
     16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     19
     20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
     21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
     22   SOFTWARE IS DISCLAIMED.
     23*/
     24
     25/* Bluetooth HCI sockets. */
     26#include <linux/compat.h>
     27#include <linux/export.h>
     28#include <linux/utsname.h>
     29#include <linux/sched.h>
     30#include <asm/unaligned.h>
     31
     32#include <net/bluetooth/bluetooth.h>
     33#include <net/bluetooth/hci_core.h>
     34#include <net/bluetooth/hci_mon.h>
     35#include <net/bluetooth/mgmt.h>
     36
     37#include "mgmt_util.h"
     38
     39static LIST_HEAD(mgmt_chan_list);
     40static DEFINE_MUTEX(mgmt_chan_list_lock);
     41
     42static DEFINE_IDA(sock_cookie_ida);
     43
     44static atomic_t monitor_promisc = ATOMIC_INIT(0);
     45
     46/* ----- HCI socket interface ----- */
     47
     48/* Socket info */
     49#define hci_pi(sk) ((struct hci_pinfo *) sk)
     50
     51struct hci_pinfo {
     52	struct bt_sock    bt;
     53	struct hci_dev    *hdev;
     54	struct hci_filter filter;
     55	__u8              cmsg_mask;
     56	unsigned short    channel;
     57	unsigned long     flags;
     58	__u32             cookie;
     59	char              comm[TASK_COMM_LEN];
     60	__u16             mtu;
     61};
     62
     63static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
     64{
     65	struct hci_dev *hdev = hci_pi(sk)->hdev;
     66
     67	if (!hdev)
     68		return ERR_PTR(-EBADFD);
     69	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
     70		return ERR_PTR(-EPIPE);
     71	return hdev;
     72}
     73
     74void hci_sock_set_flag(struct sock *sk, int nr)
     75{
     76	set_bit(nr, &hci_pi(sk)->flags);
     77}
     78
     79void hci_sock_clear_flag(struct sock *sk, int nr)
     80{
     81	clear_bit(nr, &hci_pi(sk)->flags);
     82}
     83
     84int hci_sock_test_flag(struct sock *sk, int nr)
     85{
     86	return test_bit(nr, &hci_pi(sk)->flags);
     87}
     88
     89unsigned short hci_sock_get_channel(struct sock *sk)
     90{
     91	return hci_pi(sk)->channel;
     92}
     93
     94u32 hci_sock_get_cookie(struct sock *sk)
     95{
     96	return hci_pi(sk)->cookie;
     97}
     98
     99static bool hci_sock_gen_cookie(struct sock *sk)
    100{
    101	int id = hci_pi(sk)->cookie;
    102
    103	if (!id) {
    104		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
    105		if (id < 0)
    106			id = 0xffffffff;
    107
    108		hci_pi(sk)->cookie = id;
    109		get_task_comm(hci_pi(sk)->comm, current);
    110		return true;
    111	}
    112
    113	return false;
    114}
    115
    116static void hci_sock_free_cookie(struct sock *sk)
    117{
    118	int id = hci_pi(sk)->cookie;
    119
    120	if (id) {
    121		hci_pi(sk)->cookie = 0xffffffff;
    122		ida_simple_remove(&sock_cookie_ida, id);
    123	}
    124}
    125
    126static inline int hci_test_bit(int nr, const void *addr)
    127{
    128	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
    129}
    130
    131/* Security filter */
    132#define HCI_SFLT_MAX_OGF  5
    133
    134struct hci_sec_filter {
    135	__u32 type_mask;
    136	__u32 event_mask[2];
    137	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
    138};
    139
    140static const struct hci_sec_filter hci_sec_filter = {
    141	/* Packet types */
    142	0x10,
    143	/* Events */
    144	{ 0x1000d9fe, 0x0000b00c },
    145	/* Commands */
    146	{
    147		{ 0x0 },
    148		/* OGF_LINK_CTL */
    149		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
    150		/* OGF_LINK_POLICY */
    151		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
    152		/* OGF_HOST_CTL */
    153		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
    154		/* OGF_INFO_PARAM */
    155		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
    156		/* OGF_STATUS_PARAM */
    157		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
    158	}
    159};
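
/* Worked example of the mask lookup (illustrative, mirrors the check in
 * hci_sock_sendmsg() below): an opcode packs OGF and OCF as
 * opcode = (ogf << 10) | ocf. Read Local Version Information has opcode
 * 0x1001, so ogf = 0x04 (OGF_INFO_PARAM) and ocf = 0x001; bit 1 of
 * ocf_mask[4][0] = 0x000002be is set, so the command is permitted on
 * unprivileged raw sockets. HCI Reset (opcode 0x0c03, ogf = 0x03,
 * ocf = 0x003) tests bit 3 of ocf_mask[3][0] = 0xaab00200, which is
 * clear, so it requires CAP_NET_RAW.
 */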
    160
    161static struct bt_sock_list hci_sk_list = {
    162	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
    163};
    164
    165static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
    166{
    167	struct hci_filter *flt;
    168	int flt_type, flt_event;
    169
    170	/* Apply filter */
    171	flt = &hci_pi(sk)->filter;
    172
    173	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
    174
    175	if (!test_bit(flt_type, &flt->type_mask))
    176		return true;
    177
    178	/* Extra filter for event packets only */
    179	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
    180		return false;
    181
    182	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
    183
    184	if (!hci_test_bit(flt_event, &flt->event_mask))
    185		return true;
    186
    187	/* Check filter only when opcode is set */
    188	if (!flt->opcode)
    189		return false;
    190
    191	if (flt_event == HCI_EV_CMD_COMPLETE &&
    192	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
    193		return true;
    194
    195	if (flt_event == HCI_EV_CMD_STATUS &&
    196	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
    197		return true;
    198
    199	return false;
    200}
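
/* Worked example of the checks above (illustrative): for an incoming
 * Command Complete event (0x0e) answering opcode 0x1001, flt_type is
 * HCI_EVENT_PKT (0x04), so bit 4 of type_mask must be set; flt_event
 * is 0x0e, so bit 14 of event_mask must be set; and if flt->opcode is
 * non-zero, it is compared against the little-endian opcode at offset
 * 3 of the event payload. Only if all checks pass is the frame queued
 * to the socket.
 */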
    201
    202/* Send frame to RAW socket */
    203void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
    204{
    205	struct sock *sk;
    206	struct sk_buff *skb_copy = NULL;
    207
    208	BT_DBG("hdev %p len %d", hdev, skb->len);
    209
    210	read_lock(&hci_sk_list.lock);
    211
    212	sk_for_each(sk, &hci_sk_list.head) {
    213		struct sk_buff *nskb;
    214
    215		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
    216			continue;
    217
    218		/* Don't send frame to the socket it came from */
    219		if (skb->sk == sk)
    220			continue;
    221
    222		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
    223			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
    224			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
    225			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
    226			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
    227			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
    228				continue;
    229			if (is_filtered_packet(sk, skb))
    230				continue;
    231		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
    232			if (!bt_cb(skb)->incoming)
    233				continue;
    234			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
    235			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
    236			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
    237			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
    238				continue;
    239		} else {
    240			/* Don't send frame to other channel types */
    241			continue;
    242		}
    243
    244		if (!skb_copy) {
    245			/* Create a private copy with headroom */
    246			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
    247			if (!skb_copy)
    248				continue;
    249
    250			/* Put type byte before the data */
    251			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
    252		}
    253
    254		nskb = skb_clone(skb_copy, GFP_ATOMIC);
    255		if (!nskb)
    256			continue;
    257
    258		if (sock_queue_rcv_skb(sk, nskb))
    259			kfree_skb(nskb);
    260	}
    261
    262	read_unlock(&hci_sk_list.lock);
    263
    264	kfree_skb(skb_copy);
    265}
    266
    267/* Send frame to sockets with specific channel */
    268static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
    269				  int flag, struct sock *skip_sk)
    270{
    271	struct sock *sk;
    272
    273	BT_DBG("channel %u len %d", channel, skb->len);
    274
    275	sk_for_each(sk, &hci_sk_list.head) {
    276		struct sk_buff *nskb;
    277
    278		/* Ignore socket without the flag set */
    279		if (!hci_sock_test_flag(sk, flag))
    280			continue;
    281
    282		/* Skip the original socket */
    283		if (sk == skip_sk)
    284			continue;
    285
    286		if (sk->sk_state != BT_BOUND)
    287			continue;
    288
    289		if (hci_pi(sk)->channel != channel)
    290			continue;
    291
    292		nskb = skb_clone(skb, GFP_ATOMIC);
    293		if (!nskb)
    294			continue;
    295
    296		if (sock_queue_rcv_skb(sk, nskb))
    297			kfree_skb(nskb);
    298	}
    299
    300}
    301
    302void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
    303			 int flag, struct sock *skip_sk)
    304{
    305	read_lock(&hci_sk_list.lock);
    306	__hci_send_to_channel(channel, skb, flag, skip_sk);
    307	read_unlock(&hci_sk_list.lock);
    308}
    309
    310/* Send frame to monitor socket */
    311void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
    312{
    313	struct sk_buff *skb_copy = NULL;
    314	struct hci_mon_hdr *hdr;
    315	__le16 opcode;
    316
    317	if (!atomic_read(&monitor_promisc))
    318		return;
    319
    320	BT_DBG("hdev %p len %d", hdev, skb->len);
    321
    322	switch (hci_skb_pkt_type(skb)) {
    323	case HCI_COMMAND_PKT:
    324		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
    325		break;
    326	case HCI_EVENT_PKT:
    327		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
    328		break;
    329	case HCI_ACLDATA_PKT:
    330		if (bt_cb(skb)->incoming)
    331			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
    332		else
    333			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
    334		break;
    335	case HCI_SCODATA_PKT:
    336		if (bt_cb(skb)->incoming)
    337			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
    338		else
    339			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
    340		break;
    341	case HCI_ISODATA_PKT:
    342		if (bt_cb(skb)->incoming)
    343			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
    344		else
    345			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
    346		break;
    347	case HCI_DIAG_PKT:
    348		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
    349		break;
    350	default:
    351		return;
    352	}
    353
    354	/* Create a private copy with headroom */
    355	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
    356	if (!skb_copy)
    357		return;
    358
    359	/* Put header before the data */
    360	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
    361	hdr->opcode = opcode;
    362	hdr->index = cpu_to_le16(hdev->id);
    363	hdr->len = cpu_to_le16(skb->len);
    364
    365	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
    366			    HCI_SOCK_TRUSTED, NULL);
    367	kfree_skb(skb_copy);
    368}
    369
    370void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
    371				 void *data, u16 data_len, ktime_t tstamp,
    372				 int flag, struct sock *skip_sk)
    373{
    374	struct sock *sk;
    375	__le16 index;
    376
    377	if (hdev)
    378		index = cpu_to_le16(hdev->id);
    379	else
    380		index = cpu_to_le16(MGMT_INDEX_NONE);
    381
    382	read_lock(&hci_sk_list.lock);
    383
    384	sk_for_each(sk, &hci_sk_list.head) {
    385		struct hci_mon_hdr *hdr;
    386		struct sk_buff *skb;
    387
    388		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
    389			continue;
    390
    391		/* Ignore socket without the flag set */
    392		if (!hci_sock_test_flag(sk, flag))
    393			continue;
    394
    395		/* Skip the original socket */
    396		if (sk == skip_sk)
    397			continue;
    398
    399		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
    400		if (!skb)
    401			continue;
    402
    403		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
    404		put_unaligned_le16(event, skb_put(skb, 2));
    405
    406		if (data)
    407			skb_put_data(skb, data, data_len);
    408
    409		skb->tstamp = tstamp;
    410
    411		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
    412		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
    413		hdr->index = index;
    414		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
    415
    416		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
    417				      HCI_SOCK_TRUSTED, NULL);
    418		kfree_skb(skb);
    419	}
    420
    421	read_unlock(&hci_sk_list.lock);
    422}
    423
    424static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
    425{
    426	struct hci_mon_hdr *hdr;
    427	struct hci_mon_new_index *ni;
    428	struct hci_mon_index_info *ii;
    429	struct sk_buff *skb;
    430	__le16 opcode;
    431
    432	switch (event) {
    433	case HCI_DEV_REG:
    434		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
    435		if (!skb)
    436			return NULL;
    437
    438		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
    439		ni->type = hdev->dev_type;
    440		ni->bus = hdev->bus;
    441		bacpy(&ni->bdaddr, &hdev->bdaddr);
    442		memcpy(ni->name, hdev->name, 8);
    443
    444		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
    445		break;
    446
    447	case HCI_DEV_UNREG:
    448		skb = bt_skb_alloc(0, GFP_ATOMIC);
    449		if (!skb)
    450			return NULL;
    451
    452		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
    453		break;
    454
    455	case HCI_DEV_SETUP:
    456		if (hdev->manufacturer == 0xffff)
    457			return NULL;
    458		fallthrough;
    459
    460	case HCI_DEV_UP:
    461		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
    462		if (!skb)
    463			return NULL;
    464
    465		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
    466		bacpy(&ii->bdaddr, &hdev->bdaddr);
    467		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
    468
    469		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
    470		break;
    471
    472	case HCI_DEV_OPEN:
    473		skb = bt_skb_alloc(0, GFP_ATOMIC);
    474		if (!skb)
    475			return NULL;
    476
    477		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
    478		break;
    479
    480	case HCI_DEV_CLOSE:
    481		skb = bt_skb_alloc(0, GFP_ATOMIC);
    482		if (!skb)
    483			return NULL;
    484
    485		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
    486		break;
    487
    488	default:
    489		return NULL;
    490	}
    491
    492	__net_timestamp(skb);
    493
    494	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
    495	hdr->opcode = opcode;
    496	hdr->index = cpu_to_le16(hdev->id);
    497	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
    498
    499	return skb;
    500}
    501
    502static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
    503{
    504	struct hci_mon_hdr *hdr;
    505	struct sk_buff *skb;
    506	u16 format;
    507	u8 ver[3];
    508	u32 flags;
    509
    510	/* No message needed when cookie is not present */
    511	if (!hci_pi(sk)->cookie)
    512		return NULL;
    513
    514	switch (hci_pi(sk)->channel) {
    515	case HCI_CHANNEL_RAW:
    516		format = 0x0000;
    517		ver[0] = BT_SUBSYS_VERSION;
    518		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
    519		break;
    520	case HCI_CHANNEL_USER:
    521		format = 0x0001;
    522		ver[0] = BT_SUBSYS_VERSION;
    523		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
    524		break;
    525	case HCI_CHANNEL_CONTROL:
    526		format = 0x0002;
    527		mgmt_fill_version_info(ver);
    528		break;
    529	default:
    530		/* No message for unsupported format */
    531		return NULL;
    532	}
    533
    534	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
    535	if (!skb)
    536		return NULL;
    537
    538	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
    539
    540	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
    541	put_unaligned_le16(format, skb_put(skb, 2));
    542	skb_put_data(skb, ver, sizeof(ver));
    543	put_unaligned_le32(flags, skb_put(skb, 4));
    544	skb_put_u8(skb, TASK_COMM_LEN);
    545	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
    546
    547	__net_timestamp(skb);
    548
    549	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
    550	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
    551	if (hci_pi(sk)->hdev)
    552		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
    553	else
    554		hdr->index = cpu_to_le16(HCI_DEV_NONE);
    555	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
    556
    557	return skb;
    558}
    559
    560static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
    561{
    562	struct hci_mon_hdr *hdr;
    563	struct sk_buff *skb;
    564
    565	/* No message needed when cookie is not present */
    566	if (!hci_pi(sk)->cookie)
    567		return NULL;
    568
    569	switch (hci_pi(sk)->channel) {
    570	case HCI_CHANNEL_RAW:
    571	case HCI_CHANNEL_USER:
    572	case HCI_CHANNEL_CONTROL:
    573		break;
    574	default:
    575		/* No message for unsupported format */
    576		return NULL;
    577	}
    578
    579	skb = bt_skb_alloc(4, GFP_ATOMIC);
    580	if (!skb)
    581		return NULL;
    582
    583	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
    584
    585	__net_timestamp(skb);
    586
    587	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
    588	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
    589	if (hci_pi(sk)->hdev)
    590		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
    591	else
    592		hdr->index = cpu_to_le16(HCI_DEV_NONE);
    593	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
    594
    595	return skb;
    596}
    597
    598static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
    599						   u16 opcode, u16 len,
    600						   const void *buf)
    601{
    602	struct hci_mon_hdr *hdr;
    603	struct sk_buff *skb;
    604
    605	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
    606	if (!skb)
    607		return NULL;
    608
    609	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
    610	put_unaligned_le16(opcode, skb_put(skb, 2));
    611
    612	if (buf)
    613		skb_put_data(skb, buf, len);
    614
    615	__net_timestamp(skb);
    616
    617	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
    618	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
    619	hdr->index = cpu_to_le16(index);
    620	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
    621
    622	return skb;
    623}
    624
    625static void __printf(2, 3)
    626send_monitor_note(struct sock *sk, const char *fmt, ...)
    627{
    628	size_t len;
    629	struct hci_mon_hdr *hdr;
    630	struct sk_buff *skb;
    631	va_list args;
    632
    633	va_start(args, fmt);
    634	len = vsnprintf(NULL, 0, fmt, args);
    635	va_end(args);
    636
    637	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
    638	if (!skb)
    639		return;
    640
    641	va_start(args, fmt);
    642	vsprintf(skb_put(skb, len), fmt, args);
    643	*(u8 *)skb_put(skb, 1) = 0;
    644	va_end(args);
    645
    646	__net_timestamp(skb);
    647
    648	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
    649	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
    650	hdr->index = cpu_to_le16(HCI_DEV_NONE);
    651	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
    652
    653	if (sock_queue_rcv_skb(sk, skb))
    654		kfree_skb(skb);
    655}
    656
    657static void send_monitor_replay(struct sock *sk)
    658{
    659	struct hci_dev *hdev;
    660
    661	read_lock(&hci_dev_list_lock);
    662
    663	list_for_each_entry(hdev, &hci_dev_list, list) {
    664		struct sk_buff *skb;
    665
    666		skb = create_monitor_event(hdev, HCI_DEV_REG);
    667		if (!skb)
    668			continue;
    669
    670		if (sock_queue_rcv_skb(sk, skb))
    671			kfree_skb(skb);
    672
    673		if (!test_bit(HCI_RUNNING, &hdev->flags))
    674			continue;
    675
    676		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
    677		if (!skb)
    678			continue;
    679
    680		if (sock_queue_rcv_skb(sk, skb))
    681			kfree_skb(skb);
    682
    683		if (test_bit(HCI_UP, &hdev->flags))
    684			skb = create_monitor_event(hdev, HCI_DEV_UP);
    685		else if (hci_dev_test_flag(hdev, HCI_SETUP))
    686			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
    687		else
    688			skb = NULL;
    689
    690		if (skb) {
    691			if (sock_queue_rcv_skb(sk, skb))
    692				kfree_skb(skb);
    693		}
    694	}
    695
    696	read_unlock(&hci_dev_list_lock);
    697}
    698
    699static void send_monitor_control_replay(struct sock *mon_sk)
    700{
    701	struct sock *sk;
    702
    703	read_lock(&hci_sk_list.lock);
    704
    705	sk_for_each(sk, &hci_sk_list.head) {
    706		struct sk_buff *skb;
    707
    708		skb = create_monitor_ctrl_open(sk);
    709		if (!skb)
    710			continue;
    711
    712		if (sock_queue_rcv_skb(mon_sk, skb))
    713			kfree_skb(skb);
    714	}
    715
    716	read_unlock(&hci_sk_list.lock);
    717}
    718
    719/* Generate internal stack event */
    720static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
    721{
    722	struct hci_event_hdr *hdr;
    723	struct hci_ev_stack_internal *ev;
    724	struct sk_buff *skb;
    725
    726	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
    727	if (!skb)
    728		return;
    729
    730	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
    731	hdr->evt  = HCI_EV_STACK_INTERNAL;
    732	hdr->plen = sizeof(*ev) + dlen;
    733
    734	ev = skb_put(skb, sizeof(*ev) + dlen);
    735	ev->type = type;
    736	memcpy(ev->data, data, dlen);
    737
    738	bt_cb(skb)->incoming = 1;
    739	__net_timestamp(skb);
    740
    741	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
    742	hci_send_to_sock(hdev, skb);
    743	kfree_skb(skb);
    744}
    745
    746void hci_sock_dev_event(struct hci_dev *hdev, int event)
    747{
    748	BT_DBG("hdev %s event %d", hdev->name, event);
    749
    750	if (atomic_read(&monitor_promisc)) {
    751		struct sk_buff *skb;
    752
    753		/* Send event to monitor */
    754		skb = create_monitor_event(hdev, event);
    755		if (skb) {
    756			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
    757					    HCI_SOCK_TRUSTED, NULL);
    758			kfree_skb(skb);
    759		}
    760	}
    761
    762	if (event <= HCI_DEV_DOWN) {
    763		struct hci_ev_si_device ev;
    764
    765		/* Send event to sockets */
    766		ev.event  = event;
    767		ev.dev_id = hdev->id;
    768		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
    769	}
    770
    771	if (event == HCI_DEV_UNREG) {
    772		struct sock *sk;
    773
    774		/* Wake up sockets using this dead device */
    775		read_lock(&hci_sk_list.lock);
    776		sk_for_each(sk, &hci_sk_list.head) {
    777			if (hci_pi(sk)->hdev == hdev) {
    778				sk->sk_err = EPIPE;
    779				sk->sk_state_change(sk);
    780			}
    781		}
    782		read_unlock(&hci_sk_list.lock);
    783	}
    784}
    785
    786static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
    787{
    788	struct hci_mgmt_chan *c;
    789
    790	list_for_each_entry(c, &mgmt_chan_list, list) {
    791		if (c->channel == channel)
    792			return c;
    793	}
    794
    795	return NULL;
    796}
    797
    798static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
    799{
    800	struct hci_mgmt_chan *c;
    801
    802	mutex_lock(&mgmt_chan_list_lock);
    803	c = __hci_mgmt_chan_find(channel);
    804	mutex_unlock(&mgmt_chan_list_lock);
    805
    806	return c;
    807}
    808
    809int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
    810{
    811	if (c->channel < HCI_CHANNEL_CONTROL)
    812		return -EINVAL;
    813
    814	mutex_lock(&mgmt_chan_list_lock);
    815	if (__hci_mgmt_chan_find(c->channel)) {
    816		mutex_unlock(&mgmt_chan_list_lock);
    817		return -EALREADY;
    818	}
    819
    820	list_add_tail(&c->list, &mgmt_chan_list);
    821
    822	mutex_unlock(&mgmt_chan_list_lock);
    823
    824	return 0;
    825}
    826EXPORT_SYMBOL(hci_mgmt_chan_register);
    827
    828void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
    829{
    830	mutex_lock(&mgmt_chan_list_lock);
    831	list_del(&c->list);
    832	mutex_unlock(&mgmt_chan_list_lock);
    833}
    834EXPORT_SYMBOL(hci_mgmt_chan_unregister);
    835
    836static int hci_sock_release(struct socket *sock)
    837{
    838	struct sock *sk = sock->sk;
    839	struct hci_dev *hdev;
    840	struct sk_buff *skb;
    841
    842	BT_DBG("sock %p sk %p", sock, sk);
    843
    844	if (!sk)
    845		return 0;
    846
    847	lock_sock(sk);
    848
    849	switch (hci_pi(sk)->channel) {
    850	case HCI_CHANNEL_MONITOR:
    851		atomic_dec(&monitor_promisc);
    852		break;
    853	case HCI_CHANNEL_RAW:
    854	case HCI_CHANNEL_USER:
    855	case HCI_CHANNEL_CONTROL:
    856		/* Send event to monitor */
    857		skb = create_monitor_ctrl_close(sk);
    858		if (skb) {
    859			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
    860					    HCI_SOCK_TRUSTED, NULL);
    861			kfree_skb(skb);
    862		}
    863
    864		hci_sock_free_cookie(sk);
    865		break;
    866	}
    867
    868	bt_sock_unlink(&hci_sk_list, sk);
    869
    870	hdev = hci_pi(sk)->hdev;
    871	if (hdev) {
    872		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
    873			/* When releasing a user channel exclusive access,
    874			 * call hci_dev_do_close directly instead of calling
    875			 * hci_dev_close to ensure the exclusive access will
    876			 * be released and the controller brought back down.
    877			 *
    878			 * The checking of HCI_AUTO_OFF is not needed in this
    879			 * case since it will have been cleared already when
    880			 * opening the user channel.
    881			 */
    882			hci_dev_do_close(hdev);
    883			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
    884			mgmt_index_added(hdev);
    885		}
    886
    887		atomic_dec(&hdev->promisc);
    888		hci_dev_put(hdev);
    889	}
    890
    891	sock_orphan(sk);
    892	release_sock(sk);
    893	sock_put(sk);
    894	return 0;
    895}
    896
    897static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
    898{
    899	bdaddr_t bdaddr;
    900	int err;
    901
    902	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
    903		return -EFAULT;
    904
    905	hci_dev_lock(hdev);
    906
    907	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
    908
    909	hci_dev_unlock(hdev);
    910
    911	return err;
    912}
    913
    914static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
    915{
    916	bdaddr_t bdaddr;
    917	int err;
    918
    919	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
    920		return -EFAULT;
    921
    922	hci_dev_lock(hdev);
    923
    924	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
    925
    926	hci_dev_unlock(hdev);
    927
    928	return err;
    929}
    930
    931/* Ioctls that require bound socket */
    932static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
    933				unsigned long arg)
    934{
    935	struct hci_dev *hdev = hci_hdev_from_sock(sk);
    936
    937	if (IS_ERR(hdev))
    938		return PTR_ERR(hdev);
    939
    940	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
    941		return -EBUSY;
    942
    943	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
    944		return -EOPNOTSUPP;
    945
    946	if (hdev->dev_type != HCI_PRIMARY)
    947		return -EOPNOTSUPP;
    948
    949	switch (cmd) {
    950	case HCISETRAW:
    951		if (!capable(CAP_NET_ADMIN))
    952			return -EPERM;
    953		return -EOPNOTSUPP;
    954
    955	case HCIGETCONNINFO:
    956		return hci_get_conn_info(hdev, (void __user *)arg);
    957
    958	case HCIGETAUTHINFO:
    959		return hci_get_auth_info(hdev, (void __user *)arg);
    960
    961	case HCIBLOCKADDR:
    962		if (!capable(CAP_NET_ADMIN))
    963			return -EPERM;
    964		return hci_sock_reject_list_add(hdev, (void __user *)arg);
    965
    966	case HCIUNBLOCKADDR:
    967		if (!capable(CAP_NET_ADMIN))
    968			return -EPERM;
    969		return hci_sock_reject_list_del(hdev, (void __user *)arg);
    970	}
    971
    972	return -ENOIOCTLCMD;
    973}
    974
    975static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
    976			  unsigned long arg)
    977{
    978	void __user *argp = (void __user *)arg;
    979	struct sock *sk = sock->sk;
    980	int err;
    981
    982	BT_DBG("cmd %x arg %lx", cmd, arg);
    983
    984	lock_sock(sk);
    985
    986	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
    987		err = -EBADFD;
    988		goto done;
    989	}
    990
    991	/* When calling an ioctl on an unbound raw socket, ensure that
    992	 * the monitor gets informed. Ensure that the resulting event is
    993	 * only sent once by checking whether the cookie already exists.
    994	 * The socket cookie will only ever be generated once for the
    995	 * lifetime of a given socket.
    996	 */
    997	if (hci_sock_gen_cookie(sk)) {
    998		struct sk_buff *skb;
    999
   1000		if (capable(CAP_NET_ADMIN))
   1001			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
   1002
   1003		/* Send event to monitor */
   1004		skb = create_monitor_ctrl_open(sk);
   1005		if (skb) {
   1006			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
   1007					    HCI_SOCK_TRUSTED, NULL);
   1008			kfree_skb(skb);
   1009		}
   1010	}
   1011
   1012	release_sock(sk);
   1013
   1014	switch (cmd) {
   1015	case HCIGETDEVLIST:
   1016		return hci_get_dev_list(argp);
   1017
   1018	case HCIGETDEVINFO:
   1019		return hci_get_dev_info(argp);
   1020
   1021	case HCIGETCONNLIST:
   1022		return hci_get_conn_list(argp);
   1023
   1024	case HCIDEVUP:
   1025		if (!capable(CAP_NET_ADMIN))
   1026			return -EPERM;
   1027		return hci_dev_open(arg);
   1028
   1029	case HCIDEVDOWN:
   1030		if (!capable(CAP_NET_ADMIN))
   1031			return -EPERM;
   1032		return hci_dev_close(arg);
   1033
   1034	case HCIDEVRESET:
   1035		if (!capable(CAP_NET_ADMIN))
   1036			return -EPERM;
   1037		return hci_dev_reset(arg);
   1038
   1039	case HCIDEVRESTAT:
   1040		if (!capable(CAP_NET_ADMIN))
   1041			return -EPERM;
   1042		return hci_dev_reset_stat(arg);
   1043
   1044	case HCISETSCAN:
   1045	case HCISETAUTH:
   1046	case HCISETENCRYPT:
   1047	case HCISETPTYPE:
   1048	case HCISETLINKPOL:
   1049	case HCISETLINKMODE:
   1050	case HCISETACLMTU:
   1051	case HCISETSCOMTU:
   1052		if (!capable(CAP_NET_ADMIN))
   1053			return -EPERM;
   1054		return hci_dev_cmd(cmd, argp);
   1055
   1056	case HCIINQUIRY:
   1057		return hci_inquiry(argp);
   1058	}
   1059
   1060	lock_sock(sk);
   1061
   1062	err = hci_sock_bound_ioctl(sk, cmd, arg);
   1063
   1064done:
   1065	release_sock(sk);
   1066	return err;
   1067}
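
/* Illustrative userspace sketch (not part of this file): enumerating
 * controllers via the HCIGETDEVLIST ioctl handled above. It assumes the
 * BlueZ userspace headers; verify struct hci_dev_list_req and
 * HCI_MAX_DEV against <bluetooth/hci.h> on the target system.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int main(void)
 *	{
 *		struct hci_dev_list_req *dl;
 *		int fd, i;
 *
 *		fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *		if (fd < 0)
 *			return 1;
 *
 *		dl = calloc(1, sizeof(*dl) +
 *			    HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *		if (!dl) {
 *			close(fd);
 *			return 1;
 *		}
 *		dl->dev_num = HCI_MAX_DEV;
 *
 *		// on success the kernel rewrites dev_num to the
 *		// number of registered controllers
 *		if (ioctl(fd, HCIGETDEVLIST, dl) == 0)
 *			for (i = 0; i < dl->dev_num; i++)
 *				printf("hci%d\n", dl->dev_req[i].dev_id);
 *
 *		free(dl);
 *		close(fd);
 *		return 0;
 *	}
 */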
   1068
   1069#ifdef CONFIG_COMPAT
   1070static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
   1071				 unsigned long arg)
   1072{
   1073	switch (cmd) {
   1074	case HCIDEVUP:
   1075	case HCIDEVDOWN:
   1076	case HCIDEVRESET:
   1077	case HCIDEVRESTAT:
   1078		return hci_sock_ioctl(sock, cmd, arg);
   1079	}
   1080
   1081	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
   1082}
   1083#endif
   1084
   1085static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
   1086			 int addr_len)
   1087{
   1088	struct sockaddr_hci haddr;
   1089	struct sock *sk = sock->sk;
   1090	struct hci_dev *hdev = NULL;
   1091	struct sk_buff *skb;
   1092	int len, err = 0;
   1093
   1094	BT_DBG("sock %p sk %p", sock, sk);
   1095
   1096	if (!addr)
   1097		return -EINVAL;
   1098
   1099	memset(&haddr, 0, sizeof(haddr));
   1100	len = min_t(unsigned int, sizeof(haddr), addr_len);
   1101	memcpy(&haddr, addr, len);
   1102
   1103	if (haddr.hci_family != AF_BLUETOOTH)
   1104		return -EINVAL;
   1105
   1106	lock_sock(sk);
   1107
   1108	/* Allow detaching from a dead device and attaching to a live one, if
   1109	 * the caller wants to re-bind (instead of close) this socket in
   1110	 * response to a hci_sock_dev_event(HCI_DEV_UNREG) notification.
   1111	 */
   1112	hdev = hci_pi(sk)->hdev;
   1113	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
   1114		hci_pi(sk)->hdev = NULL;
   1115		sk->sk_state = BT_OPEN;
   1116		hci_dev_put(hdev);
   1117	}
   1118	hdev = NULL;
   1119
   1120	if (sk->sk_state == BT_BOUND) {
   1121		err = -EALREADY;
   1122		goto done;
   1123	}
   1124
   1125	switch (haddr.hci_channel) {
   1126	case HCI_CHANNEL_RAW:
   1127		if (hci_pi(sk)->hdev) {
   1128			err = -EALREADY;
   1129			goto done;
   1130		}
   1131
   1132		if (haddr.hci_dev != HCI_DEV_NONE) {
   1133			hdev = hci_dev_get(haddr.hci_dev);
   1134			if (!hdev) {
   1135				err = -ENODEV;
   1136				goto done;
   1137			}
   1138
   1139			atomic_inc(&hdev->promisc);
   1140		}
   1141
   1142		hci_pi(sk)->channel = haddr.hci_channel;
   1143
   1144		if (!hci_sock_gen_cookie(sk)) {
   1145		/* If a cookie has already been assigned, an ioctl has
   1146		 * already been issued against the unbound socket, which
   1147		 * triggered an open notification. Send a close
   1148		 * notification first to allow a clean transition to the
   1149		 * bound state.
   1150		 */
   1151			skb = create_monitor_ctrl_close(sk);
   1152			if (skb) {
   1153				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
   1154						    HCI_SOCK_TRUSTED, NULL);
   1155				kfree_skb(skb);
   1156			}
   1157		}
   1158
   1159		if (capable(CAP_NET_ADMIN))
   1160			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
   1161
   1162		hci_pi(sk)->hdev = hdev;
   1163
   1164		/* Send event to monitor */
   1165		skb = create_monitor_ctrl_open(sk);
   1166		if (skb) {
   1167			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
   1168					    HCI_SOCK_TRUSTED, NULL);
   1169			kfree_skb(skb);
   1170		}
   1171		break;
   1172
   1173	case HCI_CHANNEL_USER:
   1174		if (hci_pi(sk)->hdev) {
   1175			err = -EALREADY;
   1176			goto done;
   1177		}
   1178
   1179		if (haddr.hci_dev == HCI_DEV_NONE) {
   1180			err = -EINVAL;
   1181			goto done;
   1182		}
   1183
   1184		if (!capable(CAP_NET_ADMIN)) {
   1185			err = -EPERM;
   1186			goto done;
   1187		}
   1188
   1189		hdev = hci_dev_get(haddr.hci_dev);
   1190		if (!hdev) {
   1191			err = -ENODEV;
   1192			goto done;
   1193		}
   1194
   1195		if (test_bit(HCI_INIT, &hdev->flags) ||
   1196		    hci_dev_test_flag(hdev, HCI_SETUP) ||
   1197		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
   1198		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
   1199		     test_bit(HCI_UP, &hdev->flags))) {
   1200			err = -EBUSY;
   1201			hci_dev_put(hdev);
   1202			goto done;
   1203		}
   1204
   1205		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
   1206			err = -EUSERS;
   1207			hci_dev_put(hdev);
   1208			goto done;
   1209		}
   1210
   1211		mgmt_index_removed(hdev);
   1212
   1213		err = hci_dev_open(hdev->id);
   1214		if (err) {
   1215			if (err == -EALREADY) {
   1216				/* In case the transport is already up and
   1217				 * running, clear the error here.
   1218				 *
   1219				 * This can happen when opening a user
   1220				 * channel and HCI_AUTO_OFF grace period
   1221				 * is still active.
   1222				 */
   1223				err = 0;
   1224			} else {
   1225				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
   1226				mgmt_index_added(hdev);
   1227				hci_dev_put(hdev);
   1228				goto done;
   1229			}
   1230		}
   1231
   1232		hci_pi(sk)->channel = haddr.hci_channel;
   1233
   1234		if (!hci_sock_gen_cookie(sk)) {
   1235			/* In the case when a cookie has already been assigned,
   1236			 * this socket will transition from a raw socket into
   1237			 * a user channel socket. For a clean transition, send
   1238			 * the close notification first.
   1239			 */
   1240			skb = create_monitor_ctrl_close(sk);
   1241			if (skb) {
   1242				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
   1243						    HCI_SOCK_TRUSTED, NULL);
   1244				kfree_skb(skb);
   1245			}
   1246		}
   1247
   1248		/* The user channel is restricted to CAP_NET_ADMIN
   1249		 * capabilities and with that implicitly trusted.
   1250		 */
   1251		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
   1252
   1253		hci_pi(sk)->hdev = hdev;
   1254
   1255		/* Send event to monitor */
   1256		skb = create_monitor_ctrl_open(sk);
   1257		if (skb) {
   1258			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
   1259					    HCI_SOCK_TRUSTED, NULL);
   1260			kfree_skb(skb);
   1261		}
   1262
   1263		atomic_inc(&hdev->promisc);
   1264		break;
   1265
   1266	case HCI_CHANNEL_MONITOR:
   1267		if (haddr.hci_dev != HCI_DEV_NONE) {
   1268			err = -EINVAL;
   1269			goto done;
   1270		}
   1271
   1272		if (!capable(CAP_NET_RAW)) {
   1273			err = -EPERM;
   1274			goto done;
   1275		}
   1276
   1277		hci_pi(sk)->channel = haddr.hci_channel;
   1278
   1279		/* The monitor interface is restricted to CAP_NET_RAW
   1280		 * capabilities and with that implicitly trusted.
   1281		 */
   1282		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
   1283
   1284		send_monitor_note(sk, "Linux version %s (%s)",
   1285				  init_utsname()->release,
   1286				  init_utsname()->machine);
   1287		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
   1288				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
   1289		send_monitor_replay(sk);
   1290		send_monitor_control_replay(sk);
   1291
   1292		atomic_inc(&monitor_promisc);
   1293		break;
   1294
   1295	case HCI_CHANNEL_LOGGING:
   1296		if (haddr.hci_dev != HCI_DEV_NONE) {
   1297			err = -EINVAL;
   1298			goto done;
   1299		}
   1300
   1301		if (!capable(CAP_NET_ADMIN)) {
   1302			err = -EPERM;
   1303			goto done;
   1304		}
   1305
   1306		hci_pi(sk)->channel = haddr.hci_channel;
   1307		break;
   1308
   1309	default:
   1310		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
   1311			err = -EINVAL;
   1312			goto done;
   1313		}
   1314
   1315		if (haddr.hci_dev != HCI_DEV_NONE) {
   1316			err = -EINVAL;
   1317			goto done;
   1318		}
   1319
   1320		/* Users with the CAP_NET_ADMIN capability are allowed
   1321		 * access to all management commands and events. For
   1322		 * untrusted users the interface is restricted, and
   1323		 * only untrusted events are sent.
   1324		 */
   1325		if (capable(CAP_NET_ADMIN))
   1326			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
   1327
   1328		hci_pi(sk)->channel = haddr.hci_channel;
   1329
   1330		/* At the moment the index and unconfigured index events
   1331		 * are enabled unconditionally. Setting them on each
   1332		 * socket when binding keeps this functionality. They
   1333		 * might, however, be cleared later, in which case sending
   1334		 * of these events is disabled, but that is then intentional.
   1335		 *
   1336		 * This also enables generic events that are safe to be
   1337		 * received by untrusted users. Examples of such events
   1338		 * are changes to settings, class of device, name etc.
   1339		 */
   1340		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
   1341			if (!hci_sock_gen_cookie(sk)) {
   1342				/* In the case when a cookie has already been
   1343				 * assigned, this socket will transition from
   1344				 * a raw socket into a control socket. To
   1345				 * allow for a clean transition, send the
   1346				 * close notification first.
   1347				 */
   1348				skb = create_monitor_ctrl_close(sk);
   1349				if (skb) {
   1350					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
   1351							    HCI_SOCK_TRUSTED, NULL);
   1352					kfree_skb(skb);
   1353				}
   1354			}
   1355
   1356			/* Send event to monitor */
   1357			skb = create_monitor_ctrl_open(sk);
   1358			if (skb) {
   1359				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
   1360						    HCI_SOCK_TRUSTED, NULL);
   1361				kfree_skb(skb);
   1362			}
   1363
   1364			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
   1365			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
   1366			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
   1367			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
   1368			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
   1369			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
   1370		}
   1371		break;
   1372	}
   1373
   1374	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
   1375	if (!hci_pi(sk)->mtu)
   1376		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;
   1377
   1378	sk->sk_state = BT_BOUND;
   1379
   1380done:
   1381	release_sock(sk);
   1382	return err;
   1383}
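
/* Illustrative userspace sketch (not part of this file): binding the
 * monitor channel handled above, the way btmon does. Requires
 * CAP_NET_RAW. HCI_CHANNEL_MONITOR and HCI_DEV_NONE are assumed from
 * the BlueZ userspace headers; verify them locally.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int open_monitor(void)
 *	{
 *		struct sockaddr_hci addr;
 *		int fd;
 *
 *		fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.hci_family = AF_BLUETOOTH;
 *		addr.hci_dev = HCI_DEV_NONE;
 *		addr.hci_channel = HCI_CHANNEL_MONITOR;
 *		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *
 *		// Each read() now returns one frame prefixed with the
 *		// 6-byte hci_mon_hdr (opcode, index, len).
 *		return fd;
 *	}
 */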
   1384
   1385static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
   1386			    int peer)
   1387{
   1388	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
   1389	struct sock *sk = sock->sk;
   1390	struct hci_dev *hdev;
   1391	int err = 0;
   1392
   1393	BT_DBG("sock %p sk %p", sock, sk);
   1394
   1395	if (peer)
   1396		return -EOPNOTSUPP;
   1397
   1398	lock_sock(sk);
   1399
   1400	hdev = hci_hdev_from_sock(sk);
   1401	if (IS_ERR(hdev)) {
   1402		err = PTR_ERR(hdev);
   1403		goto done;
   1404	}
   1405
   1406	haddr->hci_family = AF_BLUETOOTH;
   1407	haddr->hci_dev    = hdev->id;
   1408	haddr->hci_channel = hci_pi(sk)->channel;
   1409	err = sizeof(*haddr);
   1410
   1411done:
   1412	release_sock(sk);
   1413	return err;
   1414}
   1415
   1416static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
   1417			  struct sk_buff *skb)
   1418{
   1419	__u8 mask = hci_pi(sk)->cmsg_mask;
   1420
   1421	if (mask & HCI_CMSG_DIR) {
   1422		int incoming = bt_cb(skb)->incoming;
   1423		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
   1424			 &incoming);
   1425	}
   1426
   1427	if (mask & HCI_CMSG_TSTAMP) {
   1428#ifdef CONFIG_COMPAT
   1429		struct old_timeval32 ctv;
   1430#endif
   1431		struct __kernel_old_timeval tv;
   1432		void *data;
   1433		int len;
   1434
   1435		skb_get_timestamp(skb, &tv);
   1436
   1437		data = &tv;
   1438		len = sizeof(tv);
   1439#ifdef CONFIG_COMPAT
   1440		if (!COMPAT_USE_64BIT_TIME &&
   1441		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
   1442			ctv.tv_sec = tv.tv_sec;
   1443			ctv.tv_usec = tv.tv_usec;
   1444			data = &ctv;
   1445			len = sizeof(ctv);
   1446		}
   1447#endif
   1448
   1449		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
   1450	}
   1451}
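
/* Illustrative userspace sketch (not part of this file): receiving a
 * frame together with the ancillary data emitted above. It assumes
 * direction reporting was enabled first through the HCI_DATA_DIR
 * socket option, and takes HCI_CMSG_DIR and HCI_MAX_FRAME_SIZE from
 * the BlueZ userspace headers.
 *
 *	uint8_t buf[HCI_MAX_FRAME_SIZE], ctl[64];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctl, .msg_controllen = sizeof(ctl),
 *	};
 *	struct cmsghdr *c;
 *	int dir = -1;
 *
 *	if (recvmsg(fd, &msg, 0) > 0)
 *		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *			if (c->cmsg_level == SOL_HCI &&
 *			    c->cmsg_type == HCI_CMSG_DIR)
 *				// dir == 1 means the frame was incoming
 *				memcpy(&dir, CMSG_DATA(c), sizeof(dir));
 */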
   1452
   1453static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
   1454			    size_t len, int flags)
   1455{
   1456	struct sock *sk = sock->sk;
   1457	struct sk_buff *skb;
   1458	int copied, err;
   1459	unsigned int skblen;
   1460
   1461	BT_DBG("sock %p, sk %p", sock, sk);
   1462
   1463	if (flags & MSG_OOB)
   1464		return -EOPNOTSUPP;
   1465
   1466	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
   1467		return -EOPNOTSUPP;
   1468
   1469	if (sk->sk_state == BT_CLOSED)
   1470		return 0;
   1471
   1472	skb = skb_recv_datagram(sk, flags, &err);
   1473	if (!skb)
   1474		return err;
   1475
   1476	skblen = skb->len;
   1477	copied = skb->len;
   1478	if (len < copied) {
   1479		msg->msg_flags |= MSG_TRUNC;
   1480		copied = len;
   1481	}
   1482
   1483	skb_reset_transport_header(skb);
   1484	err = skb_copy_datagram_msg(skb, 0, msg, copied);
   1485
   1486	switch (hci_pi(sk)->channel) {
   1487	case HCI_CHANNEL_RAW:
   1488		hci_sock_cmsg(sk, msg, skb);
   1489		break;
   1490	case HCI_CHANNEL_USER:
   1491	case HCI_CHANNEL_MONITOR:
   1492		sock_recv_timestamp(msg, sk, skb);
   1493		break;
   1494	default:
   1495		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
   1496			sock_recv_timestamp(msg, sk, skb);
   1497		break;
   1498	}
   1499
   1500	skb_free_datagram(sk, skb);
   1501
   1502	if (flags & MSG_TRUNC)
   1503		copied = skblen;
   1504
   1505	return err ? : copied;
   1506}
   1507
   1508static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
   1509			struct sk_buff *skb)
   1510{
   1511	u8 *cp;
   1512	struct mgmt_hdr *hdr;
   1513	u16 opcode, index, len;
   1514	struct hci_dev *hdev = NULL;
   1515	const struct hci_mgmt_handler *handler;
   1516	bool var_len, no_hdev;
   1517	int err;
   1518
   1519	BT_DBG("got %d bytes", skb->len);
   1520
   1521	if (skb->len < sizeof(*hdr))
   1522		return -EINVAL;
   1523
   1524	hdr = (void *)skb->data;
   1525	opcode = __le16_to_cpu(hdr->opcode);
   1526	index = __le16_to_cpu(hdr->index);
   1527	len = __le16_to_cpu(hdr->len);
   1528
   1529	if (len != skb->len - sizeof(*hdr)) {
   1530		err = -EINVAL;
   1531		goto done;
   1532	}
   1533
   1534	if (chan->channel == HCI_CHANNEL_CONTROL) {
   1535		struct sk_buff *cmd;
   1536
   1537		/* Send event to monitor */
   1538		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
   1539						  skb->data + sizeof(*hdr));
   1540		if (cmd) {
   1541			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
   1542					    HCI_SOCK_TRUSTED, NULL);
   1543			kfree_skb(cmd);
   1544		}
   1545	}
   1546
   1547	if (opcode >= chan->handler_count ||
   1548	    chan->handlers[opcode].func == NULL) {
   1549		BT_DBG("Unknown op %u", opcode);
   1550		err = mgmt_cmd_status(sk, index, opcode,
   1551				      MGMT_STATUS_UNKNOWN_COMMAND);
   1552		goto done;
   1553	}
   1554
   1555	handler = &chan->handlers[opcode];
   1556
   1557	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
   1558	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
   1559		err = mgmt_cmd_status(sk, index, opcode,
   1560				      MGMT_STATUS_PERMISSION_DENIED);
   1561		goto done;
   1562	}
   1563
   1564	if (index != MGMT_INDEX_NONE) {
   1565		hdev = hci_dev_get(index);
   1566		if (!hdev) {
   1567			err = mgmt_cmd_status(sk, index, opcode,
   1568					      MGMT_STATUS_INVALID_INDEX);
   1569			goto done;
   1570		}
   1571
   1572		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
   1573		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
   1574		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
   1575			err = mgmt_cmd_status(sk, index, opcode,
   1576					      MGMT_STATUS_INVALID_INDEX);
   1577			goto done;
   1578		}
   1579
   1580		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
   1581		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
   1582			err = mgmt_cmd_status(sk, index, opcode,
   1583					      MGMT_STATUS_INVALID_INDEX);
   1584			goto done;
   1585		}
   1586	}
   1587
   1588	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
   1589		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
   1590		if (no_hdev != !hdev) {
   1591			err = mgmt_cmd_status(sk, index, opcode,
   1592					      MGMT_STATUS_INVALID_INDEX);
   1593			goto done;
   1594		}
   1595	}
   1596
   1597	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
   1598	if ((var_len && len < handler->data_len) ||
   1599	    (!var_len && len != handler->data_len)) {
   1600		err = mgmt_cmd_status(sk, index, opcode,
   1601				      MGMT_STATUS_INVALID_PARAMS);
   1602		goto done;
   1603	}
   1604
   1605	if (hdev && chan->hdev_init)
   1606		chan->hdev_init(sk, hdev);
   1607
   1608	cp = skb->data + sizeof(*hdr);
   1609
   1610	err = handler->func(sk, hdev, cp, len);
   1611	if (err < 0)
   1612		goto done;
   1613
   1614	err = skb->len;
   1615
   1616done:
   1617	if (hdev)
   1618		hci_dev_put(hdev);
   1619
   1620	return err;
   1621}
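
/* Illustrative userspace sketch (not part of this file): a management
 * command as parsed by hci_mgmt_cmd() above. The socket is bound to
 * HCI_CHANNEL_CONTROL and every command starts with the 6-byte
 * little-endian mgmt_hdr (opcode, index, len). MGMT_OP_READ_VERSION
 * (0x0001) with MGMT_INDEX_NONE (0xffff) is assumed here; see
 * doc/mgmt-api.txt in BlueZ.
 *
 *	// opcode 0x0001, index 0xffff (MGMT_INDEX_NONE), param len 0
 *	uint8_t cmd[6] = { 0x01, 0x00, 0xff, 0xff, 0x00, 0x00 };
 *
 *	write(fd, cmd, sizeof(cmd));
 *	// the reply arrives as an MGMT_EV_CMD_COMPLETE event on the
 *	// same socket
 */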
   1622
   1623static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
   1624			     unsigned int flags)
   1625{
   1626	struct hci_mon_hdr *hdr;
   1627	struct hci_dev *hdev;
   1628	u16 index;
   1629	int err;
   1630
   1631	/* The logging frame consists at minimum of the standard header,
   1632	 * the priority byte, the ident length byte and at least one
   1633	 * string terminator NUL byte. Anything shorter is an invalid packet.
   1634	 */
   1635	if (skb->len < sizeof(*hdr) + 3)
   1636		return -EINVAL;
   1637
   1638	hdr = (void *)skb->data;
   1639
   1640	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
   1641		return -EINVAL;
   1642
   1643	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
   1644		__u8 priority = skb->data[sizeof(*hdr)];
   1645		__u8 ident_len = skb->data[sizeof(*hdr) + 1];
   1646
   1647		/* Only priorities 0-7 are valid; any other value results
   1648		 * in an invalid packet.
   1649		 *
   1650		 * The priority byte is followed by an ident length byte and
   1651		 * the NUL-terminated ident string. Check that the ident
   1652		 * length does not overflow the packet and that the ident
   1653		 * string itself is NUL-terminated. If the ident length is
   1654		 * zero, the length byte itself doubles as the NUL
   1655		 * terminator.
   1656		 *
   1657		 * The message follows the ident string (if present) and
   1658		 * must be NUL-terminated; otherwise the packet is invalid.
   1659		 */
   1660		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
   1661		    ident_len > skb->len - sizeof(*hdr) - 3 ||
   1662		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
   1663			return -EINVAL;
   1664	} else {
   1665		return -EINVAL;
   1666	}
   1667
   1668	index = __le16_to_cpu(hdr->index);
   1669
   1670	if (index != MGMT_INDEX_NONE) {
   1671		hdev = hci_dev_get(index);
   1672		if (!hdev)
   1673			return -ENODEV;
   1674	} else {
   1675		hdev = NULL;
   1676	}
   1677
   1678	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
   1679
   1680	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
   1681	err = skb->len;
   1682
   1683	if (hdev)
   1684		hci_dev_put(hdev);
   1685
   1686	return err;
   1687}
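
/* Example of a minimal frame accepted by hci_logging_frame() above
 * (illustrative): hci_mon_hdr opcode 0x0000, index 0xffff
 * (MGMT_INDEX_NONE), len 8, followed by priority 6, ident_len 3, the
 * NUL-terminated ident "bt" and the NUL-terminated message "up":
 *
 *	00 00  ff ff  08 00  06  03  62 74 00  75 70 00
 */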
   1688
   1689static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
   1690			    size_t len)
   1691{
   1692	struct sock *sk = sock->sk;
   1693	struct hci_mgmt_chan *chan;
   1694	struct hci_dev *hdev;
   1695	struct sk_buff *skb;
   1696	int err;
   1697	const unsigned int flags = msg->msg_flags;
   1698
   1699	BT_DBG("sock %p sk %p", sock, sk);
   1700
   1701	if (flags & MSG_OOB)
   1702		return -EOPNOTSUPP;
   1703
   1704	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
   1705		return -EINVAL;
   1706
   1707	if (len < 4 || len > hci_pi(sk)->mtu)
   1708		return -EINVAL;
   1709
   1710	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
   1711	if (IS_ERR(skb))
   1712		return PTR_ERR(skb);
   1713
   1714	lock_sock(sk);
   1715
   1716	switch (hci_pi(sk)->channel) {
   1717	case HCI_CHANNEL_RAW:
   1718	case HCI_CHANNEL_USER:
   1719		break;
   1720	case HCI_CHANNEL_MONITOR:
   1721		err = -EOPNOTSUPP;
   1722		goto drop;
   1723	case HCI_CHANNEL_LOGGING:
   1724		err = hci_logging_frame(sk, skb, flags);
   1725		goto drop;
   1726	default:
   1727		mutex_lock(&mgmt_chan_list_lock);
   1728		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
   1729		if (chan)
   1730			err = hci_mgmt_cmd(chan, sk, skb);
   1731		else
   1732			err = -EINVAL;
   1733
   1734		mutex_unlock(&mgmt_chan_list_lock);
   1735		goto drop;
   1736	}
   1737
   1738	hdev = hci_hdev_from_sock(sk);
   1739	if (IS_ERR(hdev)) {
   1740		err = PTR_ERR(hdev);
   1741		goto drop;
   1742	}
   1743
   1744	if (!test_bit(HCI_UP, &hdev->flags)) {
   1745		err = -ENETDOWN;
   1746		goto drop;
   1747	}
   1748
   1749	hci_skb_pkt_type(skb) = skb->data[0];
   1750	skb_pull(skb, 1);
   1751
   1752	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
   1753		/* No permission check is needed for user channel
   1754		 * since that gets enforced when binding the socket.
   1755		 *
   1756		 * However check that the packet type is valid.
   1757		 */
   1758		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
   1759		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
   1760		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
   1761		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
   1762			err = -EINVAL;
   1763			goto drop;
   1764		}
   1765
   1766		skb_queue_tail(&hdev->raw_q, skb);
   1767		queue_work(hdev->workqueue, &hdev->tx_work);
   1768	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
   1769		u16 opcode = get_unaligned_le16(skb->data);
   1770		u16 ogf = hci_opcode_ogf(opcode);
   1771		u16 ocf = hci_opcode_ocf(opcode);
   1772
   1773		if (((ogf > HCI_SFLT_MAX_OGF) ||
   1774		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
   1775				   &hci_sec_filter.ocf_mask[ogf])) &&
   1776		    !capable(CAP_NET_RAW)) {
   1777			err = -EPERM;
   1778			goto drop;
   1779		}
   1780
   1781		/* Since the opcode has already been extracted here, store
   1782		 * a copy of the value for later use by the drivers.
   1783		 */
   1784		hci_skb_opcode(skb) = opcode;
   1785
   1786		if (ogf == 0x3f) {
   1787			skb_queue_tail(&hdev->raw_q, skb);
   1788			queue_work(hdev->workqueue, &hdev->tx_work);
   1789		} else {
   1790			/* Stand-alone HCI commands must be flagged as
   1791			 * single-command requests.
   1792			 */
   1793			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
   1794
   1795			skb_queue_tail(&hdev->cmd_q, skb);
   1796			queue_work(hdev->workqueue, &hdev->cmd_work);
   1797		}
   1798	} else {
   1799		if (!capable(CAP_NET_RAW)) {
   1800			err = -EPERM;
   1801			goto drop;
   1802		}
   1803
   1804		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
   1805		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
   1806		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
   1807			err = -EINVAL;
   1808			goto drop;
   1809		}
   1810
   1811		skb_queue_tail(&hdev->raw_q, skb);
   1812		queue_work(hdev->workqueue, &hdev->tx_work);
   1813	}
   1814
   1815	err = len;
   1816
   1817done:
   1818	release_sock(sk);
   1819	return err;
   1820
   1821drop:
   1822	kfree_skb(skb);
   1823	goto done;
   1824}
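
/* Illustrative userspace sketch (not part of this file): sending a
 * command through the raw-channel path above. The first byte is the
 * packet type; Read Local Version Information (opcode 0x1001) is used
 * because it passes hci_sec_filter even without CAP_NET_RAW.
 *
 *	// HCI_COMMAND_PKT, opcode 0x1001 (little-endian), plen 0
 *	uint8_t pkt[4] = { 0x01, 0x01, 0x10, 0x00 };
 *
 *	write(fd, pkt, sizeof(pkt));	// fd bound to HCI_CHANNEL_RAW
 */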
   1825
   1826static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
   1827				   sockptr_t optval, unsigned int len)
   1828{
   1829	struct hci_ufilter uf = { .opcode = 0 };
   1830	struct sock *sk = sock->sk;
   1831	int err = 0, opt = 0;
   1832
   1833	BT_DBG("sk %p, opt %d", sk, optname);
   1834
   1835	lock_sock(sk);
   1836
   1837	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
   1838		err = -EBADFD;
   1839		goto done;
   1840	}
   1841
   1842	switch (optname) {
   1843	case HCI_DATA_DIR:
   1844		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
   1845			err = -EFAULT;
   1846			break;
   1847		}
   1848
   1849		if (opt)
   1850			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
   1851		else
   1852			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
   1853		break;
   1854
   1855	case HCI_TIME_STAMP:
   1856		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
   1857			err = -EFAULT;
   1858			break;
   1859		}
   1860
   1861		if (opt)
   1862			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
   1863		else
   1864			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
   1865		break;
   1866
   1867	case HCI_FILTER:
   1868		{
   1869			struct hci_filter *f = &hci_pi(sk)->filter;
   1870
   1871			uf.type_mask = f->type_mask;
   1872			uf.opcode    = f->opcode;
   1873			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
   1874			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
   1875		}
   1876
   1877		len = min_t(unsigned int, len, sizeof(uf));
   1878		if (copy_from_sockptr(&uf, optval, len)) {
   1879			err = -EFAULT;
   1880			break;
   1881		}
   1882
   1883		if (!capable(CAP_NET_RAW)) {
   1884			uf.type_mask &= hci_sec_filter.type_mask;
   1885			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
   1886			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
   1887		}
   1888
   1889		{
   1890			struct hci_filter *f = &hci_pi(sk)->filter;
   1891
   1892			f->type_mask = uf.type_mask;
   1893			f->opcode    = uf.opcode;
   1894			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
   1895			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
   1896		}
   1897		break;
   1898
   1899	default:
   1900		err = -ENOPROTOOPT;
   1901		break;
   1902	}
   1903
   1904done:
   1905	release_sock(sk);
   1906	return err;
   1907}
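
/* Illustrative userspace sketch (not part of this file): installing
 * the filter consumed above so that only Command Complete (0x0e) and
 * Command Status (0x0f) events are delivered. The userspace
 * struct hci_filter is assumed to match the layout used here; verify
 * against the system headers.
 *
 *	struct hci_filter flt;
 *
 *	memset(&flt, 0, sizeof(flt));
 *	flt.type_mask = 1 << HCI_EVENT_PKT;		// bit 4
 *	flt.event_mask[0] = (1 << 0x0e) | (1 << 0x0f);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */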
   1908
   1909static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
   1910			       sockptr_t optval, unsigned int len)
   1911{
   1912	struct sock *sk = sock->sk;
   1913	int err = 0;
   1914	u16 opt;
   1915
   1916	BT_DBG("sk %p, opt %d", sk, optname);
   1917
   1918	if (level == SOL_HCI)
   1919		return hci_sock_setsockopt_old(sock, level, optname, optval,
   1920					       len);
   1921
   1922	if (level != SOL_BLUETOOTH)
   1923		return -ENOPROTOOPT;
   1924
   1925	lock_sock(sk);
   1926
   1927	switch (optname) {
   1928	case BT_SNDMTU:
   1929	case BT_RCVMTU:
   1930		switch (hci_pi(sk)->channel) {
   1931		/* Don't allow changing MTU for channels that are meant for HCI
   1932		 * traffic only.
   1933		 */
   1934		case HCI_CHANNEL_RAW:
   1935		case HCI_CHANNEL_USER:
   1936			err = -ENOPROTOOPT;
   1937			goto done;
   1938		}
   1939
   1940		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
   1941			err = -EFAULT;
   1942			break;
   1943		}
   1944
   1945		hci_pi(sk)->mtu = opt;
   1946		break;
   1947
   1948	default:
   1949		err = -ENOPROTOOPT;
   1950		break;
   1951	}
   1952
   1953done:
   1954	release_sock(sk);
   1955	return err;
   1956}
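
/* Illustrative userspace sketch (not part of this file): raising the
 * receive MTU of a control-channel socket via the handler above.
 * BT_RCVMTU and SOL_BLUETOOTH are assumed from the BlueZ headers.
 *
 *	uint16_t mtu = 2048;
 *
 *	setsockopt(fd, SOL_BLUETOOTH, BT_RCVMTU, &mtu, sizeof(mtu));
 */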
   1957
   1958static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
   1959				   char __user *optval, int __user *optlen)
   1960{
   1961	struct hci_ufilter uf;
   1962	struct sock *sk = sock->sk;
   1963	int len, opt, err = 0;
   1964
   1965	BT_DBG("sk %p, opt %d", sk, optname);
   1966
   1967	if (get_user(len, optlen))
   1968		return -EFAULT;
   1969
   1970	lock_sock(sk);
   1971
   1972	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
   1973		err = -EBADFD;
   1974		goto done;
   1975	}
   1976
   1977	switch (optname) {
   1978	case HCI_DATA_DIR:
   1979		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
   1980			opt = 1;
   1981		else
   1982			opt = 0;
   1983
   1984		if (put_user(opt, optval))
   1985			err = -EFAULT;
   1986		break;
   1987
   1988	case HCI_TIME_STAMP:
   1989		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
   1990			opt = 1;
   1991		else
   1992			opt = 0;
   1993
   1994		if (put_user(opt, optval))
   1995			err = -EFAULT;
   1996		break;
   1997
   1998	case HCI_FILTER:
   1999		{
   2000			struct hci_filter *f = &hci_pi(sk)->filter;
   2001
   2002			memset(&uf, 0, sizeof(uf));
   2003			uf.type_mask = f->type_mask;
   2004			uf.opcode    = f->opcode;
   2005			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
   2006			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
   2007		}
   2008
   2009		len = min_t(unsigned int, len, sizeof(uf));
   2010		if (copy_to_user(optval, &uf, len))
   2011			err = -EFAULT;
   2012		break;
   2013
   2014	default:
   2015		err = -ENOPROTOOPT;
   2016		break;
   2017	}
   2018
   2019done:
   2020	release_sock(sk);
   2021	return err;
   2022}
   2023
   2024static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
   2025			       char __user *optval, int __user *optlen)
   2026{
   2027	struct sock *sk = sock->sk;
   2028	int err = 0;
   2029
   2030	BT_DBG("sk %p, opt %d", sk, optname);
   2031
   2032	if (level == SOL_HCI)
   2033		return hci_sock_getsockopt_old(sock, level, optname, optval,
   2034					       optlen);
   2035
   2036	if (level != SOL_BLUETOOTH)
   2037		return -ENOPROTOOPT;
   2038
   2039	lock_sock(sk);
   2040
   2041	switch (optname) {
   2042	case BT_SNDMTU:
   2043	case BT_RCVMTU:
   2044		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
   2045			err = -EFAULT;
   2046		break;
   2047
   2048	default:
   2049		err = -ENOPROTOOPT;
   2050		break;
   2051	}
   2052
   2053	release_sock(sk);
   2054	return err;
   2055}
   2056
   2057static void hci_sock_destruct(struct sock *sk)
   2058{
   2059	skb_queue_purge(&sk->sk_receive_queue);
   2060	skb_queue_purge(&sk->sk_write_queue);
   2061}
   2062
   2063static const struct proto_ops hci_sock_ops = {
   2064	.family		= PF_BLUETOOTH,
   2065	.owner		= THIS_MODULE,
   2066	.release	= hci_sock_release,
   2067	.bind		= hci_sock_bind,
   2068	.getname	= hci_sock_getname,
   2069	.sendmsg	= hci_sock_sendmsg,
   2070	.recvmsg	= hci_sock_recvmsg,
   2071	.ioctl		= hci_sock_ioctl,
   2072#ifdef CONFIG_COMPAT
   2073	.compat_ioctl	= hci_sock_compat_ioctl,
   2074#endif
   2075	.poll		= datagram_poll,
   2076	.listen		= sock_no_listen,
   2077	.shutdown	= sock_no_shutdown,
   2078	.setsockopt	= hci_sock_setsockopt,
   2079	.getsockopt	= hci_sock_getsockopt,
   2080	.connect	= sock_no_connect,
   2081	.socketpair	= sock_no_socketpair,
   2082	.accept		= sock_no_accept,
   2083	.mmap		= sock_no_mmap
   2084};
   2085
   2086static struct proto hci_sk_proto = {
   2087	.name		= "HCI",
   2088	.owner		= THIS_MODULE,
   2089	.obj_size	= sizeof(struct hci_pinfo)
   2090};
   2091
   2092static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
   2093			   int kern)
   2094{
   2095	struct sock *sk;
   2096
   2097	BT_DBG("sock %p", sock);
   2098
   2099	if (sock->type != SOCK_RAW)
   2100		return -ESOCKTNOSUPPORT;
   2101
   2102	sock->ops = &hci_sock_ops;
   2103
   2104	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
   2105	if (!sk)
   2106		return -ENOMEM;
   2107
   2108	sock_init_data(sock, sk);
   2109
   2110	sock_reset_flag(sk, SOCK_ZAPPED);
   2111
   2112	sk->sk_protocol = protocol;
   2113
   2114	sock->state = SS_UNCONNECTED;
   2115	sk->sk_state = BT_OPEN;
   2116	sk->sk_destruct = hci_sock_destruct;
   2117
   2118	bt_sock_link(&hci_sk_list, sk);
   2119	return 0;
   2120}
   2121
   2122static const struct net_proto_family hci_sock_family_ops = {
   2123	.family	= PF_BLUETOOTH,
   2124	.owner	= THIS_MODULE,
   2125	.create	= hci_sock_create,
   2126};
   2127
   2128int __init hci_sock_init(void)
   2129{
   2130	int err;
   2131
   2132	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
   2133
   2134	err = proto_register(&hci_sk_proto, 0);
   2135	if (err < 0)
   2136		return err;
   2137
   2138	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
   2139	if (err < 0) {
   2140		BT_ERR("HCI socket registration failed");
   2141		goto error;
   2142	}
   2143
   2144	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
   2145	if (err < 0) {
   2146		BT_ERR("Failed to create HCI proc file");
   2147		bt_sock_unregister(BTPROTO_HCI);
   2148		goto error;
   2149	}
   2150
   2151	BT_INFO("HCI socket layer initialized");
   2152
   2153	return 0;
   2154
   2155error:
   2156	proto_unregister(&hci_sk_proto);
   2157	return err;
   2158}
   2159
   2160void hci_sock_cleanup(void)
   2161{
   2162	bt_procfs_cleanup(&init_net, "hci");
   2163	bt_sock_unregister(BTPROTO_HCI);
   2164	proto_unregister(&hci_sk_proto);
   2165}