cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hci_conn.c (46908B)


      1/*
      2   BlueZ - Bluetooth protocol stack for Linux
      3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
      4
      5   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
      6
      7   This program is free software; you can redistribute it and/or modify
      8   it under the terms of the GNU General Public License version 2 as
      9   published by the Free Software Foundation;
     10
     11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
     14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
     15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
     16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     19
     20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
     21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
     22   SOFTWARE IS DISCLAIMED.
     23*/
     24
     25/* Bluetooth HCI connection handling. */
     26
     27#include <linux/export.h>
     28#include <linux/debugfs.h>
     29
     30#include <net/bluetooth/bluetooth.h>
     31#include <net/bluetooth/hci_core.h>
     32#include <net/bluetooth/l2cap.h>
     33
     34#include "hci_request.h"
     35#include "smp.h"
     36#include "a2mp.h"
     37
     38struct sco_param {
     39	u16 pkt_type;
     40	u16 max_latency;
     41	u8  retrans_effort;
     42};
     43
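/* Synchronous connection parameter sets, most preferred first. The D0/D1,
 * S1-S3 and T1/T2 labels are the parameter sets defined by the Hands-Free
 * Profile for CVSD over SCO, CVSD over eSCO and mSBC respectively. The
 * setup helpers below index them with conn->attempt - 1, falling back to
 * the next (more conservative) entry on every retry.
 */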
     44static const struct sco_param esco_param_cvsd[] = {
     45	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
     46	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
     47	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
     48	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
     49	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
     50};
     51
     52static const struct sco_param sco_param_cvsd[] = {
     53	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
     54	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
     55};
     56
     57static const struct sco_param esco_param_msbc[] = {
     58	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
     59	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
     60};
     61
     62/* This function requires the caller holds hdev->lock */
     63static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
     64{
     65	struct hci_conn_params *params;
     66	struct hci_dev *hdev = conn->hdev;
     67	struct smp_irk *irk;
     68	bdaddr_t *bdaddr;
     69	u8 bdaddr_type;
     70
     71	bdaddr = &conn->dst;
     72	bdaddr_type = conn->dst_type;
     73
     74	/* Check if we need to convert to identity address */
     75	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
     76	if (irk) {
     77		bdaddr = &irk->bdaddr;
     78		bdaddr_type = irk->addr_type;
     79	}
     80
     81	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
     82					   bdaddr_type);
     83	if (!params || !params->explicit_connect)
     84		return;
     85
      86	/* The connection attempt was scanning for a new RPA and is still in
      87	 * the scan phase. If the params are not associated with any other
      88	 * autoconnect action, remove them completely. If they are, just unmark
      89	 * them as waiting for a connection by clearing explicit_connect.
      90	 */
     91	params->explicit_connect = false;
     92
     93	list_del_init(&params->action);
     94
     95	switch (params->auto_connect) {
     96	case HCI_AUTO_CONN_EXPLICIT:
     97		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
     98		/* return instead of break to avoid duplicate scan update */
     99		return;
    100	case HCI_AUTO_CONN_DIRECT:
    101	case HCI_AUTO_CONN_ALWAYS:
    102		list_add(&params->action, &hdev->pend_le_conns);
    103		break;
    104	case HCI_AUTO_CONN_REPORT:
    105		list_add(&params->action, &hdev->pend_le_reports);
    106		break;
    107	default:
    108		break;
    109	}
    110
    111	hci_update_passive_scan(hdev);
    112}
    113
    114static void hci_conn_cleanup(struct hci_conn *conn)
    115{
    116	struct hci_dev *hdev = conn->hdev;
    117
    118	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
    119		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
    120
    121	hci_chan_list_flush(conn);
    122
    123	hci_conn_hash_del(hdev, conn);
    124
    125	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
    126		switch (conn->setting & SCO_AIRMODE_MASK) {
    127		case SCO_AIRMODE_CVSD:
    128		case SCO_AIRMODE_TRANSP:
    129			if (hdev->notify)
    130				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
    131			break;
    132		}
    133	} else {
    134		if (hdev->notify)
    135			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
    136	}
    137
    138	hci_conn_del_sysfs(conn);
    139
    140	debugfs_remove_recursive(conn->debugfs);
    141
    142	hci_dev_put(hdev);
    143
    144	hci_conn_put(conn);
    145}
    146
    147static void le_scan_cleanup(struct work_struct *work)
    148{
    149	struct hci_conn *conn = container_of(work, struct hci_conn,
    150					     le_scan_cleanup);
    151	struct hci_dev *hdev = conn->hdev;
    152	struct hci_conn *c = NULL;
    153
    154	BT_DBG("%s hcon %p", hdev->name, conn);
    155
    156	hci_dev_lock(hdev);
    157
    158	/* Check that the hci_conn is still around */
    159	rcu_read_lock();
    160	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
    161		if (c == conn)
    162			break;
    163	}
    164	rcu_read_unlock();
    165
    166	if (c == conn) {
    167		hci_connect_le_scan_cleanup(conn);
    168		hci_conn_cleanup(conn);
    169	}
    170
    171	hci_dev_unlock(hdev);
    172	hci_dev_put(hdev);
    173	hci_conn_put(conn);
    174}
    175
    176static void hci_connect_le_scan_remove(struct hci_conn *conn)
    177{
    178	BT_DBG("%s hcon %p", conn->hdev->name, conn);
    179
    180	/* We can't call hci_conn_del/hci_conn_cleanup here since that
    181	 * could deadlock with another hci_conn_del() call that's holding
    182	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
    183	 * Instead, grab temporary extra references to the hci_dev and
    184	 * hci_conn and perform the necessary cleanup in a separate work
    185	 * callback.
    186	 */
    187
    188	hci_dev_hold(conn->hdev);
    189	hci_conn_get(conn);
    190
    191	/* Even though we hold a reference to the hdev, many other
    192	 * things might get cleaned up meanwhile, including the hdev's
    193	 * own workqueue, so we can't use that for scheduling.
    194	 */
    195	schedule_work(&conn->le_scan_cleanup);
    196}
    197
    198static void hci_acl_create_connection(struct hci_conn *conn)
    199{
    200	struct hci_dev *hdev = conn->hdev;
    201	struct inquiry_entry *ie;
    202	struct hci_cp_create_conn cp;
    203
    204	BT_DBG("hcon %p", conn);
    205
     206	/* Many controllers disallow HCI Create Connection while they are doing
    207	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
    208	 * Connection. This may cause the MGMT discovering state to become false
    209	 * without user space's request but it is okay since the MGMT Discovery
    210	 * APIs do not promise that discovery should be done forever. Instead,
    211	 * the user space monitors the status of MGMT discovering and it may
    212	 * request for discovery again when this flag becomes false.
    213	 */
    214	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
     215	/* Put this connection into the "pending" state so that it will be
    216		 * executed after the inquiry cancel command complete event.
    217		 */
    218		conn->state = BT_CONNECT2;
    219		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
    220		return;
    221	}
    222
    223	conn->state = BT_CONNECT;
    224	conn->out = true;
    225	conn->role = HCI_ROLE_MASTER;
    226
    227	conn->attempt++;
    228
    229	conn->link_policy = hdev->link_policy;
    230
    231	memset(&cp, 0, sizeof(cp));
    232	bacpy(&cp.bdaddr, &conn->dst);
    233	cp.pscan_rep_mode = 0x02;
    234
    235	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
    236	if (ie) {
    237		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
    238			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
    239			cp.pscan_mode     = ie->data.pscan_mode;
    240			cp.clock_offset   = ie->data.clock_offset |
    241					    cpu_to_le16(0x8000);
    242		}
    243
    244		memcpy(conn->dev_class, ie->data.dev_class, 3);
    245	}
    246
    247	cp.pkt_type = cpu_to_le16(conn->pkt_type);
    248	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
    249		cp.role_switch = 0x01;
    250	else
    251		cp.role_switch = 0x00;
    252
    253	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
    254}
    255
    256int hci_disconnect(struct hci_conn *conn, __u8 reason)
    257{
    258	BT_DBG("hcon %p", conn);
    259
    260	/* When we are central of an established connection and it enters
    261	 * the disconnect timeout, then go ahead and try to read the
    262	 * current clock offset.  Processing of the result is done
    263	 * within the event handling and hci_clock_offset_evt function.
    264	 */
    265	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
    266	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
    267		struct hci_dev *hdev = conn->hdev;
    268		struct hci_cp_read_clock_offset clkoff_cp;
    269
    270		clkoff_cp.handle = cpu_to_le16(conn->handle);
    271		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
    272			     &clkoff_cp);
    273	}
    274
    275	return hci_abort_conn(conn, reason);
    276}
    277
    278static void hci_add_sco(struct hci_conn *conn, __u16 handle)
    279{
    280	struct hci_dev *hdev = conn->hdev;
    281	struct hci_cp_add_sco cp;
    282
    283	BT_DBG("hcon %p", conn);
    284
    285	conn->state = BT_CONNECT;
    286	conn->out = true;
    287
    288	conn->attempt++;
    289
    290	cp.handle   = cpu_to_le16(handle);
    291	cp.pkt_type = cpu_to_le16(conn->pkt_type);
    292
    293	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
    294}
    295
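/* Advance conn->attempt past parameter entries that rely on 2-EV3 packets
 * when the remote side is not eSCO 2 Mbps capable. Returns false once the
 * table is exhausted, i.e. no usable parameter set is left.
 */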
    296static bool find_next_esco_param(struct hci_conn *conn,
    297				 const struct sco_param *esco_param, int size)
    298{
    299	for (; conn->attempt <= size; conn->attempt++) {
    300		if (lmp_esco_2m_capable(conn->link) ||
    301		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
    302			break;
    303		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
    304		       conn, conn->attempt);
    305	}
    306
    307	return conn->attempt <= size;
    308}
    309
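/* Build and send an HCI Enhanced Setup Synchronous Connection command. The
 * over-the-air coding format follows conn->codec (0x05 mSBC, 0x03 transparent,
 * 0x02 CVSD), the host-side input/output format is linear PCM (0x04) except
 * for the transparent case, and both directions use the data path id carried
 * in conn->codec.data_path.
 */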
    310static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)
    311{
    312	struct hci_dev *hdev = conn->hdev;
    313	struct hci_cp_enhanced_setup_sync_conn cp;
    314	const struct sco_param *param;
    315
    316	bt_dev_dbg(hdev, "hcon %p", conn);
    317
     319	/* for offload use case, codec needs to be configured before opening SCO */
    319	if (conn->codec.data_path)
    320		hci_req_configure_datapath(hdev, &conn->codec);
    321
    322	conn->state = BT_CONNECT;
    323	conn->out = true;
    324
    325	conn->attempt++;
    326
    327	memset(&cp, 0x00, sizeof(cp));
    328
    329	cp.handle   = cpu_to_le16(handle);
    330
    331	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
    332	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
    333
    334	switch (conn->codec.id) {
    335	case BT_CODEC_MSBC:
    336		if (!find_next_esco_param(conn, esco_param_msbc,
    337					  ARRAY_SIZE(esco_param_msbc)))
    338			return false;
    339
    340		param = &esco_param_msbc[conn->attempt - 1];
    341		cp.tx_coding_format.id = 0x05;
    342		cp.rx_coding_format.id = 0x05;
    343		cp.tx_codec_frame_size = __cpu_to_le16(60);
    344		cp.rx_codec_frame_size = __cpu_to_le16(60);
    345		cp.in_bandwidth = __cpu_to_le32(32000);
    346		cp.out_bandwidth = __cpu_to_le32(32000);
    347		cp.in_coding_format.id = 0x04;
    348		cp.out_coding_format.id = 0x04;
    349		cp.in_coded_data_size = __cpu_to_le16(16);
    350		cp.out_coded_data_size = __cpu_to_le16(16);
    351		cp.in_pcm_data_format = 2;
    352		cp.out_pcm_data_format = 2;
    353		cp.in_pcm_sample_payload_msb_pos = 0;
    354		cp.out_pcm_sample_payload_msb_pos = 0;
    355		cp.in_data_path = conn->codec.data_path;
    356		cp.out_data_path = conn->codec.data_path;
    357		cp.in_transport_unit_size = 1;
    358		cp.out_transport_unit_size = 1;
    359		break;
    360
    361	case BT_CODEC_TRANSPARENT:
    362		if (!find_next_esco_param(conn, esco_param_msbc,
    363					  ARRAY_SIZE(esco_param_msbc)))
    364			return false;
    365		param = &esco_param_msbc[conn->attempt - 1];
    366		cp.tx_coding_format.id = 0x03;
    367		cp.rx_coding_format.id = 0x03;
    368		cp.tx_codec_frame_size = __cpu_to_le16(60);
    369		cp.rx_codec_frame_size = __cpu_to_le16(60);
    370		cp.in_bandwidth = __cpu_to_le32(0x1f40);
    371		cp.out_bandwidth = __cpu_to_le32(0x1f40);
    372		cp.in_coding_format.id = 0x03;
    373		cp.out_coding_format.id = 0x03;
    374		cp.in_coded_data_size = __cpu_to_le16(16);
    375		cp.out_coded_data_size = __cpu_to_le16(16);
    376		cp.in_pcm_data_format = 2;
    377		cp.out_pcm_data_format = 2;
    378		cp.in_pcm_sample_payload_msb_pos = 0;
    379		cp.out_pcm_sample_payload_msb_pos = 0;
    380		cp.in_data_path = conn->codec.data_path;
    381		cp.out_data_path = conn->codec.data_path;
    382		cp.in_transport_unit_size = 1;
    383		cp.out_transport_unit_size = 1;
    384		break;
    385
    386	case BT_CODEC_CVSD:
    387		if (lmp_esco_capable(conn->link)) {
    388			if (!find_next_esco_param(conn, esco_param_cvsd,
    389						  ARRAY_SIZE(esco_param_cvsd)))
    390				return false;
    391			param = &esco_param_cvsd[conn->attempt - 1];
    392		} else {
    393			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
    394				return false;
    395			param = &sco_param_cvsd[conn->attempt - 1];
    396		}
    397		cp.tx_coding_format.id = 2;
    398		cp.rx_coding_format.id = 2;
    399		cp.tx_codec_frame_size = __cpu_to_le16(60);
    400		cp.rx_codec_frame_size = __cpu_to_le16(60);
    401		cp.in_bandwidth = __cpu_to_le32(16000);
    402		cp.out_bandwidth = __cpu_to_le32(16000);
    403		cp.in_coding_format.id = 4;
    404		cp.out_coding_format.id = 4;
    405		cp.in_coded_data_size = __cpu_to_le16(16);
    406		cp.out_coded_data_size = __cpu_to_le16(16);
    407		cp.in_pcm_data_format = 2;
    408		cp.out_pcm_data_format = 2;
    409		cp.in_pcm_sample_payload_msb_pos = 0;
    410		cp.out_pcm_sample_payload_msb_pos = 0;
    411		cp.in_data_path = conn->codec.data_path;
    412		cp.out_data_path = conn->codec.data_path;
    413		cp.in_transport_unit_size = 16;
    414		cp.out_transport_unit_size = 16;
    415		break;
    416	default:
    417		return false;
    418	}
    419
    420	cp.retrans_effort = param->retrans_effort;
    421	cp.pkt_type = __cpu_to_le16(param->pkt_type);
    422	cp.max_latency = __cpu_to_le16(param->max_latency);
    423
    424	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
    425		return false;
    426
    427	return true;
    428}
    429
    430static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
    431{
    432	struct hci_dev *hdev = conn->hdev;
    433	struct hci_cp_setup_sync_conn cp;
    434	const struct sco_param *param;
    435
    436	bt_dev_dbg(hdev, "hcon %p", conn);
    437
    438	conn->state = BT_CONNECT;
    439	conn->out = true;
    440
    441	conn->attempt++;
    442
    443	cp.handle   = cpu_to_le16(handle);
    444
    445	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
    446	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
    447	cp.voice_setting  = cpu_to_le16(conn->setting);
    448
    449	switch (conn->setting & SCO_AIRMODE_MASK) {
    450	case SCO_AIRMODE_TRANSP:
    451		if (!find_next_esco_param(conn, esco_param_msbc,
    452					  ARRAY_SIZE(esco_param_msbc)))
    453			return false;
    454		param = &esco_param_msbc[conn->attempt - 1];
    455		break;
    456	case SCO_AIRMODE_CVSD:
    457		if (lmp_esco_capable(conn->link)) {
    458			if (!find_next_esco_param(conn, esco_param_cvsd,
    459						  ARRAY_SIZE(esco_param_cvsd)))
    460				return false;
    461			param = &esco_param_cvsd[conn->attempt - 1];
    462		} else {
    463			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
    464				return false;
    465			param = &sco_param_cvsd[conn->attempt - 1];
    466		}
    467		break;
    468	default:
    469		return false;
    470	}
    471
    472	cp.retrans_effort = param->retrans_effort;
    473	cp.pkt_type = __cpu_to_le16(param->pkt_type);
    474	cp.max_latency = __cpu_to_le16(param->max_latency);
    475
    476	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
    477		return false;
    478
    479	return true;
    480}
    481
    482bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
    483{
    484	if (enhanced_sync_conn_capable(conn->hdev))
    485		return hci_enhanced_setup_sync_conn(conn, handle);
    486
    487	return hci_setup_sync_conn(conn, handle);
    488}
    489
    490u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
    491		      u16 to_multiplier)
    492{
    493	struct hci_dev *hdev = conn->hdev;
    494	struct hci_conn_params *params;
    495	struct hci_cp_le_conn_update cp;
    496
    497	hci_dev_lock(hdev);
    498
    499	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
    500	if (params) {
    501		params->conn_min_interval = min;
    502		params->conn_max_interval = max;
    503		params->conn_latency = latency;
    504		params->supervision_timeout = to_multiplier;
    505	}
    506
    507	hci_dev_unlock(hdev);
    508
    509	memset(&cp, 0, sizeof(cp));
    510	cp.handle		= cpu_to_le16(conn->handle);
    511	cp.conn_interval_min	= cpu_to_le16(min);
    512	cp.conn_interval_max	= cpu_to_le16(max);
    513	cp.conn_latency		= cpu_to_le16(latency);
    514	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
    515	cp.min_ce_len		= cpu_to_le16(0x0000);
    516	cp.max_ce_len		= cpu_to_le16(0x0000);
    517
    518	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
    519
    520	if (params)
    521		return 0x01;
    522
    523	return 0x00;
    524}
    525
    526void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
    527		      __u8 ltk[16], __u8 key_size)
    528{
    529	struct hci_dev *hdev = conn->hdev;
    530	struct hci_cp_le_start_enc cp;
    531
    532	BT_DBG("hcon %p", conn);
    533
    534	memset(&cp, 0, sizeof(cp));
    535
    536	cp.handle = cpu_to_le16(conn->handle);
    537	cp.rand = rand;
    538	cp.ediv = ediv;
    539	memcpy(cp.ltk, ltk, key_size);
    540
    541	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
    542}
    543
    544/* Device _must_ be locked */
    545void hci_sco_setup(struct hci_conn *conn, __u8 status)
    546{
    547	struct hci_conn *sco = conn->link;
    548
    549	if (!sco)
    550		return;
    551
    552	BT_DBG("hcon %p", conn);
    553
    554	if (!status) {
    555		if (lmp_esco_capable(conn->hdev))
    556			hci_setup_sync(sco, conn->handle);
    557		else
    558			hci_add_sco(sco, conn->handle);
    559	} else {
    560		hci_connect_cfm(sco, status);
    561		hci_conn_del(sco);
    562	}
    563}
    564
    565static void hci_conn_timeout(struct work_struct *work)
    566{
    567	struct hci_conn *conn = container_of(work, struct hci_conn,
    568					     disc_work.work);
    569	int refcnt = atomic_read(&conn->refcnt);
    570
    571	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
    572
    573	WARN_ON(refcnt < 0);
    574
    575	/* FIXME: It was observed that in pairing failed scenario, refcnt
    576	 * drops below 0. Probably this is because l2cap_conn_del calls
    577	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
    578	 * dropped. After that loop hci_chan_del is called which also drops
     579	 * conn. For now make sure that ACL is alive if refcnt is higher than 0,
    580	 * otherwise drop it.
    581	 */
    582	if (refcnt > 0)
    583		return;
    584
    585	/* LE connections in scanning state need special handling */
    586	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
    587	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
    588		hci_connect_le_scan_remove(conn);
    589		return;
    590	}
    591
    592	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
    593}
    594
    595/* Enter sniff mode */
    596static void hci_conn_idle(struct work_struct *work)
    597{
    598	struct hci_conn *conn = container_of(work, struct hci_conn,
    599					     idle_work.work);
    600	struct hci_dev *hdev = conn->hdev;
    601
    602	BT_DBG("hcon %p mode %d", conn, conn->mode);
    603
    604	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
    605		return;
    606
    607	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
    608		return;
    609
    610	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
    611		struct hci_cp_sniff_subrate cp;
    612		cp.handle             = cpu_to_le16(conn->handle);
    613		cp.max_latency        = cpu_to_le16(0);
    614		cp.min_remote_timeout = cpu_to_le16(0);
    615		cp.min_local_timeout  = cpu_to_le16(0);
    616		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
    617	}
    618
    619	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
    620		struct hci_cp_sniff_mode cp;
    621		cp.handle       = cpu_to_le16(conn->handle);
    622		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
    623		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
    624		cp.attempt      = cpu_to_le16(4);
    625		cp.timeout      = cpu_to_le16(1);
    626		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
    627	}
    628}
    629
    630static void hci_conn_auto_accept(struct work_struct *work)
    631{
    632	struct hci_conn *conn = container_of(work, struct hci_conn,
    633					     auto_accept_work.work);
    634
    635	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
    636		     &conn->dst);
    637}
    638
    639static void le_disable_advertising(struct hci_dev *hdev)
    640{
    641	if (ext_adv_capable(hdev)) {
    642		struct hci_cp_le_set_ext_adv_enable cp;
    643
    644		cp.enable = 0x00;
    645		cp.num_of_sets = 0x00;
    646
    647		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
    648			     &cp);
    649	} else {
    650		u8 enable = 0x00;
    651		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
    652			     &enable);
    653	}
    654}
    655
    656static void le_conn_timeout(struct work_struct *work)
    657{
    658	struct hci_conn *conn = container_of(work, struct hci_conn,
    659					     le_conn_timeout.work);
    660	struct hci_dev *hdev = conn->hdev;
    661
    662	BT_DBG("");
    663
    664	/* We could end up here due to having done directed advertising,
    665	 * so clean up the state if necessary. This should however only
    666	 * happen with broken hardware or if low duty cycle was used
    667	 * (which doesn't have a timeout of its own).
    668	 */
    669	if (conn->role == HCI_ROLE_SLAVE) {
    670		/* Disable LE Advertising */
    671		le_disable_advertising(hdev);
    672		hci_dev_lock(hdev);
    673		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
    674		hci_dev_unlock(hdev);
    675		return;
    676	}
    677
    678	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
    679}
    680
    681struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
    682			      u8 role)
    683{
    684	struct hci_conn *conn;
    685
    686	BT_DBG("%s dst %pMR", hdev->name, dst);
    687
    688	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
    689	if (!conn)
    690		return NULL;
    691
    692	bacpy(&conn->dst, dst);
    693	bacpy(&conn->src, &hdev->bdaddr);
    694	conn->handle = HCI_CONN_HANDLE_UNSET;
    695	conn->hdev  = hdev;
    696	conn->type  = type;
    697	conn->role  = role;
    698	conn->mode  = HCI_CM_ACTIVE;
    699	conn->state = BT_OPEN;
    700	conn->auth_type = HCI_AT_GENERAL_BONDING;
    701	conn->io_capability = hdev->io_capability;
    702	conn->remote_auth = 0xff;
    703	conn->key_type = 0xff;
    704	conn->rssi = HCI_RSSI_INVALID;
    705	conn->tx_power = HCI_TX_POWER_INVALID;
    706	conn->max_tx_power = HCI_TX_POWER_INVALID;
    707
    708	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
    709	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
    710
    711	/* Set Default Authenticated payload timeout to 30s */
    712	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
    713
    714	if (conn->role == HCI_ROLE_MASTER)
    715		conn->out = true;
    716
    717	switch (type) {
    718	case ACL_LINK:
    719		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
    720		break;
    721	case LE_LINK:
    722		/* conn->src should reflect the local identity address */
    723		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
    724		break;
    725	case SCO_LINK:
    726		if (lmp_esco_capable(hdev))
    727			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
    728					(hdev->esco_type & EDR_ESCO_MASK);
    729		else
    730			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
    731		break;
    732	case ESCO_LINK:
    733		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
    734		break;
    735	}
    736
    737	skb_queue_head_init(&conn->data_q);
    738
    739	INIT_LIST_HEAD(&conn->chan_list);
    740
    741	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
    742	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
    743	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
    744	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
    745	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
    746
    747	atomic_set(&conn->refcnt, 0);
    748
    749	hci_dev_hold(hdev);
    750
    751	hci_conn_hash_add(hdev, conn);
    752
    753	/* The SCO and eSCO connections will only be notified when their
    754	 * setup has been completed. This is different to ACL links which
    755	 * can be notified right away.
    756	 */
    757	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
    758		if (hdev->notify)
    759			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
    760	}
    761
    762	hci_conn_init_sysfs(conn);
    763
    764	return conn;
    765}
    766
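/* Tear down a connection object: cancel its delayed work, detach the peer
 * SCO/ACL link, return any unacknowledged packet credits to hdev and let
 * hci_conn_cleanup() remove it from the hash and drop the references.
 */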
    767int hci_conn_del(struct hci_conn *conn)
    768{
    769	struct hci_dev *hdev = conn->hdev;
    770
    771	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
    772
    773	cancel_delayed_work_sync(&conn->disc_work);
    774	cancel_delayed_work_sync(&conn->auto_accept_work);
    775	cancel_delayed_work_sync(&conn->idle_work);
    776
    777	if (conn->type == ACL_LINK) {
    778		struct hci_conn *sco = conn->link;
    779		if (sco)
    780			sco->link = NULL;
    781
    782		/* Unacked frames */
    783		hdev->acl_cnt += conn->sent;
    784	} else if (conn->type == LE_LINK) {
    785		cancel_delayed_work(&conn->le_conn_timeout);
    786
    787		if (hdev->le_pkts)
    788			hdev->le_cnt += conn->sent;
    789		else
    790			hdev->acl_cnt += conn->sent;
    791	} else {
    792		struct hci_conn *acl = conn->link;
    793		if (acl) {
    794			acl->link = NULL;
    795			hci_conn_drop(acl);
    796		}
    797	}
    798
    799	if (conn->amp_mgr)
    800		amp_mgr_put(conn->amp_mgr);
    801
    802	skb_queue_purge(&conn->data_q);
    803
    804	/* Remove the connection from the list and cleanup its remaining
    805	 * state. This is a separate function since for some cases like
    806	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
    807	 * rest of hci_conn_del.
    808	 */
    809	hci_conn_cleanup(conn);
    810
    811	return 0;
    812}
    813
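/* Pick the local controller for an outgoing connection. With a specific
 * source address, the controller whose BR/EDR or LE identity address matches
 * it is chosen; otherwise any powered-up primary controller whose own address
 * differs from dst will do. A reference on the returned hdev is held for the
 * caller.
 */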
    814struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
    815{
    816	int use_src = bacmp(src, BDADDR_ANY);
    817	struct hci_dev *hdev = NULL, *d;
    818
    819	BT_DBG("%pMR -> %pMR", src, dst);
    820
    821	read_lock(&hci_dev_list_lock);
    822
    823	list_for_each_entry(d, &hci_dev_list, list) {
    824		if (!test_bit(HCI_UP, &d->flags) ||
    825		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
    826		    d->dev_type != HCI_PRIMARY)
    827			continue;
    828
    829		/* Simple routing:
    830		 *   No source address - find interface with bdaddr != dst
    831		 *   Source address    - find interface with bdaddr == src
    832		 */
    833
    834		if (use_src) {
    835			bdaddr_t id_addr;
    836			u8 id_addr_type;
    837
    838			if (src_type == BDADDR_BREDR) {
    839				if (!lmp_bredr_capable(d))
    840					continue;
    841				bacpy(&id_addr, &d->bdaddr);
    842				id_addr_type = BDADDR_BREDR;
    843			} else {
    844				if (!lmp_le_capable(d))
    845					continue;
    846
    847				hci_copy_identity_address(d, &id_addr,
    848							  &id_addr_type);
    849
    850				/* Convert from HCI to three-value type */
    851				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
    852					id_addr_type = BDADDR_LE_PUBLIC;
    853				else
    854					id_addr_type = BDADDR_LE_RANDOM;
    855			}
    856
    857			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
    858				hdev = d; break;
    859			}
    860		} else {
    861			if (bacmp(&d->bdaddr, dst)) {
    862				hdev = d; break;
    863			}
    864		}
    865	}
    866
    867	if (hdev)
    868		hdev = hci_dev_hold(hdev);
    869
    870	read_unlock(&hci_dev_list_lock);
    871	return hdev;
    872}
    873EXPORT_SYMBOL(hci_get_route);
    874
    875/* This function requires the caller holds hdev->lock */
    876static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
    877{
    878	struct hci_dev *hdev = conn->hdev;
    879	struct hci_conn_params *params;
    880
    881	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
    882					   conn->dst_type);
    883	if (params && params->conn) {
    884		hci_conn_drop(params->conn);
    885		hci_conn_put(params->conn);
    886		params->conn = NULL;
    887	}
    888
    889	/* If the status indicates successful cancellation of
     890	 * the attempt (i.e. Unknown Connection Id) there's no point in
    891	 * notifying failure since we'll go back to keep trying to
    892	 * connect. The only exception is explicit connect requests
    893	 * where a timeout + cancel does indicate an actual failure.
    894	 */
    895	if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
    896	    (params && params->explicit_connect))
    897		mgmt_connect_failed(hdev, &conn->dst, conn->type,
    898				    conn->dst_type, status);
    899
    900	/* Since we may have temporarily stopped the background scanning in
    901	 * favor of connection establishment, we should restart it.
    902	 */
    903	hci_update_passive_scan(hdev);
    904
    905	/* Enable advertising in case this was a failed connection
    906	 * attempt as a peripheral.
    907	 */
    908	hci_enable_advertising(hdev);
    909}
    910
    911/* This function requires the caller holds hdev->lock */
    912void hci_conn_failed(struct hci_conn *conn, u8 status)
    913{
    914	struct hci_dev *hdev = conn->hdev;
    915
    916	bt_dev_dbg(hdev, "status 0x%2.2x", status);
    917
    918	switch (conn->type) {
    919	case LE_LINK:
    920		hci_le_conn_failed(conn, status);
    921		break;
    922	case ACL_LINK:
    923		mgmt_connect_failed(hdev, &conn->dst, conn->type,
    924				    conn->dst_type, status);
    925		break;
    926	}
    927
    928	conn->state = BT_CLOSED;
    929	hci_connect_cfm(conn, status);
    930	hci_conn_del(conn);
    931}
    932
    933static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
    934{
    935	struct hci_conn *conn = data;
    936
    937	hci_dev_lock(hdev);
    938
    939	if (!err) {
    940		hci_connect_le_scan_cleanup(conn);
    941		goto done;
    942	}
    943
    944	bt_dev_err(hdev, "request failed to create LE connection: err %d", err);
    945
    946	/* Check if connection is still pending */
    947	if (conn != hci_lookup_le_connect(hdev))
    948		goto done;
    949
    950	hci_conn_failed(conn, err);
    951
    952done:
    953	hci_dev_unlock(hdev);
    954}
    955
    956static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
    957{
    958	struct hci_conn *conn = data;
    959
    960	bt_dev_dbg(hdev, "conn %p", conn);
    961
    962	return hci_le_create_conn_sync(hdev, conn);
    963}
    964
    965struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
    966				u8 dst_type, bool dst_resolved, u8 sec_level,
    967				u16 conn_timeout, u8 role)
    968{
    969	struct hci_conn *conn;
    970	struct smp_irk *irk;
    971	int err;
    972
     973	/* Let's make sure that LE is enabled. */
    974	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
    975		if (lmp_le_capable(hdev))
    976			return ERR_PTR(-ECONNREFUSED);
    977
    978		return ERR_PTR(-EOPNOTSUPP);
    979	}
    980
    981	/* Since the controller supports only one LE connection attempt at a
    982	 * time, we return -EBUSY if there is any connection attempt running.
    983	 */
    984	if (hci_lookup_le_connect(hdev))
    985		return ERR_PTR(-EBUSY);
    986
    987	/* If there's already a connection object but it's not in
    988	 * scanning state it means it must already be established, in
    989	 * which case we can't do anything else except report a failure
    990	 * to connect.
    991	 */
    992	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
    993	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
    994		return ERR_PTR(-EBUSY);
    995	}
    996
    997	/* Check if the destination address has been resolved by the controller
    998	 * since if it did then the identity address shall be used.
    999	 */
   1000	if (!dst_resolved) {
   1001		/* When given an identity address with existing identity
   1002		 * resolving key, the connection needs to be established
   1003		 * to a resolvable random address.
   1004		 *
   1005		 * Storing the resolvable random address is required here
   1006		 * to handle connection failures. The address will later
   1007		 * be resolved back into the original identity address
   1008		 * from the connect request.
   1009		 */
   1010		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
   1011		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
   1012			dst = &irk->rpa;
   1013			dst_type = ADDR_LE_DEV_RANDOM;
   1014		}
   1015	}
   1016
   1017	if (conn) {
   1018		bacpy(&conn->dst, dst);
   1019	} else {
   1020		conn = hci_conn_add(hdev, LE_LINK, dst, role);
   1021		if (!conn)
   1022			return ERR_PTR(-ENOMEM);
   1023		hci_conn_hold(conn);
   1024		conn->pending_sec_level = sec_level;
   1025	}
   1026
   1027	conn->dst_type = dst_type;
   1028	conn->sec_level = BT_SECURITY_LOW;
   1029	conn->conn_timeout = conn_timeout;
   1030
   1031	conn->state = BT_CONNECT;
   1032	clear_bit(HCI_CONN_SCANNING, &conn->flags);
   1033
   1034	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
   1035				 create_le_conn_complete);
   1036	if (err) {
   1037		hci_conn_del(conn);
   1038		return ERR_PTR(err);
   1039	}
   1040
   1041	return conn;
   1042}
   1043
   1044static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
   1045{
   1046	struct hci_conn *conn;
   1047
   1048	conn = hci_conn_hash_lookup_le(hdev, addr, type);
   1049	if (!conn)
   1050		return false;
   1051
   1052	if (conn->state != BT_CONNECTED)
   1053		return false;
   1054
   1055	return true;
   1056}
   1057
   1058/* This function requires the caller holds hdev->lock */
   1059static int hci_explicit_conn_params_set(struct hci_dev *hdev,
   1060					bdaddr_t *addr, u8 addr_type)
   1061{
   1062	struct hci_conn_params *params;
   1063
   1064	if (is_connected(hdev, addr, addr_type))
   1065		return -EISCONN;
   1066
   1067	params = hci_conn_params_lookup(hdev, addr, addr_type);
   1068	if (!params) {
   1069		params = hci_conn_params_add(hdev, addr, addr_type);
   1070		if (!params)
   1071			return -ENOMEM;
   1072
   1073		/* If we created new params, mark them to be deleted in
   1074		 * hci_connect_le_scan_cleanup. It's different case than
   1075		 * existing disabled params, those will stay after cleanup.
   1076		 */
   1077		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
   1078	}
   1079
   1080	/* We're trying to connect, so make sure params are at pend_le_conns */
   1081	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
   1082	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
   1083	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
   1084		list_del_init(&params->action);
   1085		list_add(&params->action, &hdev->pend_le_conns);
   1086	}
   1087
   1088	params->explicit_connect = true;
   1089
   1090	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
   1091	       params->auto_connect);
   1092
   1093	return 0;
   1094}
   1095
   1096/* This function requires the caller holds hdev->lock */
   1097struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
   1098				     u8 dst_type, u8 sec_level,
   1099				     u16 conn_timeout,
   1100				     enum conn_reasons conn_reason)
   1101{
   1102	struct hci_conn *conn;
   1103
    1104	/* Let's make sure that LE is enabled. */
   1105	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
   1106		if (lmp_le_capable(hdev))
   1107			return ERR_PTR(-ECONNREFUSED);
   1108
   1109		return ERR_PTR(-EOPNOTSUPP);
   1110	}
   1111
   1112	/* Some devices send ATT messages as soon as the physical link is
   1113	 * established. To be able to handle these ATT messages, the user-
   1114	 * space first establishes the connection and then starts the pairing
   1115	 * process.
   1116	 *
   1117	 * So if a hci_conn object already exists for the following connection
   1118	 * attempt, we simply update pending_sec_level and auth_type fields
   1119	 * and return the object found.
   1120	 */
   1121	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
   1122	if (conn) {
   1123		if (conn->pending_sec_level < sec_level)
   1124			conn->pending_sec_level = sec_level;
   1125		goto done;
   1126	}
   1127
   1128	BT_DBG("requesting refresh of dst_addr");
   1129
   1130	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
   1131	if (!conn)
   1132		return ERR_PTR(-ENOMEM);
   1133
   1134	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
   1135		hci_conn_del(conn);
   1136		return ERR_PTR(-EBUSY);
   1137	}
   1138
   1139	conn->state = BT_CONNECT;
   1140	set_bit(HCI_CONN_SCANNING, &conn->flags);
   1141	conn->dst_type = dst_type;
   1142	conn->sec_level = BT_SECURITY_LOW;
   1143	conn->pending_sec_level = sec_level;
   1144	conn->conn_timeout = conn_timeout;
   1145	conn->conn_reason = conn_reason;
   1146
   1147	hci_update_passive_scan(hdev);
   1148
   1149done:
   1150	hci_conn_hold(conn);
   1151	return conn;
   1152}
   1153
   1154struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
   1155				 u8 sec_level, u8 auth_type,
   1156				 enum conn_reasons conn_reason)
   1157{
   1158	struct hci_conn *acl;
   1159
   1160	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
   1161		if (lmp_bredr_capable(hdev))
   1162			return ERR_PTR(-ECONNREFUSED);
   1163
   1164		return ERR_PTR(-EOPNOTSUPP);
   1165	}
   1166
   1167	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
   1168	if (!acl) {
   1169		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
   1170		if (!acl)
   1171			return ERR_PTR(-ENOMEM);
   1172	}
   1173
   1174	hci_conn_hold(acl);
   1175
   1176	acl->conn_reason = conn_reason;
   1177	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
   1178		acl->sec_level = BT_SECURITY_LOW;
   1179		acl->pending_sec_level = sec_level;
   1180		acl->auth_type = auth_type;
   1181		hci_acl_create_connection(acl);
   1182	}
   1183
   1184	return acl;
   1185}
   1186
   1187struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
   1188				 __u16 setting, struct bt_codec *codec)
   1189{
   1190	struct hci_conn *acl;
   1191	struct hci_conn *sco;
   1192
   1193	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
   1194			      CONN_REASON_SCO_CONNECT);
   1195	if (IS_ERR(acl))
   1196		return acl;
   1197
   1198	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
   1199	if (!sco) {
   1200		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
   1201		if (!sco) {
   1202			hci_conn_drop(acl);
   1203			return ERR_PTR(-ENOMEM);
   1204		}
   1205	}
   1206
   1207	acl->link = sco;
   1208	sco->link = acl;
   1209
   1210	hci_conn_hold(sco);
   1211
   1212	sco->setting = setting;
   1213	sco->codec = *codec;
   1214
   1215	if (acl->state == BT_CONNECTED &&
   1216	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
   1217		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
   1218		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
   1219
   1220		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
   1221			/* defer SCO setup until mode change completed */
   1222			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
   1223			return sco;
   1224		}
   1225
   1226		hci_sco_setup(acl, 0x00);
   1227	}
   1228
   1229	return sco;
   1230}
   1231
   1232/* Check link security requirement */
   1233int hci_conn_check_link_mode(struct hci_conn *conn)
   1234{
   1235	BT_DBG("hcon %p", conn);
   1236
   1237	/* In Secure Connections Only mode, it is required that Secure
   1238	 * Connections is used and the link is encrypted with AES-CCM
   1239	 * using a P-256 authenticated combination key.
   1240	 */
   1241	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
   1242		if (!hci_conn_sc_enabled(conn) ||
   1243		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
   1244		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
   1245			return 0;
   1246	}
   1247
   1248	 /* AES encryption is required for Level 4:
   1249	  *
   1250	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
   1251	  * page 1319:
   1252	  *
   1253	  * 128-bit equivalent strength for link and encryption keys
   1254	  * required using FIPS approved algorithms (E0 not allowed,
   1255	  * SAFER+ not allowed, and P-192 not allowed; encryption key
   1256	  * not shortened)
   1257	  */
   1258	if (conn->sec_level == BT_SECURITY_FIPS &&
   1259	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
   1260		bt_dev_err(conn->hdev,
   1261			   "Invalid security: Missing AES-CCM usage");
   1262		return 0;
   1263	}
   1264
   1265	if (hci_conn_ssp_enabled(conn) &&
   1266	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
   1267		return 0;
   1268
   1269	return 1;
   1270}
   1271
   1272/* Authenticate remote device */
   1273static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
   1274{
   1275	BT_DBG("hcon %p", conn);
   1276
   1277	if (conn->pending_sec_level > sec_level)
   1278		sec_level = conn->pending_sec_level;
   1279
   1280	if (sec_level > conn->sec_level)
   1281		conn->pending_sec_level = sec_level;
   1282	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
   1283		return 1;
   1284
    1285	/* Make sure we preserve an existing MITM requirement */
   1286	auth_type |= (conn->auth_type & 0x01);
   1287
   1288	conn->auth_type = auth_type;
   1289
   1290	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
   1291		struct hci_cp_auth_requested cp;
   1292
   1293		cp.handle = cpu_to_le16(conn->handle);
   1294		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
   1295			     sizeof(cp), &cp);
   1296
   1297		/* If we're already encrypted set the REAUTH_PEND flag,
   1298		 * otherwise set the ENCRYPT_PEND.
   1299		 */
   1300		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
   1301			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
   1302		else
   1303			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
   1304	}
   1305
   1306	return 0;
   1307}
   1308
   1309/* Encrypt the link */
   1310static void hci_conn_encrypt(struct hci_conn *conn)
   1311{
   1312	BT_DBG("hcon %p", conn);
   1313
   1314	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
   1315		struct hci_cp_set_conn_encrypt cp;
   1316		cp.handle  = cpu_to_le16(conn->handle);
   1317		cp.encrypt = 0x01;
   1318		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
   1319			     &cp);
   1320	}
   1321}
   1322
   1323/* Enable security */
   1324int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
   1325		      bool initiator)
   1326{
   1327	BT_DBG("hcon %p", conn);
   1328
   1329	if (conn->type == LE_LINK)
   1330		return smp_conn_security(conn, sec_level);
   1331
   1332	/* For sdp we don't need the link key. */
   1333	if (sec_level == BT_SECURITY_SDP)
   1334		return 1;
   1335
    1336	/* For non-2.1 devices and a low security level we don't need the
    1337	   link key. */
   1338	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
   1339		return 1;
   1340
   1341	/* For other security levels we need the link key. */
   1342	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
   1343		goto auth;
   1344
   1345	/* An authenticated FIPS approved combination key has sufficient
   1346	 * security for security level 4. */
   1347	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
   1348	    sec_level == BT_SECURITY_FIPS)
   1349		goto encrypt;
   1350
   1351	/* An authenticated combination key has sufficient security for
   1352	   security level 3. */
   1353	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
   1354	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
   1355	    sec_level == BT_SECURITY_HIGH)
   1356		goto encrypt;
   1357
    1358	/* An unauthenticated combination key has sufficient security for
    1359	   security levels 1 and 2. */
   1360	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
   1361	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
   1362	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
   1363		goto encrypt;
   1364
    1365	/* A combination key always has sufficient security for security
    1366	   levels 1 or 2. The high security level requires that the combination
    1367	   key is generated using the maximum PIN code length (16).
    1368	   For pre-2.1 units. */
   1369	if (conn->key_type == HCI_LK_COMBINATION &&
   1370	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
   1371	     conn->pin_length == 16))
   1372		goto encrypt;
   1373
   1374auth:
   1375	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
   1376		return 0;
   1377
   1378	if (initiator)
   1379		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
   1380
   1381	if (!hci_conn_auth(conn, sec_level, auth_type))
   1382		return 0;
   1383
   1384encrypt:
   1385	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
   1386		/* Ensure that the encryption key size has been read,
   1387		 * otherwise stall the upper layer responses.
   1388		 */
   1389		if (!conn->enc_key_size)
   1390			return 0;
   1391
   1392		/* Nothing else needed, all requirements are met */
   1393		return 1;
   1394	}
   1395
   1396	hci_conn_encrypt(conn);
   1397	return 0;
   1398}
   1399EXPORT_SYMBOL(hci_conn_security);
   1400
   1401/* Check secure link requirement */
   1402int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
   1403{
   1404	BT_DBG("hcon %p", conn);
   1405
   1406	/* Accept if non-secure or higher security level is required */
   1407	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
   1408		return 1;
   1409
   1410	/* Accept if secure or higher security level is already present */
   1411	if (conn->sec_level == BT_SECURITY_HIGH ||
   1412	    conn->sec_level == BT_SECURITY_FIPS)
   1413		return 1;
   1414
   1415	/* Reject not secure link */
   1416	return 0;
   1417}
   1418EXPORT_SYMBOL(hci_conn_check_secure);
   1419
   1420/* Switch role */
   1421int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
   1422{
   1423	BT_DBG("hcon %p", conn);
   1424
   1425	if (role == conn->role)
   1426		return 1;
   1427
   1428	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
   1429		struct hci_cp_switch_role cp;
   1430		bacpy(&cp.bdaddr, &conn->dst);
   1431		cp.role = role;
   1432		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
   1433	}
   1434
   1435	return 0;
   1436}
   1437EXPORT_SYMBOL(hci_conn_switch_role);
   1438
   1439/* Enter active mode */
   1440void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
   1441{
   1442	struct hci_dev *hdev = conn->hdev;
   1443
   1444	BT_DBG("hcon %p mode %d", conn, conn->mode);
   1445
   1446	if (conn->mode != HCI_CM_SNIFF)
   1447		goto timer;
   1448
   1449	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
   1450		goto timer;
   1451
   1452	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
   1453		struct hci_cp_exit_sniff_mode cp;
   1454		cp.handle = cpu_to_le16(conn->handle);
   1455		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
   1456	}
   1457
   1458timer:
   1459	if (hdev->idle_timeout > 0)
   1460		queue_delayed_work(hdev->workqueue, &conn->idle_work,
   1461				   msecs_to_jiffies(hdev->idle_timeout));
   1462}
   1463
   1464/* Drop all connection on the device */
   1465void hci_conn_hash_flush(struct hci_dev *hdev)
   1466{
   1467	struct hci_conn_hash *h = &hdev->conn_hash;
   1468	struct hci_conn *c, *n;
   1469
   1470	BT_DBG("hdev %s", hdev->name);
   1471
   1472	list_for_each_entry_safe(c, n, &h->list, list) {
   1473		c->state = BT_CLOSED;
   1474
   1475		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
   1476		hci_conn_del(c);
   1477	}
   1478}
   1479
   1480/* Check pending connect attempts */
   1481void hci_conn_check_pending(struct hci_dev *hdev)
   1482{
   1483	struct hci_conn *conn;
   1484
   1485	BT_DBG("hdev %s", hdev->name);
   1486
   1487	hci_dev_lock(hdev);
   1488
   1489	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
   1490	if (conn)
   1491		hci_acl_create_connection(conn);
   1492
   1493	hci_dev_unlock(hdev);
   1494}
   1495
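/* Translate the connection role and flags into the HCI_LM_* bitmask that is
 * reported to user space by the connection info ioctl helpers below.
 */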
   1496static u32 get_link_mode(struct hci_conn *conn)
   1497{
   1498	u32 link_mode = 0;
   1499
   1500	if (conn->role == HCI_ROLE_MASTER)
   1501		link_mode |= HCI_LM_MASTER;
   1502
   1503	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
   1504		link_mode |= HCI_LM_ENCRYPT;
   1505
   1506	if (test_bit(HCI_CONN_AUTH, &conn->flags))
   1507		link_mode |= HCI_LM_AUTH;
   1508
   1509	if (test_bit(HCI_CONN_SECURE, &conn->flags))
   1510		link_mode |= HCI_LM_SECURE;
   1511
   1512	if (test_bit(HCI_CONN_FIPS, &conn->flags))
   1513		link_mode |= HCI_LM_FIPS;
   1514
   1515	return link_mode;
   1516}
   1517
   1518int hci_get_conn_list(void __user *arg)
   1519{
   1520	struct hci_conn *c;
   1521	struct hci_conn_list_req req, *cl;
   1522	struct hci_conn_info *ci;
   1523	struct hci_dev *hdev;
   1524	int n = 0, size, err;
   1525
   1526	if (copy_from_user(&req, arg, sizeof(req)))
   1527		return -EFAULT;
   1528
   1529	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
   1530		return -EINVAL;
   1531
   1532	size = sizeof(req) + req.conn_num * sizeof(*ci);
   1533
   1534	cl = kmalloc(size, GFP_KERNEL);
   1535	if (!cl)
   1536		return -ENOMEM;
   1537
   1538	hdev = hci_dev_get(req.dev_id);
   1539	if (!hdev) {
   1540		kfree(cl);
   1541		return -ENODEV;
   1542	}
   1543
   1544	ci = cl->conn_info;
   1545
   1546	hci_dev_lock(hdev);
   1547	list_for_each_entry(c, &hdev->conn_hash.list, list) {
   1548		bacpy(&(ci + n)->bdaddr, &c->dst);
   1549		(ci + n)->handle = c->handle;
   1550		(ci + n)->type  = c->type;
   1551		(ci + n)->out   = c->out;
   1552		(ci + n)->state = c->state;
   1553		(ci + n)->link_mode = get_link_mode(c);
   1554		if (++n >= req.conn_num)
   1555			break;
   1556	}
   1557	hci_dev_unlock(hdev);
   1558
   1559	cl->dev_id = hdev->id;
   1560	cl->conn_num = n;
   1561	size = sizeof(req) + n * sizeof(*ci);
   1562
   1563	hci_dev_put(hdev);
   1564
   1565	err = copy_to_user(arg, cl, size);
   1566	kfree(cl);
   1567
   1568	return err ? -EFAULT : 0;
   1569}
   1570
   1571int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
   1572{
   1573	struct hci_conn_info_req req;
   1574	struct hci_conn_info ci;
   1575	struct hci_conn *conn;
   1576	char __user *ptr = arg + sizeof(req);
   1577
   1578	if (copy_from_user(&req, arg, sizeof(req)))
   1579		return -EFAULT;
   1580
   1581	hci_dev_lock(hdev);
   1582	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
   1583	if (conn) {
   1584		bacpy(&ci.bdaddr, &conn->dst);
   1585		ci.handle = conn->handle;
   1586		ci.type  = conn->type;
   1587		ci.out   = conn->out;
   1588		ci.state = conn->state;
   1589		ci.link_mode = get_link_mode(conn);
   1590	}
   1591	hci_dev_unlock(hdev);
   1592
   1593	if (!conn)
   1594		return -ENOENT;
   1595
   1596	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
   1597}
   1598
   1599int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
   1600{
   1601	struct hci_auth_info_req req;
   1602	struct hci_conn *conn;
   1603
   1604	if (copy_from_user(&req, arg, sizeof(req)))
   1605		return -EFAULT;
   1606
   1607	hci_dev_lock(hdev);
   1608	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
   1609	if (conn)
   1610		req.type = conn->auth_type;
   1611	hci_dev_unlock(hdev);
   1612
   1613	if (!conn)
   1614		return -ENOENT;
   1615
   1616	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
   1617}
   1618
   1619struct hci_chan *hci_chan_create(struct hci_conn *conn)
   1620{
   1621	struct hci_dev *hdev = conn->hdev;
   1622	struct hci_chan *chan;
   1623
   1624	BT_DBG("%s hcon %p", hdev->name, conn);
   1625
   1626	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
   1627		BT_DBG("Refusing to create new hci_chan");
   1628		return NULL;
   1629	}
   1630
   1631	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
   1632	if (!chan)
   1633		return NULL;
   1634
   1635	chan->conn = hci_conn_get(conn);
   1636	skb_queue_head_init(&chan->data_q);
   1637	chan->state = BT_CONNECTED;
   1638
   1639	list_add_rcu(&chan->list, &conn->chan_list);
   1640
   1641	return chan;
   1642}
   1643
   1644void hci_chan_del(struct hci_chan *chan)
   1645{
   1646	struct hci_conn *conn = chan->conn;
   1647	struct hci_dev *hdev = conn->hdev;
   1648
   1649	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
   1650
   1651	list_del_rcu(&chan->list);
   1652
   1653	synchronize_rcu();
   1654
    1655	/* Prevent new hci_chans from being created for this hci_conn */
   1656	set_bit(HCI_CONN_DROP, &conn->flags);
   1657
   1658	hci_conn_put(conn);
   1659
   1660	skb_queue_purge(&chan->data_q);
   1661	kfree(chan);
   1662}
   1663
   1664void hci_chan_list_flush(struct hci_conn *conn)
   1665{
   1666	struct hci_chan *chan, *n;
   1667
   1668	BT_DBG("hcon %p", conn);
   1669
   1670	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
   1671		hci_chan_del(chan);
   1672}
   1673
   1674static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
   1675						 __u16 handle)
   1676{
   1677	struct hci_chan *hchan;
   1678
   1679	list_for_each_entry(hchan, &hcon->chan_list, list) {
   1680		if (hchan->handle == handle)
   1681			return hchan;
   1682	}
   1683
   1684	return NULL;
   1685}
   1686
   1687struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
   1688{
   1689	struct hci_conn_hash *h = &hdev->conn_hash;
   1690	struct hci_conn *hcon;
   1691	struct hci_chan *hchan = NULL;
   1692
   1693	rcu_read_lock();
   1694
   1695	list_for_each_entry_rcu(hcon, &h->list, list) {
   1696		hchan = __hci_chan_lookup_handle(hcon, handle);
   1697		if (hchan)
   1698			break;
   1699	}
   1700
   1701	rcu_read_unlock();
   1702
   1703	return hchan;
   1704}
   1705
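/* Report the PHYs and slot variants usable on this connection as BT_PHY_*
 * bits. Note that for ACL and eSCO links the EDR bits in conn->pkt_type are
 * "shall not be used" flags, which is why the 2M/3M checks below are
 * inverted.
 */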
   1706u32 hci_conn_get_phy(struct hci_conn *conn)
   1707{
   1708	u32 phys = 0;
   1709
   1710	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
   1711	 * Table 6.2: Packets defined for synchronous, asynchronous, and
   1712	 * CPB logical transport types.
   1713	 */
   1714	switch (conn->type) {
   1715	case SCO_LINK:
   1716		/* SCO logical transport (1 Mb/s):
   1717		 * HV1, HV2, HV3 and DV.
   1718		 */
   1719		phys |= BT_PHY_BR_1M_1SLOT;
   1720
   1721		break;
   1722
   1723	case ACL_LINK:
   1724		/* ACL logical transport (1 Mb/s) ptt=0:
   1725		 * DH1, DM3, DH3, DM5 and DH5.
   1726		 */
   1727		phys |= BT_PHY_BR_1M_1SLOT;
   1728
   1729		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
   1730			phys |= BT_PHY_BR_1M_3SLOT;
   1731
   1732		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
   1733			phys |= BT_PHY_BR_1M_5SLOT;
   1734
   1735		/* ACL logical transport (2 Mb/s) ptt=1:
   1736		 * 2-DH1, 2-DH3 and 2-DH5.
   1737		 */
   1738		if (!(conn->pkt_type & HCI_2DH1))
   1739			phys |= BT_PHY_EDR_2M_1SLOT;
   1740
   1741		if (!(conn->pkt_type & HCI_2DH3))
   1742			phys |= BT_PHY_EDR_2M_3SLOT;
   1743
   1744		if (!(conn->pkt_type & HCI_2DH5))
   1745			phys |= BT_PHY_EDR_2M_5SLOT;
   1746
   1747		/* ACL logical transport (3 Mb/s) ptt=1:
   1748		 * 3-DH1, 3-DH3 and 3-DH5.
   1749		 */
   1750		if (!(conn->pkt_type & HCI_3DH1))
   1751			phys |= BT_PHY_EDR_3M_1SLOT;
   1752
   1753		if (!(conn->pkt_type & HCI_3DH3))
   1754			phys |= BT_PHY_EDR_3M_3SLOT;
   1755
   1756		if (!(conn->pkt_type & HCI_3DH5))
   1757			phys |= BT_PHY_EDR_3M_5SLOT;
   1758
   1759		break;
   1760
   1761	case ESCO_LINK:
   1762		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
   1763		phys |= BT_PHY_BR_1M_1SLOT;
   1764
   1765		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
   1766			phys |= BT_PHY_BR_1M_3SLOT;
   1767
   1768		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
   1769		if (!(conn->pkt_type & ESCO_2EV3))
   1770			phys |= BT_PHY_EDR_2M_1SLOT;
   1771
   1772		if (!(conn->pkt_type & ESCO_2EV5))
   1773			phys |= BT_PHY_EDR_2M_3SLOT;
   1774
   1775		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
   1776		if (!(conn->pkt_type & ESCO_3EV3))
   1777			phys |= BT_PHY_EDR_3M_1SLOT;
   1778
   1779		if (!(conn->pkt_type & ESCO_3EV5))
   1780			phys |= BT_PHY_EDR_3M_3SLOT;
   1781
   1782		break;
   1783
   1784	case LE_LINK:
   1785		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
   1786			phys |= BT_PHY_LE_1M_TX;
   1787
   1788		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
   1789			phys |= BT_PHY_LE_1M_RX;
   1790
   1791		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
   1792			phys |= BT_PHY_LE_2M_TX;
   1793
   1794		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
   1795			phys |= BT_PHY_LE_2M_RX;
   1796
   1797		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
   1798			phys |= BT_PHY_LE_CODED_TX;
   1799
   1800		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
   1801			phys |= BT_PHY_LE_CODED_RX;
   1802
   1803		break;
   1804	}
   1805
   1806	return phys;
   1807}
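
The hci_get_conn_list(), hci_get_conn_info() and hci_get_auth_info() helpers
above back the HCIGETCONNLIST, HCIGETCONNINFO and HCIGETAUTHINFO ioctls issued
on a raw HCI socket, as dispatched by the HCI socket ioctl handler. A minimal
user-space sketch of the connection-list query, assuming the BlueZ libbluetooth
headers for the HCIGETCONNLIST constant and the struct hci_conn_list_req /
struct hci_conn_info layouts (link with -lbluetooth for ba2str()):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct hci_conn_list_req *cl;
	struct hci_conn_info *ci;
	int sk, i;

	/* Raw HCI socket; the kernel routes the ioctl to hci_get_conn_list() */
	sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (sk < 0) {
		perror("socket");
		return 1;
	}

	/* Room for up to 10 entries on hci0; the kernel caps conn_num itself */
	cl = calloc(1, sizeof(*cl) + 10 * sizeof(*ci));
	if (!cl)
		return 1;
	cl->dev_id = 0;		/* hci0 */
	cl->conn_num = 10;
	ci = cl->conn_info;

	if (ioctl(sk, HCIGETCONNLIST, (void *)cl) < 0) {
		perror("HCIGETCONNLIST");
		return 1;
	}

	/* conn_num now holds the number of entries the kernel filled in */
	for (i = 0; i < cl->conn_num; i++) {
		char addr[18];

		ba2str(&ci[i].bdaddr, addr);
		printf("handle %u %s type %u state %u link_mode 0x%x\n",
		       ci[i].handle, addr, ci[i].type, ci[i].state,
		       ci[i].link_mode);
	}

	free(cl);
	close(sk);
	return 0;
}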