cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

l2cap_core.c (203628B)


      1/*
      2   BlueZ - Bluetooth protocol stack for Linux
      3   Copyright (C) 2000-2001 Qualcomm Incorporated
      4   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
      5   Copyright (C) 2010 Google Inc.
      6   Copyright (C) 2011 ProFUSION Embedded Systems
      7   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
      8
      9   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
     10
     11   This program is free software; you can redistribute it and/or modify
     12   it under the terms of the GNU General Public License version 2 as
     13   published by the Free Software Foundation;
     14
     15   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     16   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
     18   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
     19   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
     20   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     21   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     22   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     23
     24   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
     25   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
     26   SOFTWARE IS DISCLAIMED.
     27*/
     28
     29/* Bluetooth L2CAP core. */
     30
     31#include <linux/module.h>
     32
     33#include <linux/debugfs.h>
     34#include <linux/crc16.h>
     35#include <linux/filter.h>
     36
     37#include <net/bluetooth/bluetooth.h>
     38#include <net/bluetooth/hci_core.h>
     39#include <net/bluetooth/l2cap.h>
     40
     41#include "smp.h"
     42#include "a2mp.h"
     43#include "amp.h"
     44
     45#define LE_FLOWCTL_MAX_CREDITS 65535
     46
     47bool disable_ertm;
     48bool enable_ecred;
     49
     50static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
     51
     52static LIST_HEAD(chan_list);
     53static DEFINE_RWLOCK(chan_list_lock);
     54
     55static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
     56				       u8 code, u8 ident, u16 dlen, void *data);
     57static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
     58			   void *data);
     59static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
     60static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
     61
     62static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
     63		     struct sk_buff_head *skbs, u8 event);
     64
     65static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
     66{
     67	if (link_type == LE_LINK) {
     68		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
     69			return BDADDR_LE_PUBLIC;
     70		else
     71			return BDADDR_LE_RANDOM;
     72	}
     73
     74	return BDADDR_BREDR;
     75}
     76
     77static inline u8 bdaddr_src_type(struct hci_conn *hcon)
     78{
     79	return bdaddr_type(hcon->type, hcon->src_type);
     80}
     81
     82static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
     83{
     84	return bdaddr_type(hcon->type, hcon->dst_type);
     85}
     86
     87/* ---- L2CAP channels ---- */
     88
     89static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
     90						   u16 cid)
     91{
     92	struct l2cap_chan *c;
     93
     94	list_for_each_entry(c, &conn->chan_l, list) {
     95		if (c->dcid == cid)
     96			return c;
     97	}
     98	return NULL;
     99}
    100
    101static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
    102						   u16 cid)
    103{
    104	struct l2cap_chan *c;
    105
    106	list_for_each_entry(c, &conn->chan_l, list) {
    107		if (c->scid == cid)
    108			return c;
    109	}
    110	return NULL;
    111}
    112
    113/* Find channel with given SCID.
    114 * Returns locked channel. */
    115static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
    116						 u16 cid)
    117{
    118	struct l2cap_chan *c;
    119
    120	mutex_lock(&conn->chan_lock);
    121	c = __l2cap_get_chan_by_scid(conn, cid);
    122	if (c)
    123		l2cap_chan_lock(c);
    124	mutex_unlock(&conn->chan_lock);
    125
    126	return c;
    127}
    128
    129/* Find channel with given DCID.
    130 * Returns locked channel.
    131 */
    132static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
    133						 u16 cid)
    134{
    135	struct l2cap_chan *c;
    136
    137	mutex_lock(&conn->chan_lock);
    138	c = __l2cap_get_chan_by_dcid(conn, cid);
    139	if (c)
    140		l2cap_chan_lock(c);
    141	mutex_unlock(&conn->chan_lock);
    142
    143	return c;
    144}
    145
    146static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
    147						    u8 ident)
    148{
    149	struct l2cap_chan *c;
    150
    151	list_for_each_entry(c, &conn->chan_l, list) {
    152		if (c->ident == ident)
    153			return c;
    154	}
    155	return NULL;
    156}
    157
    158static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
    159						  u8 ident)
    160{
    161	struct l2cap_chan *c;
    162
    163	mutex_lock(&conn->chan_lock);
    164	c = __l2cap_get_chan_by_ident(conn, ident);
    165	if (c)
    166		l2cap_chan_lock(c);
    167	mutex_unlock(&conn->chan_lock);
    168
    169	return c;
    170}
    171
    172static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
    173						      u8 src_type)
    174{
    175	struct l2cap_chan *c;
    176
    177	list_for_each_entry(c, &chan_list, global_l) {
    178		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
    179			continue;
    180
    181		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
    182			continue;
    183
    184		if (c->sport == psm && !bacmp(&c->src, src))
    185			return c;
    186	}
    187	return NULL;
    188}
    189
    190int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
    191{
    192	int err;
    193
    194	write_lock(&chan_list_lock);
    195
    196	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
    197		err = -EADDRINUSE;
    198		goto done;
    199	}
    200
    201	if (psm) {
    202		chan->psm = psm;
    203		chan->sport = psm;
    204		err = 0;
    205	} else {
    206		u16 p, start, end, incr;
    207
    208		if (chan->src_type == BDADDR_BREDR) {
    209			start = L2CAP_PSM_DYN_START;
    210			end = L2CAP_PSM_AUTO_END;
    211			incr = 2;
    212		} else {
    213			start = L2CAP_PSM_LE_DYN_START;
    214			end = L2CAP_PSM_LE_DYN_END;
    215			incr = 1;
    216		}
    217
    218		err = -EINVAL;
    219		for (p = start; p <= end; p += incr)
    220			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
    221							 chan->src_type)) {
    222				chan->psm   = cpu_to_le16(p);
    223				chan->sport = cpu_to_le16(p);
    224				err = 0;
    225				break;
    226			}
    227	}
    228
    229done:
    230	write_unlock(&chan_list_lock);
    231	return err;
    232}
    233EXPORT_SYMBOL_GPL(l2cap_add_psm);
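/* Worked example for the dynamic PSM search above (illustrative, based on
 * the range constants used in the loop): on BR/EDR the loop probes
 * L2CAP_PSM_DYN_START, L2CAP_PSM_DYN_START + 2, ... up to
 * L2CAP_PSM_AUTO_END, stepping by 2 so the least significant octet stays
 * odd as required for valid PSMs, while on LE it walks
 * L2CAP_PSM_LE_DYN_START..L2CAP_PSM_LE_DYN_END one value at a time. The
 * first PSM not already bound to the same source address wins; if the
 * whole range is taken, l2cap_add_psm() returns -EINVAL.
 */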
    234
    235int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
    236{
    237	write_lock(&chan_list_lock);
    238
    239	/* Override the defaults (which are for conn-oriented) */
    240	chan->omtu = L2CAP_DEFAULT_MTU;
    241	chan->chan_type = L2CAP_CHAN_FIXED;
    242
    243	chan->scid = scid;
    244
    245	write_unlock(&chan_list_lock);
    246
    247	return 0;
    248}
    249
    250static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
    251{
    252	u16 cid, dyn_end;
    253
    254	if (conn->hcon->type == LE_LINK)
    255		dyn_end = L2CAP_CID_LE_DYN_END;
    256	else
    257		dyn_end = L2CAP_CID_DYN_END;
    258
    259	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
    260		if (!__l2cap_get_chan_by_scid(conn, cid))
    261			return cid;
    262	}
    263
    264	return 0;
    265}
    266
    267static void l2cap_state_change(struct l2cap_chan *chan, int state)
    268{
    269	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
    270	       state_to_string(state));
    271
    272	chan->state = state;
    273	chan->ops->state_change(chan, state, 0);
    274}
    275
    276static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
    277						int state, int err)
    278{
    279	chan->state = state;
    280	chan->ops->state_change(chan, chan->state, err);
    281}
    282
    283static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
    284{
    285	chan->ops->state_change(chan, chan->state, err);
    286}
    287
    288static void __set_retrans_timer(struct l2cap_chan *chan)
    289{
    290	if (!delayed_work_pending(&chan->monitor_timer) &&
    291	    chan->retrans_timeout) {
    292		l2cap_set_timer(chan, &chan->retrans_timer,
    293				msecs_to_jiffies(chan->retrans_timeout));
    294	}
    295}
    296
    297static void __set_monitor_timer(struct l2cap_chan *chan)
    298{
    299	__clear_retrans_timer(chan);
    300	if (chan->monitor_timeout) {
    301		l2cap_set_timer(chan, &chan->monitor_timer,
    302				msecs_to_jiffies(chan->monitor_timeout));
    303	}
    304}
    305
    306static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
    307					       u16 seq)
    308{
    309	struct sk_buff *skb;
    310
    311	skb_queue_walk(head, skb) {
    312		if (bt_cb(skb)->l2cap.txseq == seq)
    313			return skb;
    314	}
    315
    316	return NULL;
    317}
    318
    319/* ---- L2CAP sequence number lists ---- */
    320
    321/* For ERTM, ordered lists of sequence numbers must be tracked for
    322 * SREJ requests that are received and for frames that are to be
    323 * retransmitted. These seq_list functions implement a singly-linked
    324 * list in an array, where membership in the list can also be checked
    325 * in constant time. Items can also be added to the tail of the list
    326 * and removed from the head in constant time, without further memory
    327 * allocs or frees.
    328 */
    329
    330static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
    331{
    332	size_t alloc_size, i;
    333
    334	/* Allocated size is a power of 2 to map sequence numbers
     335	 * (which may be up to 14 bits) into a smaller array that is
    336	 * sized for the negotiated ERTM transmit windows.
    337	 */
    338	alloc_size = roundup_pow_of_two(size);
    339
    340	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
    341	if (!seq_list->list)
    342		return -ENOMEM;
    343
    344	seq_list->mask = alloc_size - 1;
    345	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
    346	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
    347	for (i = 0; i < alloc_size; i++)
    348		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
    349
    350	return 0;
    351}
    352
    353static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
    354{
    355	kfree(seq_list->list);
    356}
    357
    358static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
    359					   u16 seq)
    360{
    361	/* Constant-time check for list membership */
    362	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
    363}
    364
    365static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
    366{
    367	u16 seq = seq_list->head;
    368	u16 mask = seq_list->mask;
    369
    370	seq_list->head = seq_list->list[seq & mask];
    371	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
    372
    373	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
    374		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
    375		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
    376	}
    377
    378	return seq;
    379}
    380
    381static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
    382{
    383	u16 i;
    384
    385	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
    386		return;
    387
    388	for (i = 0; i <= seq_list->mask; i++)
    389		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
    390
    391	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
    392	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
    393}
    394
    395static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
    396{
    397	u16 mask = seq_list->mask;
    398
    399	/* All appends happen in constant time */
    400
    401	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
    402		return;
    403
    404	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
    405		seq_list->head = seq;
    406	else
    407		seq_list->list[seq_list->tail & mask] = seq;
    408
    409	seq_list->tail = seq;
    410	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
    411}
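/* Minimal usage sketch for the seq_list helpers above: membership checks
 * are constant-time array probes and pop removes entries in FIFO order.
 * The window size of 8 is an arbitrary example value.
 */
static int __maybe_unused l2cap_seq_list_example(void)
{
	struct l2cap_seq_list list;
	int err;

	err = l2cap_seq_list_init(&list, 8);
	if (err)
		return err;

	/* Queue two sequence numbers, e.g. for retransmission */
	l2cap_seq_list_append(&list, 5);
	l2cap_seq_list_append(&list, 2);

	WARN_ON(!l2cap_seq_list_contains(&list, 2));	/* O(1) lookup */
	WARN_ON(l2cap_seq_list_pop(&list) != 5);	/* FIFO: 5 comes out first */
	WARN_ON(l2cap_seq_list_pop(&list) != 2);

	l2cap_seq_list_free(&list);
	return 0;
}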
    412
    413static void l2cap_chan_timeout(struct work_struct *work)
    414{
    415	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
    416					       chan_timer.work);
    417	struct l2cap_conn *conn = chan->conn;
    418	int reason;
    419
    420	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
    421
    422	mutex_lock(&conn->chan_lock);
    423	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
    424	 * this work. No need to call l2cap_chan_hold(chan) here again.
    425	 */
    426	l2cap_chan_lock(chan);
    427
    428	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
    429		reason = ECONNREFUSED;
    430	else if (chan->state == BT_CONNECT &&
    431		 chan->sec_level != BT_SECURITY_SDP)
    432		reason = ECONNREFUSED;
    433	else
    434		reason = ETIMEDOUT;
    435
    436	l2cap_chan_close(chan, reason);
    437
    438	chan->ops->close(chan);
    439
    440	l2cap_chan_unlock(chan);
    441	l2cap_chan_put(chan);
    442
    443	mutex_unlock(&conn->chan_lock);
    444}
    445
    446struct l2cap_chan *l2cap_chan_create(void)
    447{
    448	struct l2cap_chan *chan;
    449
    450	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
    451	if (!chan)
    452		return NULL;
    453
    454	skb_queue_head_init(&chan->tx_q);
    455	skb_queue_head_init(&chan->srej_q);
    456	mutex_init(&chan->lock);
    457
    458	/* Set default lock nesting level */
    459	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
    460
    461	write_lock(&chan_list_lock);
    462	list_add(&chan->global_l, &chan_list);
    463	write_unlock(&chan_list_lock);
    464
    465	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
    466
    467	chan->state = BT_OPEN;
    468
    469	kref_init(&chan->kref);
    470
    471	/* This flag is cleared in l2cap_chan_ready() */
    472	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
    473
    474	BT_DBG("chan %p", chan);
    475
    476	return chan;
    477}
    478EXPORT_SYMBOL_GPL(l2cap_chan_create);
    479
    480static void l2cap_chan_destroy(struct kref *kref)
    481{
    482	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
    483
    484	BT_DBG("chan %p", chan);
    485
    486	write_lock(&chan_list_lock);
    487	list_del(&chan->global_l);
    488	write_unlock(&chan_list_lock);
    489
    490	kfree(chan);
    491}
    492
    493void l2cap_chan_hold(struct l2cap_chan *c)
    494{
    495	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
    496
    497	kref_get(&c->kref);
    498}
    499
    500void l2cap_chan_put(struct l2cap_chan *c)
    501{
    502	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
    503
    504	kref_put(&c->kref, l2cap_chan_destroy);
    505}
    506EXPORT_SYMBOL_GPL(l2cap_chan_put);
    507
    508void l2cap_chan_set_defaults(struct l2cap_chan *chan)
    509{
    510	chan->fcs  = L2CAP_FCS_CRC16;
    511	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
    512	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
    513	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
    514	chan->remote_max_tx = chan->max_tx;
    515	chan->remote_tx_win = chan->tx_win;
    516	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
    517	chan->sec_level = BT_SECURITY_LOW;
    518	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
    519	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
    520	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
    521
    522	chan->conf_state = 0;
    523	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
    524
    525	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
    526}
    527EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
    528
    529static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
    530{
    531	chan->sdu = NULL;
    532	chan->sdu_last_frag = NULL;
    533	chan->sdu_len = 0;
    534	chan->tx_credits = tx_credits;
    535	/* Derive MPS from connection MTU to stop HCI fragmentation */
    536	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
    537	/* Give enough credits for a full packet */
    538	chan->rx_credits = (chan->imtu / chan->mps) + 1;
    539
    540	skb_queue_head_init(&chan->tx_q);
    541}
    542
    543static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
    544{
    545	l2cap_le_flowctl_init(chan, tx_credits);
    546
    547	/* L2CAP implementations shall support a minimum MPS of 64 octets */
    548	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
    549		chan->mps = L2CAP_ECRED_MIN_MPS;
    550		chan->rx_credits = (chan->imtu / chan->mps) + 1;
    551	}
    552}
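/* Worked example for the MPS/credit derivation above (values are
 * illustrative): with imtu = 512 and a connection MTU of 247, the MPS
 * becomes min(512, 247 - L2CAP_HDR_SIZE) = 243 and rx_credits works out
 * to 512 / 243 + 1 = 3, i.e. enough credits for one full SDU plus the
 * trailing partial segment. For enhanced credit based flow control the
 * MPS is then raised to L2CAP_ECRED_MIN_MPS (64 octets) if the derived
 * value is smaller, and the credits are recomputed accordingly.
 */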
    553
    554void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
    555{
    556	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
    557	       __le16_to_cpu(chan->psm), chan->dcid);
    558
    559	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
    560
    561	chan->conn = conn;
    562
    563	switch (chan->chan_type) {
    564	case L2CAP_CHAN_CONN_ORIENTED:
    565		/* Alloc CID for connection-oriented socket */
    566		chan->scid = l2cap_alloc_cid(conn);
    567		if (conn->hcon->type == ACL_LINK)
    568			chan->omtu = L2CAP_DEFAULT_MTU;
    569		break;
    570
    571	case L2CAP_CHAN_CONN_LESS:
    572		/* Connectionless socket */
    573		chan->scid = L2CAP_CID_CONN_LESS;
    574		chan->dcid = L2CAP_CID_CONN_LESS;
    575		chan->omtu = L2CAP_DEFAULT_MTU;
    576		break;
    577
    578	case L2CAP_CHAN_FIXED:
    579		/* Caller will set CID and CID specific MTU values */
    580		break;
    581
    582	default:
    583		/* Raw socket can send/recv signalling messages only */
    584		chan->scid = L2CAP_CID_SIGNALING;
    585		chan->dcid = L2CAP_CID_SIGNALING;
    586		chan->omtu = L2CAP_DEFAULT_MTU;
    587	}
    588
    589	chan->local_id		= L2CAP_BESTEFFORT_ID;
    590	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
    591	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
    592	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
    593	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
    594	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
    595
    596	l2cap_chan_hold(chan);
    597
    598	/* Only keep a reference for fixed channels if they requested it */
    599	if (chan->chan_type != L2CAP_CHAN_FIXED ||
    600	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
    601		hci_conn_hold(conn->hcon);
    602
    603	list_add(&chan->list, &conn->chan_l);
    604}
    605
    606void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
    607{
    608	mutex_lock(&conn->chan_lock);
    609	__l2cap_chan_add(conn, chan);
    610	mutex_unlock(&conn->chan_lock);
    611}
    612
    613void l2cap_chan_del(struct l2cap_chan *chan, int err)
    614{
    615	struct l2cap_conn *conn = chan->conn;
    616
    617	__clear_chan_timer(chan);
    618
    619	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
    620	       state_to_string(chan->state));
    621
    622	chan->ops->teardown(chan, err);
    623
    624	if (conn) {
    625		struct amp_mgr *mgr = conn->hcon->amp_mgr;
    626		/* Delete from channel list */
    627		list_del(&chan->list);
    628
    629		l2cap_chan_put(chan);
    630
    631		chan->conn = NULL;
    632
    633		/* Reference was only held for non-fixed channels or
    634		 * fixed channels that explicitly requested it using the
    635		 * FLAG_HOLD_HCI_CONN flag.
    636		 */
    637		if (chan->chan_type != L2CAP_CHAN_FIXED ||
    638		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
    639			hci_conn_drop(conn->hcon);
    640
    641		if (mgr && mgr->bredr_chan == chan)
    642			mgr->bredr_chan = NULL;
    643	}
    644
    645	if (chan->hs_hchan) {
    646		struct hci_chan *hs_hchan = chan->hs_hchan;
    647
    648		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
    649		amp_disconnect_logical_link(hs_hchan);
    650	}
    651
    652	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
    653		return;
    654
    655	switch (chan->mode) {
    656	case L2CAP_MODE_BASIC:
    657		break;
    658
    659	case L2CAP_MODE_LE_FLOWCTL:
    660	case L2CAP_MODE_EXT_FLOWCTL:
    661		skb_queue_purge(&chan->tx_q);
    662		break;
    663
    664	case L2CAP_MODE_ERTM:
    665		__clear_retrans_timer(chan);
    666		__clear_monitor_timer(chan);
    667		__clear_ack_timer(chan);
    668
    669		skb_queue_purge(&chan->srej_q);
    670
    671		l2cap_seq_list_free(&chan->srej_list);
    672		l2cap_seq_list_free(&chan->retrans_list);
    673		fallthrough;
    674
    675	case L2CAP_MODE_STREAMING:
    676		skb_queue_purge(&chan->tx_q);
    677		break;
    678	}
    679}
    680EXPORT_SYMBOL_GPL(l2cap_chan_del);
    681
    682static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
    683			      void *data)
    684{
    685	struct l2cap_chan *chan;
    686
    687	list_for_each_entry(chan, &conn->chan_l, list) {
    688		func(chan, data);
    689	}
    690}
    691
    692void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
    693		     void *data)
    694{
    695	if (!conn)
    696		return;
    697
    698	mutex_lock(&conn->chan_lock);
    699	__l2cap_chan_list(conn, func, data);
    700	mutex_unlock(&conn->chan_lock);
    701}
    702
    703EXPORT_SYMBOL_GPL(l2cap_chan_list);
    704
    705static void l2cap_conn_update_id_addr(struct work_struct *work)
    706{
    707	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
    708					       id_addr_update_work);
    709	struct hci_conn *hcon = conn->hcon;
    710	struct l2cap_chan *chan;
    711
    712	mutex_lock(&conn->chan_lock);
    713
    714	list_for_each_entry(chan, &conn->chan_l, list) {
    715		l2cap_chan_lock(chan);
    716		bacpy(&chan->dst, &hcon->dst);
    717		chan->dst_type = bdaddr_dst_type(hcon);
    718		l2cap_chan_unlock(chan);
    719	}
    720
    721	mutex_unlock(&conn->chan_lock);
    722}
    723
    724static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
    725{
    726	struct l2cap_conn *conn = chan->conn;
    727	struct l2cap_le_conn_rsp rsp;
    728	u16 result;
    729
    730	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
    731		result = L2CAP_CR_LE_AUTHORIZATION;
    732	else
    733		result = L2CAP_CR_LE_BAD_PSM;
    734
    735	l2cap_state_change(chan, BT_DISCONN);
    736
    737	rsp.dcid    = cpu_to_le16(chan->scid);
    738	rsp.mtu     = cpu_to_le16(chan->imtu);
    739	rsp.mps     = cpu_to_le16(chan->mps);
    740	rsp.credits = cpu_to_le16(chan->rx_credits);
    741	rsp.result  = cpu_to_le16(result);
    742
    743	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
    744		       &rsp);
    745}
    746
    747static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
    748{
    749	struct l2cap_conn *conn = chan->conn;
    750	struct l2cap_ecred_conn_rsp rsp;
    751	u16 result;
    752
    753	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
    754		result = L2CAP_CR_LE_AUTHORIZATION;
    755	else
    756		result = L2CAP_CR_LE_BAD_PSM;
    757
    758	l2cap_state_change(chan, BT_DISCONN);
    759
    760	memset(&rsp, 0, sizeof(rsp));
    761
    762	rsp.result  = cpu_to_le16(result);
    763
    764	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
    765		       &rsp);
    766}
    767
    768static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
    769{
    770	struct l2cap_conn *conn = chan->conn;
    771	struct l2cap_conn_rsp rsp;
    772	u16 result;
    773
    774	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
    775		result = L2CAP_CR_SEC_BLOCK;
    776	else
    777		result = L2CAP_CR_BAD_PSM;
    778
    779	l2cap_state_change(chan, BT_DISCONN);
    780
    781	rsp.scid   = cpu_to_le16(chan->dcid);
    782	rsp.dcid   = cpu_to_le16(chan->scid);
    783	rsp.result = cpu_to_le16(result);
    784	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
    785
    786	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
    787}
    788
    789void l2cap_chan_close(struct l2cap_chan *chan, int reason)
    790{
    791	struct l2cap_conn *conn = chan->conn;
    792
    793	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
    794
    795	switch (chan->state) {
    796	case BT_LISTEN:
    797		chan->ops->teardown(chan, 0);
    798		break;
    799
    800	case BT_CONNECTED:
    801	case BT_CONFIG:
    802		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
    803			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
    804			l2cap_send_disconn_req(chan, reason);
    805		} else
    806			l2cap_chan_del(chan, reason);
    807		break;
    808
    809	case BT_CONNECT2:
    810		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
    811			if (conn->hcon->type == ACL_LINK)
    812				l2cap_chan_connect_reject(chan);
    813			else if (conn->hcon->type == LE_LINK) {
    814				switch (chan->mode) {
    815				case L2CAP_MODE_LE_FLOWCTL:
    816					l2cap_chan_le_connect_reject(chan);
    817					break;
    818				case L2CAP_MODE_EXT_FLOWCTL:
    819					l2cap_chan_ecred_connect_reject(chan);
    820					break;
    821				}
    822			}
    823		}
    824
    825		l2cap_chan_del(chan, reason);
    826		break;
    827
    828	case BT_CONNECT:
    829	case BT_DISCONN:
    830		l2cap_chan_del(chan, reason);
    831		break;
    832
    833	default:
    834		chan->ops->teardown(chan, 0);
    835		break;
    836	}
    837}
    838EXPORT_SYMBOL(l2cap_chan_close);
    839
    840static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
    841{
    842	switch (chan->chan_type) {
    843	case L2CAP_CHAN_RAW:
    844		switch (chan->sec_level) {
    845		case BT_SECURITY_HIGH:
    846		case BT_SECURITY_FIPS:
    847			return HCI_AT_DEDICATED_BONDING_MITM;
    848		case BT_SECURITY_MEDIUM:
    849			return HCI_AT_DEDICATED_BONDING;
    850		default:
    851			return HCI_AT_NO_BONDING;
    852		}
    853		break;
    854	case L2CAP_CHAN_CONN_LESS:
    855		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
    856			if (chan->sec_level == BT_SECURITY_LOW)
    857				chan->sec_level = BT_SECURITY_SDP;
    858		}
    859		if (chan->sec_level == BT_SECURITY_HIGH ||
    860		    chan->sec_level == BT_SECURITY_FIPS)
    861			return HCI_AT_NO_BONDING_MITM;
    862		else
    863			return HCI_AT_NO_BONDING;
    864		break;
    865	case L2CAP_CHAN_CONN_ORIENTED:
    866		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
    867			if (chan->sec_level == BT_SECURITY_LOW)
    868				chan->sec_level = BT_SECURITY_SDP;
    869
    870			if (chan->sec_level == BT_SECURITY_HIGH ||
    871			    chan->sec_level == BT_SECURITY_FIPS)
    872				return HCI_AT_NO_BONDING_MITM;
    873			else
    874				return HCI_AT_NO_BONDING;
    875		}
    876		fallthrough;
    877
    878	default:
    879		switch (chan->sec_level) {
    880		case BT_SECURITY_HIGH:
    881		case BT_SECURITY_FIPS:
    882			return HCI_AT_GENERAL_BONDING_MITM;
    883		case BT_SECURITY_MEDIUM:
    884			return HCI_AT_GENERAL_BONDING;
    885		default:
    886			return HCI_AT_NO_BONDING;
    887		}
    888		break;
    889	}
    890}
    891
    892/* Service level security */
    893int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
    894{
    895	struct l2cap_conn *conn = chan->conn;
    896	__u8 auth_type;
    897
    898	if (conn->hcon->type == LE_LINK)
    899		return smp_conn_security(conn->hcon, chan->sec_level);
    900
    901	auth_type = l2cap_get_auth_type(chan);
    902
    903	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
    904				 initiator);
    905}
    906
    907static u8 l2cap_get_ident(struct l2cap_conn *conn)
    908{
    909	u8 id;
    910
     911	/* Get next available identifier.
    912	 *    1 - 128 are used by kernel.
    913	 *  129 - 199 are reserved.
    914	 *  200 - 254 are used by utilities like l2ping, etc.
    915	 */
    916
    917	mutex_lock(&conn->ident_lock);
    918
    919	if (++conn->tx_ident > 128)
    920		conn->tx_ident = 1;
    921
    922	id = conn->tx_ident;
    923
    924	mutex_unlock(&conn->ident_lock);
    925
    926	return id;
    927}
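/* Example: if tx_ident is currently 128, the next call above wraps and
 * hands out ident 1 again. Idents 129-254 are never generated here, so
 * they stay available to userspace utilities such as l2ping.
 */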
    928
    929static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
    930			   void *data)
    931{
    932	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
    933	u8 flags;
    934
    935	BT_DBG("code 0x%2.2x", code);
    936
    937	if (!skb)
    938		return;
    939
     940	/* Use NO_FLUSH if supported or if this is an LE link (which does
     941	 * not support auto-flushing packets) */
    942	if (lmp_no_flush_capable(conn->hcon->hdev) ||
    943	    conn->hcon->type == LE_LINK)
    944		flags = ACL_START_NO_FLUSH;
    945	else
    946		flags = ACL_START;
    947
    948	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
    949	skb->priority = HCI_PRIO_MAX;
    950
    951	hci_send_acl(conn->hchan, skb, flags);
    952}
    953
    954static bool __chan_is_moving(struct l2cap_chan *chan)
    955{
    956	return chan->move_state != L2CAP_MOVE_STABLE &&
    957	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
    958}
    959
    960static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
    961{
    962	struct hci_conn *hcon = chan->conn->hcon;
    963	u16 flags;
    964
    965	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
    966	       skb->priority);
    967
    968	if (chan->hs_hcon && !__chan_is_moving(chan)) {
    969		if (chan->hs_hchan)
    970			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
    971		else
    972			kfree_skb(skb);
    973
    974		return;
    975	}
    976
    977	/* Use NO_FLUSH for LE links (where this is the only option) or
    978	 * if the BR/EDR link supports it and flushing has not been
    979	 * explicitly requested (through FLAG_FLUSHABLE).
    980	 */
    981	if (hcon->type == LE_LINK ||
    982	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
    983	     lmp_no_flush_capable(hcon->hdev)))
    984		flags = ACL_START_NO_FLUSH;
    985	else
    986		flags = ACL_START;
    987
    988	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
    989	hci_send_acl(chan->conn->hchan, skb, flags);
    990}
    991
    992static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
    993{
    994	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
    995	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
    996
    997	if (enh & L2CAP_CTRL_FRAME_TYPE) {
    998		/* S-Frame */
    999		control->sframe = 1;
   1000		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
   1001		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
   1002
   1003		control->sar = 0;
   1004		control->txseq = 0;
   1005	} else {
   1006		/* I-Frame */
   1007		control->sframe = 0;
   1008		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
   1009		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
   1010
   1011		control->poll = 0;
   1012		control->super = 0;
   1013	}
   1014}
   1015
   1016static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
   1017{
   1018	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
   1019	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
   1020
   1021	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
   1022		/* S-Frame */
   1023		control->sframe = 1;
   1024		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
   1025		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
   1026
   1027		control->sar = 0;
   1028		control->txseq = 0;
   1029	} else {
   1030		/* I-Frame */
   1031		control->sframe = 0;
   1032		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
   1033		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
   1034
   1035		control->poll = 0;
   1036		control->super = 0;
   1037	}
   1038}
   1039
   1040static inline void __unpack_control(struct l2cap_chan *chan,
   1041				    struct sk_buff *skb)
   1042{
   1043	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
   1044		__unpack_extended_control(get_unaligned_le32(skb->data),
   1045					  &bt_cb(skb)->l2cap);
   1046		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
   1047	} else {
   1048		__unpack_enhanced_control(get_unaligned_le16(skb->data),
   1049					  &bt_cb(skb)->l2cap);
   1050		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
   1051	}
   1052}
   1053
   1054static u32 __pack_extended_control(struct l2cap_ctrl *control)
   1055{
   1056	u32 packed;
   1057
   1058	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
   1059	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
   1060
   1061	if (control->sframe) {
   1062		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
   1063		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
   1064		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
   1065	} else {
   1066		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
   1067		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
   1068	}
   1069
   1070	return packed;
   1071}
   1072
   1073static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
   1074{
   1075	u16 packed;
   1076
   1077	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
   1078	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
   1079
   1080	if (control->sframe) {
   1081		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
   1082		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
   1083		packed |= L2CAP_CTRL_FRAME_TYPE;
   1084	} else {
   1085		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
   1086		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
   1087	}
   1088
   1089	return packed;
   1090}
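/* Worked example, assuming the standard ERTM enhanced control field
 * layout (REQSEQ at bit 8, FINAL at bit 7, SUPERVISE at bit 2, S-frame
 * type bit at bit 0): an RR S-frame acknowledging reqseq 5 with the
 * final bit set packs to
 *
 *	(5 << L2CAP_CTRL_REQSEQ_SHIFT) | (1 << L2CAP_CTRL_FINAL_SHIFT) |
 *	L2CAP_CTRL_FRAME_TYPE == 0x0581
 *
 * and __unpack_enhanced_control(0x0581, &control) recovers reqseq = 5,
 * final = 1, sframe = 1, poll = 0 and super = L2CAP_SUPER_RR.
 */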
   1091
   1092static inline void __pack_control(struct l2cap_chan *chan,
   1093				  struct l2cap_ctrl *control,
   1094				  struct sk_buff *skb)
   1095{
   1096	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
   1097		put_unaligned_le32(__pack_extended_control(control),
   1098				   skb->data + L2CAP_HDR_SIZE);
   1099	} else {
   1100		put_unaligned_le16(__pack_enhanced_control(control),
   1101				   skb->data + L2CAP_HDR_SIZE);
   1102	}
   1103}
   1104
   1105static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
   1106{
   1107	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
   1108		return L2CAP_EXT_HDR_SIZE;
   1109	else
   1110		return L2CAP_ENH_HDR_SIZE;
   1111}
   1112
   1113static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
   1114					       u32 control)
   1115{
   1116	struct sk_buff *skb;
   1117	struct l2cap_hdr *lh;
   1118	int hlen = __ertm_hdr_size(chan);
   1119
   1120	if (chan->fcs == L2CAP_FCS_CRC16)
   1121		hlen += L2CAP_FCS_SIZE;
   1122
   1123	skb = bt_skb_alloc(hlen, GFP_KERNEL);
   1124
   1125	if (!skb)
   1126		return ERR_PTR(-ENOMEM);
   1127
   1128	lh = skb_put(skb, L2CAP_HDR_SIZE);
   1129	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
   1130	lh->cid = cpu_to_le16(chan->dcid);
   1131
   1132	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
   1133		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
   1134	else
   1135		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
   1136
   1137	if (chan->fcs == L2CAP_FCS_CRC16) {
   1138		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
   1139		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
   1140	}
   1141
   1142	skb->priority = HCI_PRIO_MAX;
   1143	return skb;
   1144}
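/* Size example for the S-frame PDU built above, assuming enhanced (16 bit)
 * control and CRC16 FCS: hlen = L2CAP_ENH_HDR_SIZE + L2CAP_FCS_SIZE =
 * 6 + 2 = 8 bytes on the wire, while lh->len advertises hlen -
 * L2CAP_HDR_SIZE = 4 bytes of payload (2 bytes control + 2 bytes FCS).
 * With extended (32 bit) control the totals become 10 and 6 respectively.
 */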
   1145
   1146static void l2cap_send_sframe(struct l2cap_chan *chan,
   1147			      struct l2cap_ctrl *control)
   1148{
   1149	struct sk_buff *skb;
   1150	u32 control_field;
   1151
   1152	BT_DBG("chan %p, control %p", chan, control);
   1153
   1154	if (!control->sframe)
   1155		return;
   1156
   1157	if (__chan_is_moving(chan))
   1158		return;
   1159
   1160	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
   1161	    !control->poll)
   1162		control->final = 1;
   1163
   1164	if (control->super == L2CAP_SUPER_RR)
   1165		clear_bit(CONN_RNR_SENT, &chan->conn_state);
   1166	else if (control->super == L2CAP_SUPER_RNR)
   1167		set_bit(CONN_RNR_SENT, &chan->conn_state);
   1168
   1169	if (control->super != L2CAP_SUPER_SREJ) {
   1170		chan->last_acked_seq = control->reqseq;
   1171		__clear_ack_timer(chan);
   1172	}
   1173
   1174	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
   1175	       control->final, control->poll, control->super);
   1176
   1177	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
   1178		control_field = __pack_extended_control(control);
   1179	else
   1180		control_field = __pack_enhanced_control(control);
   1181
   1182	skb = l2cap_create_sframe_pdu(chan, control_field);
   1183	if (!IS_ERR(skb))
   1184		l2cap_do_send(chan, skb);
   1185}
   1186
   1187static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
   1188{
   1189	struct l2cap_ctrl control;
   1190
   1191	BT_DBG("chan %p, poll %d", chan, poll);
   1192
   1193	memset(&control, 0, sizeof(control));
   1194	control.sframe = 1;
   1195	control.poll = poll;
   1196
   1197	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
   1198		control.super = L2CAP_SUPER_RNR;
   1199	else
   1200		control.super = L2CAP_SUPER_RR;
   1201
   1202	control.reqseq = chan->buffer_seq;
   1203	l2cap_send_sframe(chan, &control);
   1204}
   1205
   1206static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
   1207{
   1208	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
   1209		return true;
   1210
   1211	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
   1212}
   1213
   1214static bool __amp_capable(struct l2cap_chan *chan)
   1215{
   1216	struct l2cap_conn *conn = chan->conn;
   1217	struct hci_dev *hdev;
   1218	bool amp_available = false;
   1219
   1220	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
   1221		return false;
   1222
   1223	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
   1224		return false;
   1225
   1226	read_lock(&hci_dev_list_lock);
   1227	list_for_each_entry(hdev, &hci_dev_list, list) {
   1228		if (hdev->amp_type != AMP_TYPE_BREDR &&
   1229		    test_bit(HCI_UP, &hdev->flags)) {
   1230			amp_available = true;
   1231			break;
   1232		}
   1233	}
   1234	read_unlock(&hci_dev_list_lock);
   1235
   1236	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
   1237		return amp_available;
   1238
   1239	return false;
   1240}
   1241
   1242static bool l2cap_check_efs(struct l2cap_chan *chan)
   1243{
   1244	/* Check EFS parameters */
   1245	return true;
   1246}
   1247
   1248void l2cap_send_conn_req(struct l2cap_chan *chan)
   1249{
   1250	struct l2cap_conn *conn = chan->conn;
   1251	struct l2cap_conn_req req;
   1252
   1253	req.scid = cpu_to_le16(chan->scid);
   1254	req.psm  = chan->psm;
   1255
   1256	chan->ident = l2cap_get_ident(conn);
   1257
   1258	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
   1259
   1260	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
   1261}
   1262
   1263static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
   1264{
   1265	struct l2cap_create_chan_req req;
   1266	req.scid = cpu_to_le16(chan->scid);
   1267	req.psm  = chan->psm;
   1268	req.amp_id = amp_id;
   1269
   1270	chan->ident = l2cap_get_ident(chan->conn);
   1271
   1272	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
   1273		       sizeof(req), &req);
   1274}
   1275
   1276static void l2cap_move_setup(struct l2cap_chan *chan)
   1277{
   1278	struct sk_buff *skb;
   1279
   1280	BT_DBG("chan %p", chan);
   1281
   1282	if (chan->mode != L2CAP_MODE_ERTM)
   1283		return;
   1284
   1285	__clear_retrans_timer(chan);
   1286	__clear_monitor_timer(chan);
   1287	__clear_ack_timer(chan);
   1288
   1289	chan->retry_count = 0;
   1290	skb_queue_walk(&chan->tx_q, skb) {
   1291		if (bt_cb(skb)->l2cap.retries)
   1292			bt_cb(skb)->l2cap.retries = 1;
   1293		else
   1294			break;
   1295	}
   1296
   1297	chan->expected_tx_seq = chan->buffer_seq;
   1298
   1299	clear_bit(CONN_REJ_ACT, &chan->conn_state);
   1300	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
   1301	l2cap_seq_list_clear(&chan->retrans_list);
   1302	l2cap_seq_list_clear(&chan->srej_list);
   1303	skb_queue_purge(&chan->srej_q);
   1304
   1305	chan->tx_state = L2CAP_TX_STATE_XMIT;
   1306	chan->rx_state = L2CAP_RX_STATE_MOVE;
   1307
   1308	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
   1309}
   1310
   1311static void l2cap_move_done(struct l2cap_chan *chan)
   1312{
   1313	u8 move_role = chan->move_role;
   1314	BT_DBG("chan %p", chan);
   1315
   1316	chan->move_state = L2CAP_MOVE_STABLE;
   1317	chan->move_role = L2CAP_MOVE_ROLE_NONE;
   1318
   1319	if (chan->mode != L2CAP_MODE_ERTM)
   1320		return;
   1321
   1322	switch (move_role) {
   1323	case L2CAP_MOVE_ROLE_INITIATOR:
   1324		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
   1325		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
   1326		break;
   1327	case L2CAP_MOVE_ROLE_RESPONDER:
   1328		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
   1329		break;
   1330	}
   1331}
   1332
   1333static void l2cap_chan_ready(struct l2cap_chan *chan)
   1334{
   1335	/* The channel may have already been flagged as connected in
   1336	 * case of receiving data before the L2CAP info req/rsp
   1337	 * procedure is complete.
   1338	 */
   1339	if (chan->state == BT_CONNECTED)
   1340		return;
   1341
   1342	/* This clears all conf flags, including CONF_NOT_COMPLETE */
   1343	chan->conf_state = 0;
   1344	__clear_chan_timer(chan);
   1345
   1346	switch (chan->mode) {
   1347	case L2CAP_MODE_LE_FLOWCTL:
   1348	case L2CAP_MODE_EXT_FLOWCTL:
   1349		if (!chan->tx_credits)
   1350			chan->ops->suspend(chan);
   1351		break;
   1352	}
   1353
   1354	chan->state = BT_CONNECTED;
   1355
   1356	chan->ops->ready(chan);
   1357}
   1358
   1359static void l2cap_le_connect(struct l2cap_chan *chan)
   1360{
   1361	struct l2cap_conn *conn = chan->conn;
   1362	struct l2cap_le_conn_req req;
   1363
   1364	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
   1365		return;
   1366
   1367	if (!chan->imtu)
   1368		chan->imtu = chan->conn->mtu;
   1369
   1370	l2cap_le_flowctl_init(chan, 0);
   1371
   1372	req.psm     = chan->psm;
   1373	req.scid    = cpu_to_le16(chan->scid);
   1374	req.mtu     = cpu_to_le16(chan->imtu);
   1375	req.mps     = cpu_to_le16(chan->mps);
   1376	req.credits = cpu_to_le16(chan->rx_credits);
   1377
   1378	chan->ident = l2cap_get_ident(conn);
   1379
   1380	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
   1381		       sizeof(req), &req);
   1382}
   1383
   1384struct l2cap_ecred_conn_data {
   1385	struct {
   1386		struct l2cap_ecred_conn_req req;
   1387		__le16 scid[5];
   1388	} __packed pdu;
   1389	struct l2cap_chan *chan;
   1390	struct pid *pid;
   1391	int count;
   1392};
   1393
   1394static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
   1395{
   1396	struct l2cap_ecred_conn_data *conn = data;
   1397	struct pid *pid;
   1398
   1399	if (chan == conn->chan)
   1400		return;
   1401
   1402	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
   1403		return;
   1404
   1405	pid = chan->ops->get_peer_pid(chan);
   1406
   1407	/* Only add deferred channels with the same PID/PSM */
   1408	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
   1409	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
   1410		return;
   1411
   1412	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
   1413		return;
   1414
   1415	l2cap_ecred_init(chan, 0);
   1416
   1417	/* Set the same ident so we can match on the rsp */
   1418	chan->ident = conn->chan->ident;
   1419
   1420	/* Include all channels deferred */
   1421	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
   1422
   1423	conn->count++;
   1424}
   1425
   1426static void l2cap_ecred_connect(struct l2cap_chan *chan)
   1427{
   1428	struct l2cap_conn *conn = chan->conn;
   1429	struct l2cap_ecred_conn_data data;
   1430
   1431	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
   1432		return;
   1433
   1434	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
   1435		return;
   1436
   1437	l2cap_ecred_init(chan, 0);
   1438
   1439	memset(&data, 0, sizeof(data));
   1440	data.pdu.req.psm     = chan->psm;
   1441	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
   1442	data.pdu.req.mps     = cpu_to_le16(chan->mps);
   1443	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
   1444	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
   1445
   1446	chan->ident = l2cap_get_ident(conn);
   1447
   1448	data.count = 1;
   1449	data.chan = chan;
   1450	data.pid = chan->ops->get_peer_pid(chan);
   1451
   1452	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
   1453
   1454	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
   1455		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
   1456		       &data.pdu);
   1457}
   1458
   1459static void l2cap_le_start(struct l2cap_chan *chan)
   1460{
   1461	struct l2cap_conn *conn = chan->conn;
   1462
   1463	if (!smp_conn_security(conn->hcon, chan->sec_level))
   1464		return;
   1465
   1466	if (!chan->psm) {
   1467		l2cap_chan_ready(chan);
   1468		return;
   1469	}
   1470
   1471	if (chan->state == BT_CONNECT) {
   1472		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
   1473			l2cap_ecred_connect(chan);
   1474		else
   1475			l2cap_le_connect(chan);
   1476	}
   1477}
   1478
   1479static void l2cap_start_connection(struct l2cap_chan *chan)
   1480{
   1481	if (__amp_capable(chan)) {
   1482		BT_DBG("chan %p AMP capable: discover AMPs", chan);
   1483		a2mp_discover_amp(chan);
   1484	} else if (chan->conn->hcon->type == LE_LINK) {
   1485		l2cap_le_start(chan);
   1486	} else {
   1487		l2cap_send_conn_req(chan);
   1488	}
   1489}
   1490
   1491static void l2cap_request_info(struct l2cap_conn *conn)
   1492{
   1493	struct l2cap_info_req req;
   1494
   1495	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
   1496		return;
   1497
   1498	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
   1499
   1500	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
   1501	conn->info_ident = l2cap_get_ident(conn);
   1502
   1503	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
   1504
   1505	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
   1506		       sizeof(req), &req);
   1507}
   1508
   1509static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
   1510{
   1511	/* The minimum encryption key size needs to be enforced by the
   1512	 * host stack before establishing any L2CAP connections. The
   1513	 * specification in theory allows a minimum of 1, but to align
   1514	 * BR/EDR and LE transports, a minimum of 7 is chosen.
   1515	 *
   1516	 * This check might also be called for unencrypted connections
   1517	 * that have no key size requirements. Ensure that the link is
   1518	 * actually encrypted before enforcing a key size.
   1519	 */
   1520	int min_key_size = hcon->hdev->min_enc_key_size;
   1521
   1522	/* On FIPS security level, key size must be 16 bytes */
   1523	if (hcon->sec_level == BT_SECURITY_FIPS)
   1524		min_key_size = 16;
   1525
   1526	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
   1527		hcon->enc_key_size >= min_key_size);
   1528}
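/* Example outcomes for the check above, assuming the default minimum
 * encryption key size of 7: an unencrypted link passes (there is no key
 * to police yet), an encrypted link reporting enc_key_size 5 is rejected,
 * 7 or more is accepted, and on BT_SECURITY_FIPS only a full 16 byte key
 * passes.
 */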
   1529
   1530static void l2cap_do_start(struct l2cap_chan *chan)
   1531{
   1532	struct l2cap_conn *conn = chan->conn;
   1533
   1534	if (conn->hcon->type == LE_LINK) {
   1535		l2cap_le_start(chan);
   1536		return;
   1537	}
   1538
   1539	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
   1540		l2cap_request_info(conn);
   1541		return;
   1542	}
   1543
   1544	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
   1545		return;
   1546
   1547	if (!l2cap_chan_check_security(chan, true) ||
   1548	    !__l2cap_no_conn_pending(chan))
   1549		return;
   1550
   1551	if (l2cap_check_enc_key_size(conn->hcon))
   1552		l2cap_start_connection(chan);
   1553	else
   1554		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
   1555}
   1556
   1557static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
   1558{
   1559	u32 local_feat_mask = l2cap_feat_mask;
   1560	if (!disable_ertm)
   1561		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
   1562
   1563	switch (mode) {
   1564	case L2CAP_MODE_ERTM:
   1565		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
   1566	case L2CAP_MODE_STREAMING:
   1567		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
   1568	default:
   1569		return 0x00;
   1570	}
   1571}
   1572
   1573static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
   1574{
   1575	struct l2cap_conn *conn = chan->conn;
   1576	struct l2cap_disconn_req req;
   1577
   1578	if (!conn)
   1579		return;
   1580
   1581	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
   1582		__clear_retrans_timer(chan);
   1583		__clear_monitor_timer(chan);
   1584		__clear_ack_timer(chan);
   1585	}
   1586
   1587	if (chan->scid == L2CAP_CID_A2MP) {
   1588		l2cap_state_change(chan, BT_DISCONN);
   1589		return;
   1590	}
   1591
   1592	req.dcid = cpu_to_le16(chan->dcid);
   1593	req.scid = cpu_to_le16(chan->scid);
   1594	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
   1595		       sizeof(req), &req);
   1596
   1597	l2cap_state_change_and_error(chan, BT_DISCONN, err);
   1598}
   1599
   1600/* ---- L2CAP connections ---- */
   1601static void l2cap_conn_start(struct l2cap_conn *conn)
   1602{
   1603	struct l2cap_chan *chan, *tmp;
   1604
   1605	BT_DBG("conn %p", conn);
   1606
   1607	mutex_lock(&conn->chan_lock);
   1608
   1609	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
   1610		l2cap_chan_lock(chan);
   1611
   1612		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
   1613			l2cap_chan_ready(chan);
   1614			l2cap_chan_unlock(chan);
   1615			continue;
   1616		}
   1617
   1618		if (chan->state == BT_CONNECT) {
   1619			if (!l2cap_chan_check_security(chan, true) ||
   1620			    !__l2cap_no_conn_pending(chan)) {
   1621				l2cap_chan_unlock(chan);
   1622				continue;
   1623			}
   1624
   1625			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
   1626			    && test_bit(CONF_STATE2_DEVICE,
   1627					&chan->conf_state)) {
   1628				l2cap_chan_close(chan, ECONNRESET);
   1629				l2cap_chan_unlock(chan);
   1630				continue;
   1631			}
   1632
   1633			if (l2cap_check_enc_key_size(conn->hcon))
   1634				l2cap_start_connection(chan);
   1635			else
   1636				l2cap_chan_close(chan, ECONNREFUSED);
   1637
   1638		} else if (chan->state == BT_CONNECT2) {
   1639			struct l2cap_conn_rsp rsp;
   1640			char buf[128];
   1641			rsp.scid = cpu_to_le16(chan->dcid);
   1642			rsp.dcid = cpu_to_le16(chan->scid);
   1643
   1644			if (l2cap_chan_check_security(chan, false)) {
   1645				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
   1646					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
   1647					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
   1648					chan->ops->defer(chan);
   1649
   1650				} else {
   1651					l2cap_state_change(chan, BT_CONFIG);
   1652					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
   1653					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
   1654				}
   1655			} else {
   1656				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
   1657				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
   1658			}
   1659
   1660			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
   1661				       sizeof(rsp), &rsp);
   1662
   1663			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
   1664			    rsp.result != L2CAP_CR_SUCCESS) {
   1665				l2cap_chan_unlock(chan);
   1666				continue;
   1667			}
   1668
   1669			set_bit(CONF_REQ_SENT, &chan->conf_state);
   1670			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
   1671				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
   1672			chan->num_conf_req++;
   1673		}
   1674
   1675		l2cap_chan_unlock(chan);
   1676	}
   1677
   1678	mutex_unlock(&conn->chan_lock);
   1679}
   1680
   1681static void l2cap_le_conn_ready(struct l2cap_conn *conn)
   1682{
   1683	struct hci_conn *hcon = conn->hcon;
   1684	struct hci_dev *hdev = hcon->hdev;
   1685
   1686	BT_DBG("%s conn %p", hdev->name, conn);
   1687
   1688	/* For outgoing pairing which doesn't necessarily have an
   1689	 * associated socket (e.g. mgmt_pair_device).
   1690	 */
   1691	if (hcon->out)
   1692		smp_conn_security(hcon, hcon->pending_sec_level);
   1693
   1694	/* For LE peripheral connections, make sure the connection interval
   1695	 * is in the range of the minimum and maximum interval that has
   1696	 * been configured for this connection. If not, then trigger
   1697	 * the connection update procedure.
   1698	 */
   1699	if (hcon->role == HCI_ROLE_SLAVE &&
   1700	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
   1701	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
   1702		struct l2cap_conn_param_update_req req;
   1703
   1704		req.min = cpu_to_le16(hcon->le_conn_min_interval);
   1705		req.max = cpu_to_le16(hcon->le_conn_max_interval);
   1706		req.latency = cpu_to_le16(hcon->le_conn_latency);
   1707		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
   1708
   1709		l2cap_send_cmd(conn, l2cap_get_ident(conn),
   1710			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
   1711	}
   1712}
   1713
   1714static void l2cap_conn_ready(struct l2cap_conn *conn)
   1715{
   1716	struct l2cap_chan *chan;
   1717	struct hci_conn *hcon = conn->hcon;
   1718
   1719	BT_DBG("conn %p", conn);
   1720
   1721	if (hcon->type == ACL_LINK)
   1722		l2cap_request_info(conn);
   1723
   1724	mutex_lock(&conn->chan_lock);
   1725
   1726	list_for_each_entry(chan, &conn->chan_l, list) {
   1727
   1728		l2cap_chan_lock(chan);
   1729
   1730		if (chan->scid == L2CAP_CID_A2MP) {
   1731			l2cap_chan_unlock(chan);
   1732			continue;
   1733		}
   1734
   1735		if (hcon->type == LE_LINK) {
   1736			l2cap_le_start(chan);
   1737		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
   1738			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
   1739				l2cap_chan_ready(chan);
   1740		} else if (chan->state == BT_CONNECT) {
   1741			l2cap_do_start(chan);
   1742		}
   1743
   1744		l2cap_chan_unlock(chan);
   1745	}
   1746
   1747	mutex_unlock(&conn->chan_lock);
   1748
   1749	if (hcon->type == LE_LINK)
   1750		l2cap_le_conn_ready(conn);
   1751
   1752	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
   1753}
   1754
    1755/* Notify sockets that we cannot guarantee reliability anymore */
   1756static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
   1757{
   1758	struct l2cap_chan *chan;
   1759
   1760	BT_DBG("conn %p", conn);
   1761
   1762	mutex_lock(&conn->chan_lock);
   1763
   1764	list_for_each_entry(chan, &conn->chan_l, list) {
   1765		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
   1766			l2cap_chan_set_err(chan, err);
   1767	}
   1768
   1769	mutex_unlock(&conn->chan_lock);
   1770}
   1771
   1772static void l2cap_info_timeout(struct work_struct *work)
   1773{
   1774	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
   1775					       info_timer.work);
   1776
   1777	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
   1778	conn->info_ident = 0;
   1779
   1780	l2cap_conn_start(conn);
   1781}
   1782
   1783/*
   1784 * l2cap_user
   1785 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
   1786 * callback is called during registration. The ->remove callback is called
   1787 * during unregistration.
    1788 * An l2cap_user object is either unregistered explicitly or implicitly when
    1789 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
   1790 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
   1791 * External modules must own a reference to the l2cap_conn object if they intend
   1792 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
   1793 * any time if they don't.
   1794 */
   1795
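/* A minimal usage sketch (illustration only, nothing in this file depends
 * on it): an external module registering an l2cap_user on a connection.
 * The my_* names are hypothetical; struct l2cap_user, the ->probe/->remove
 * signatures, l2cap_register_user(), l2cap_unregister_user() and
 * l2cap_conn_get()/l2cap_conn_put() are the real interfaces used below.
 * conn->hcon and conn->hchan stay valid until my_remove() has run, and the
 * module holds a conn reference so that it may safely unregister later.
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.list   = LIST_HEAD_INIT(my_user.list),
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 */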
   1796int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
   1797{
   1798	struct hci_dev *hdev = conn->hcon->hdev;
   1799	int ret;
   1800
   1801	/* We need to check whether l2cap_conn is registered. If it is not, we
    1802	 * must not register the l2cap_user. l2cap_conn_del() unregisters
   1803	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
   1804	 * relies on the parent hci_conn object to be locked. This itself relies
   1805	 * on the hci_dev object to be locked. So we must lock the hci device
   1806	 * here, too. */
   1807
   1808	hci_dev_lock(hdev);
   1809
   1810	if (!list_empty(&user->list)) {
   1811		ret = -EINVAL;
   1812		goto out_unlock;
   1813	}
   1814
   1815	/* conn->hchan is NULL after l2cap_conn_del() was called */
   1816	if (!conn->hchan) {
   1817		ret = -ENODEV;
   1818		goto out_unlock;
   1819	}
   1820
   1821	ret = user->probe(conn, user);
   1822	if (ret)
   1823		goto out_unlock;
   1824
   1825	list_add(&user->list, &conn->users);
   1826	ret = 0;
   1827
   1828out_unlock:
   1829	hci_dev_unlock(hdev);
   1830	return ret;
   1831}
   1832EXPORT_SYMBOL(l2cap_register_user);
   1833
   1834void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
   1835{
   1836	struct hci_dev *hdev = conn->hcon->hdev;
   1837
   1838	hci_dev_lock(hdev);
   1839
   1840	if (list_empty(&user->list))
   1841		goto out_unlock;
   1842
   1843	list_del_init(&user->list);
   1844	user->remove(conn, user);
   1845
   1846out_unlock:
   1847	hci_dev_unlock(hdev);
   1848}
   1849EXPORT_SYMBOL(l2cap_unregister_user);
   1850
   1851static void l2cap_unregister_all_users(struct l2cap_conn *conn)
   1852{
   1853	struct l2cap_user *user;
   1854
   1855	while (!list_empty(&conn->users)) {
   1856		user = list_first_entry(&conn->users, struct l2cap_user, list);
   1857		list_del_init(&user->list);
   1858		user->remove(conn, user);
   1859	}
   1860}
   1861
   1862static void l2cap_conn_del(struct hci_conn *hcon, int err)
   1863{
   1864	struct l2cap_conn *conn = hcon->l2cap_data;
   1865	struct l2cap_chan *chan, *l;
   1866
   1867	if (!conn)
   1868		return;
   1869
   1870	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
   1871
   1872	kfree_skb(conn->rx_skb);
   1873
   1874	skb_queue_purge(&conn->pending_rx);
   1875
   1876	/* We can not call flush_work(&conn->pending_rx_work) here since we
   1877	 * might block if we are running on a worker from the same workqueue
   1878	 * pending_rx_work is waiting on.
   1879	 */
   1880	if (work_pending(&conn->pending_rx_work))
   1881		cancel_work_sync(&conn->pending_rx_work);
   1882
   1883	if (work_pending(&conn->id_addr_update_work))
   1884		cancel_work_sync(&conn->id_addr_update_work);
   1885
   1886	l2cap_unregister_all_users(conn);
   1887
   1888	/* Force the connection to be immediately dropped */
   1889	hcon->disc_timeout = 0;
   1890
   1891	mutex_lock(&conn->chan_lock);
   1892
   1893	/* Kill channels */
   1894	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
   1895		l2cap_chan_hold(chan);
   1896		l2cap_chan_lock(chan);
   1897
   1898		l2cap_chan_del(chan, err);
   1899
   1900		chan->ops->close(chan);
   1901
   1902		l2cap_chan_unlock(chan);
   1903		l2cap_chan_put(chan);
   1904	}
   1905
   1906	mutex_unlock(&conn->chan_lock);
   1907
   1908	hci_chan_del(conn->hchan);
   1909
   1910	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
   1911		cancel_delayed_work_sync(&conn->info_timer);
   1912
   1913	hcon->l2cap_data = NULL;
   1914	conn->hchan = NULL;
   1915	l2cap_conn_put(conn);
   1916}
   1917
   1918static void l2cap_conn_free(struct kref *ref)
   1919{
   1920	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
   1921
   1922	hci_conn_put(conn->hcon);
   1923	kfree(conn);
   1924}
   1925
   1926struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
   1927{
   1928	kref_get(&conn->ref);
   1929	return conn;
   1930}
   1931EXPORT_SYMBOL(l2cap_conn_get);
   1932
   1933void l2cap_conn_put(struct l2cap_conn *conn)
   1934{
   1935	kref_put(&conn->ref, l2cap_conn_free);
   1936}
   1937EXPORT_SYMBOL(l2cap_conn_put);
   1938
   1939/* ---- Socket interface ---- */
   1940
    1941/* Find channel with psm and source / destination bdaddr.
   1942 * Returns closest match.
   1943 */
   1944static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
   1945						   bdaddr_t *src,
   1946						   bdaddr_t *dst,
   1947						   u8 link_type)
   1948{
   1949	struct l2cap_chan *c, *c1 = NULL;
   1950
   1951	read_lock(&chan_list_lock);
   1952
   1953	list_for_each_entry(c, &chan_list, global_l) {
   1954		if (state && c->state != state)
   1955			continue;
   1956
   1957		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
   1958			continue;
   1959
   1960		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
   1961			continue;
   1962
   1963		if (c->psm == psm) {
   1964			int src_match, dst_match;
   1965			int src_any, dst_any;
   1966
   1967			/* Exact match. */
   1968			src_match = !bacmp(&c->src, src);
   1969			dst_match = !bacmp(&c->dst, dst);
   1970			if (src_match && dst_match) {
   1971				l2cap_chan_hold(c);
   1972				read_unlock(&chan_list_lock);
   1973				return c;
   1974			}
   1975
   1976			/* Closest match */
   1977			src_any = !bacmp(&c->src, BDADDR_ANY);
   1978			dst_any = !bacmp(&c->dst, BDADDR_ANY);
   1979			if ((src_match && dst_any) || (src_any && dst_match) ||
   1980			    (src_any && dst_any))
   1981				c1 = c;
   1982		}
   1983	}
   1984
   1985	if (c1)
   1986		l2cap_chan_hold(c1);
   1987
   1988	read_unlock(&chan_list_lock);
   1989
   1990	return c1;
   1991}
   1992
   1993static void l2cap_monitor_timeout(struct work_struct *work)
   1994{
   1995	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
   1996					       monitor_timer.work);
   1997
   1998	BT_DBG("chan %p", chan);
   1999
   2000	l2cap_chan_lock(chan);
   2001
   2002	if (!chan->conn) {
   2003		l2cap_chan_unlock(chan);
   2004		l2cap_chan_put(chan);
   2005		return;
   2006	}
   2007
   2008	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
   2009
   2010	l2cap_chan_unlock(chan);
   2011	l2cap_chan_put(chan);
   2012}
   2013
   2014static void l2cap_retrans_timeout(struct work_struct *work)
   2015{
   2016	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
   2017					       retrans_timer.work);
   2018
   2019	BT_DBG("chan %p", chan);
   2020
   2021	l2cap_chan_lock(chan);
   2022
   2023	if (!chan->conn) {
   2024		l2cap_chan_unlock(chan);
   2025		l2cap_chan_put(chan);
   2026		return;
   2027	}
   2028
   2029	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
   2030	l2cap_chan_unlock(chan);
   2031	l2cap_chan_put(chan);
   2032}
   2033
   2034static void l2cap_streaming_send(struct l2cap_chan *chan,
   2035				 struct sk_buff_head *skbs)
   2036{
   2037	struct sk_buff *skb;
   2038	struct l2cap_ctrl *control;
   2039
   2040	BT_DBG("chan %p, skbs %p", chan, skbs);
   2041
   2042	if (__chan_is_moving(chan))
   2043		return;
   2044
   2045	skb_queue_splice_tail_init(skbs, &chan->tx_q);
   2046
   2047	while (!skb_queue_empty(&chan->tx_q)) {
   2048
   2049		skb = skb_dequeue(&chan->tx_q);
   2050
   2051		bt_cb(skb)->l2cap.retries = 1;
   2052		control = &bt_cb(skb)->l2cap;
   2053
   2054		control->reqseq = 0;
   2055		control->txseq = chan->next_tx_seq;
   2056
   2057		__pack_control(chan, control, skb);
   2058
   2059		if (chan->fcs == L2CAP_FCS_CRC16) {
   2060			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
   2061			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
   2062		}
   2063
   2064		l2cap_do_send(chan, skb);
   2065
   2066		BT_DBG("Sent txseq %u", control->txseq);
   2067
   2068		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
   2069		chan->frames_sent++;
   2070	}
   2071}
   2072
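/* Transmit new I-frames from the tx queue: keep going while there is data
 * at tx_send_head, the remote TX window still has room (unacked_frames is
 * below remote_tx_win) and the TX state machine is in XMIT.  Each frame is
 * stamped with the current reqseq/txseq, gets an FCS if configured, and is
 * sent as a clone so the original stays queued for retransmission.
 */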
   2073static int l2cap_ertm_send(struct l2cap_chan *chan)
   2074{
   2075	struct sk_buff *skb, *tx_skb;
   2076	struct l2cap_ctrl *control;
   2077	int sent = 0;
   2078
   2079	BT_DBG("chan %p", chan);
   2080
   2081	if (chan->state != BT_CONNECTED)
   2082		return -ENOTCONN;
   2083
   2084	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
   2085		return 0;
   2086
   2087	if (__chan_is_moving(chan))
   2088		return 0;
   2089
   2090	while (chan->tx_send_head &&
   2091	       chan->unacked_frames < chan->remote_tx_win &&
   2092	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
   2093
   2094		skb = chan->tx_send_head;
   2095
   2096		bt_cb(skb)->l2cap.retries = 1;
   2097		control = &bt_cb(skb)->l2cap;
   2098
   2099		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
   2100			control->final = 1;
   2101
   2102		control->reqseq = chan->buffer_seq;
   2103		chan->last_acked_seq = chan->buffer_seq;
   2104		control->txseq = chan->next_tx_seq;
   2105
   2106		__pack_control(chan, control, skb);
   2107
   2108		if (chan->fcs == L2CAP_FCS_CRC16) {
   2109			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
   2110			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
   2111		}
   2112
   2113		/* Clone after data has been modified. Data is assumed to be
    2114		 * read-only (for locking purposes) on cloned sk_buffs.
   2115		 */
   2116		tx_skb = skb_clone(skb, GFP_KERNEL);
   2117
   2118		if (!tx_skb)
   2119			break;
   2120
   2121		__set_retrans_timer(chan);
   2122
   2123		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
   2124		chan->unacked_frames++;
   2125		chan->frames_sent++;
   2126		sent++;
   2127
   2128		if (skb_queue_is_last(&chan->tx_q, skb))
   2129			chan->tx_send_head = NULL;
   2130		else
   2131			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
   2132
   2133		l2cap_do_send(chan, tx_skb);
   2134		BT_DBG("Sent txseq %u", control->txseq);
   2135	}
   2136
   2137	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
   2138	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
   2139
   2140	return sent;
   2141}
   2142
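/* Retransmit the I-frames whose sequence numbers were queued on
 * retrans_list.  A frame that would exceed max_tx retries triggers a
 * disconnect request instead; otherwise a writable copy (or a clone) is
 * sent with an updated reqseq, F-bit and FCS.
 */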
   2143static void l2cap_ertm_resend(struct l2cap_chan *chan)
   2144{
   2145	struct l2cap_ctrl control;
   2146	struct sk_buff *skb;
   2147	struct sk_buff *tx_skb;
   2148	u16 seq;
   2149
   2150	BT_DBG("chan %p", chan);
   2151
   2152	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
   2153		return;
   2154
   2155	if (__chan_is_moving(chan))
   2156		return;
   2157
   2158	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
   2159		seq = l2cap_seq_list_pop(&chan->retrans_list);
   2160
   2161		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
   2162		if (!skb) {
   2163			BT_DBG("Error: Can't retransmit seq %d, frame missing",
   2164			       seq);
   2165			continue;
   2166		}
   2167
   2168		bt_cb(skb)->l2cap.retries++;
   2169		control = bt_cb(skb)->l2cap;
   2170
   2171		if (chan->max_tx != 0 &&
   2172		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
   2173			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
   2174			l2cap_send_disconn_req(chan, ECONNRESET);
   2175			l2cap_seq_list_clear(&chan->retrans_list);
   2176			break;
   2177		}
   2178
   2179		control.reqseq = chan->buffer_seq;
   2180		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
   2181			control.final = 1;
   2182		else
   2183			control.final = 0;
   2184
   2185		if (skb_cloned(skb)) {
   2186			/* Cloned sk_buffs are read-only, so we need a
   2187			 * writeable copy
   2188			 */
   2189			tx_skb = skb_copy(skb, GFP_KERNEL);
   2190		} else {
   2191			tx_skb = skb_clone(skb, GFP_KERNEL);
   2192		}
   2193
   2194		if (!tx_skb) {
   2195			l2cap_seq_list_clear(&chan->retrans_list);
   2196			break;
   2197		}
   2198
   2199		/* Update skb contents */
   2200		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
   2201			put_unaligned_le32(__pack_extended_control(&control),
   2202					   tx_skb->data + L2CAP_HDR_SIZE);
   2203		} else {
   2204			put_unaligned_le16(__pack_enhanced_control(&control),
   2205					   tx_skb->data + L2CAP_HDR_SIZE);
   2206		}
   2207
   2208		/* Update FCS */
   2209		if (chan->fcs == L2CAP_FCS_CRC16) {
   2210			u16 fcs = crc16(0, (u8 *) tx_skb->data,
   2211					tx_skb->len - L2CAP_FCS_SIZE);
   2212			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
   2213						L2CAP_FCS_SIZE);
   2214		}
   2215
   2216		l2cap_do_send(chan, tx_skb);
   2217
   2218		BT_DBG("Resent txseq %d", control.txseq);
   2219
   2220		chan->last_acked_seq = chan->buffer_seq;
   2221	}
   2222}
   2223
   2224static void l2cap_retransmit(struct l2cap_chan *chan,
   2225			     struct l2cap_ctrl *control)
   2226{
   2227	BT_DBG("chan %p, control %p", chan, control);
   2228
   2229	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
   2230	l2cap_ertm_resend(chan);
   2231}
   2232
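/* Queue every unacknowledged frame from control->reqseq up to (but not
 * including) tx_send_head for retransmission and kick the resend logic,
 * unless the remote side has reported itself busy.
 */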
   2233static void l2cap_retransmit_all(struct l2cap_chan *chan,
   2234				 struct l2cap_ctrl *control)
   2235{
   2236	struct sk_buff *skb;
   2237
   2238	BT_DBG("chan %p, control %p", chan, control);
   2239
   2240	if (control->poll)
   2241		set_bit(CONN_SEND_FBIT, &chan->conn_state);
   2242
   2243	l2cap_seq_list_clear(&chan->retrans_list);
   2244
   2245	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
   2246		return;
   2247
   2248	if (chan->unacked_frames) {
   2249		skb_queue_walk(&chan->tx_q, skb) {
   2250			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
   2251			    skb == chan->tx_send_head)
   2252				break;
   2253		}
   2254
   2255		skb_queue_walk_from(&chan->tx_q, skb) {
   2256			if (skb == chan->tx_send_head)
   2257				break;
   2258
   2259			l2cap_seq_list_append(&chan->retrans_list,
   2260					      bt_cb(skb)->l2cap.txseq);
   2261		}
   2262
   2263		l2cap_ertm_resend(chan);
   2264	}
   2265}
   2266
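/* Acknowledge received I-frames.  When the local side is busy an RNR is
 * sent immediately; otherwise pending I-frames (which carry the ack in
 * their reqseq) are flushed first, an explicit RR is sent once roughly
 * three quarters of the ack window is outstanding, and any remaining ack
 * is deferred to the ack timer.
 */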
   2267static void l2cap_send_ack(struct l2cap_chan *chan)
   2268{
   2269	struct l2cap_ctrl control;
   2270	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
   2271					 chan->last_acked_seq);
   2272	int threshold;
   2273
   2274	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
   2275	       chan, chan->last_acked_seq, chan->buffer_seq);
   2276
   2277	memset(&control, 0, sizeof(control));
   2278	control.sframe = 1;
   2279
   2280	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
   2281	    chan->rx_state == L2CAP_RX_STATE_RECV) {
   2282		__clear_ack_timer(chan);
   2283		control.super = L2CAP_SUPER_RNR;
   2284		control.reqseq = chan->buffer_seq;
   2285		l2cap_send_sframe(chan, &control);
   2286	} else {
   2287		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
   2288			l2cap_ertm_send(chan);
   2289			/* If any i-frames were sent, they included an ack */
   2290			if (chan->buffer_seq == chan->last_acked_seq)
   2291				frames_to_ack = 0;
   2292		}
   2293
   2294		/* Ack now if the window is 3/4ths full.
   2295		 * Calculate without mul or div
   2296		 */
   2297		threshold = chan->ack_win;
   2298		threshold += threshold << 1;
   2299		threshold >>= 2;
   2300
   2301		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
   2302		       threshold);
   2303
   2304		if (frames_to_ack >= threshold) {
   2305			__clear_ack_timer(chan);
   2306			control.super = L2CAP_SUPER_RR;
   2307			control.reqseq = chan->buffer_seq;
   2308			l2cap_send_sframe(chan, &control);
   2309			frames_to_ack = 0;
   2310		}
   2311
   2312		if (frames_to_ack)
   2313			__set_ack_timer(chan);
   2314	}
   2315}
   2316
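/* Copy message data from the iov into an skb: the first 'count' bytes go
 * into the linear part of 'skb', the rest is split into continuation
 * fragments of at most conn->mtu bytes each, chained on frag_list.
 * Returns the number of bytes consumed or a negative error.
 */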
   2317static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
   2318					 struct msghdr *msg, int len,
   2319					 int count, struct sk_buff *skb)
   2320{
   2321	struct l2cap_conn *conn = chan->conn;
   2322	struct sk_buff **frag;
   2323	int sent = 0;
   2324
   2325	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
   2326		return -EFAULT;
   2327
   2328	sent += count;
   2329	len  -= count;
   2330
   2331	/* Continuation fragments (no L2CAP header) */
   2332	frag = &skb_shinfo(skb)->frag_list;
   2333	while (len) {
   2334		struct sk_buff *tmp;
   2335
   2336		count = min_t(unsigned int, conn->mtu, len);
   2337
   2338		tmp = chan->ops->alloc_skb(chan, 0, count,
   2339					   msg->msg_flags & MSG_DONTWAIT);
   2340		if (IS_ERR(tmp))
   2341			return PTR_ERR(tmp);
   2342
   2343		*frag = tmp;
   2344
   2345		if (!copy_from_iter_full(skb_put(*frag, count), count,
   2346				   &msg->msg_iter))
   2347			return -EFAULT;
   2348
   2349		sent += count;
   2350		len  -= count;
   2351
   2352		skb->len += (*frag)->len;
   2353		skb->data_len += (*frag)->len;
   2354
   2355		frag = &(*frag)->next;
   2356	}
   2357
   2358	return sent;
   2359}
   2360
   2361static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
   2362						 struct msghdr *msg, size_t len)
   2363{
   2364	struct l2cap_conn *conn = chan->conn;
   2365	struct sk_buff *skb;
   2366	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
   2367	struct l2cap_hdr *lh;
   2368
   2369	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
   2370	       __le16_to_cpu(chan->psm), len);
   2371
   2372	count = min_t(unsigned int, (conn->mtu - hlen), len);
   2373
   2374	skb = chan->ops->alloc_skb(chan, hlen, count,
   2375				   msg->msg_flags & MSG_DONTWAIT);
   2376	if (IS_ERR(skb))
   2377		return skb;
   2378
   2379	/* Create L2CAP header */
   2380	lh = skb_put(skb, L2CAP_HDR_SIZE);
   2381	lh->cid = cpu_to_le16(chan->dcid);
   2382	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
   2383	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
   2384
   2385	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
   2386	if (unlikely(err < 0)) {
   2387		kfree_skb(skb);
   2388		return ERR_PTR(err);
   2389	}
   2390	return skb;
   2391}
   2392
   2393static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
   2394					      struct msghdr *msg, size_t len)
   2395{
   2396	struct l2cap_conn *conn = chan->conn;
   2397	struct sk_buff *skb;
   2398	int err, count;
   2399	struct l2cap_hdr *lh;
   2400
   2401	BT_DBG("chan %p len %zu", chan, len);
   2402
   2403	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
   2404
   2405	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
   2406				   msg->msg_flags & MSG_DONTWAIT);
   2407	if (IS_ERR(skb))
   2408		return skb;
   2409
   2410	/* Create L2CAP header */
   2411	lh = skb_put(skb, L2CAP_HDR_SIZE);
   2412	lh->cid = cpu_to_le16(chan->dcid);
   2413	lh->len = cpu_to_le16(len);
   2414
   2415	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
   2416	if (unlikely(err < 0)) {
   2417		kfree_skb(skb);
   2418		return ERR_PTR(err);
   2419	}
   2420	return skb;
   2421}
   2422
   2423static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
   2424					       struct msghdr *msg, size_t len,
   2425					       u16 sdulen)
   2426{
   2427	struct l2cap_conn *conn = chan->conn;
   2428	struct sk_buff *skb;
   2429	int err, count, hlen;
   2430	struct l2cap_hdr *lh;
   2431
   2432	BT_DBG("chan %p len %zu", chan, len);
   2433
   2434	if (!conn)
   2435		return ERR_PTR(-ENOTCONN);
   2436
   2437	hlen = __ertm_hdr_size(chan);
   2438
   2439	if (sdulen)
   2440		hlen += L2CAP_SDULEN_SIZE;
   2441
   2442	if (chan->fcs == L2CAP_FCS_CRC16)
   2443		hlen += L2CAP_FCS_SIZE;
   2444
   2445	count = min_t(unsigned int, (conn->mtu - hlen), len);
   2446
   2447	skb = chan->ops->alloc_skb(chan, hlen, count,
   2448				   msg->msg_flags & MSG_DONTWAIT);
   2449	if (IS_ERR(skb))
   2450		return skb;
   2451
   2452	/* Create L2CAP header */
   2453	lh = skb_put(skb, L2CAP_HDR_SIZE);
   2454	lh->cid = cpu_to_le16(chan->dcid);
   2455	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
   2456
   2457	/* Control header is populated later */
   2458	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
   2459		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
   2460	else
   2461		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
   2462
   2463	if (sdulen)
   2464		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
   2465
   2466	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
   2467	if (unlikely(err < 0)) {
   2468		kfree_skb(skb);
   2469		return ERR_PTR(err);
   2470	}
   2471
   2472	bt_cb(skb)->l2cap.fcs = chan->fcs;
   2473	bt_cb(skb)->l2cap.retries = 0;
   2474	return skb;
   2475}
   2476
   2477static int l2cap_segment_sdu(struct l2cap_chan *chan,
   2478			     struct sk_buff_head *seg_queue,
   2479			     struct msghdr *msg, size_t len)
   2480{
   2481	struct sk_buff *skb;
   2482	u16 sdu_len;
   2483	size_t pdu_len;
   2484	u8 sar;
   2485
   2486	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
   2487
   2488	/* It is critical that ERTM PDUs fit in a single HCI fragment,
   2489	 * so fragmented skbs are not used.  The HCI layer's handling
   2490	 * of fragmented skbs is not compatible with ERTM's queueing.
   2491	 */
   2492
   2493	/* PDU size is derived from the HCI MTU */
   2494	pdu_len = chan->conn->mtu;
   2495
   2496	/* Constrain PDU size for BR/EDR connections */
   2497	if (!chan->hs_hcon)
   2498		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
   2499
   2500	/* Adjust for largest possible L2CAP overhead. */
   2501	if (chan->fcs)
   2502		pdu_len -= L2CAP_FCS_SIZE;
   2503
   2504	pdu_len -= __ertm_hdr_size(chan);
   2505
   2506	/* Remote device may have requested smaller PDUs */
   2507	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
   2508
   2509	if (len <= pdu_len) {
   2510		sar = L2CAP_SAR_UNSEGMENTED;
   2511		sdu_len = 0;
   2512		pdu_len = len;
   2513	} else {
   2514		sar = L2CAP_SAR_START;
   2515		sdu_len = len;
   2516	}
   2517
   2518	while (len > 0) {
   2519		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
   2520
   2521		if (IS_ERR(skb)) {
   2522			__skb_queue_purge(seg_queue);
   2523			return PTR_ERR(skb);
   2524		}
   2525
   2526		bt_cb(skb)->l2cap.sar = sar;
   2527		__skb_queue_tail(seg_queue, skb);
   2528
   2529		len -= pdu_len;
   2530		if (sdu_len)
   2531			sdu_len = 0;
   2532
   2533		if (len <= pdu_len) {
   2534			sar = L2CAP_SAR_END;
   2535			pdu_len = len;
   2536		} else {
   2537			sar = L2CAP_SAR_CONTINUE;
   2538		}
   2539	}
   2540
   2541	return 0;
   2542}
   2543
   2544static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
   2545						   struct msghdr *msg,
   2546						   size_t len, u16 sdulen)
   2547{
   2548	struct l2cap_conn *conn = chan->conn;
   2549	struct sk_buff *skb;
   2550	int err, count, hlen;
   2551	struct l2cap_hdr *lh;
   2552
   2553	BT_DBG("chan %p len %zu", chan, len);
   2554
   2555	if (!conn)
   2556		return ERR_PTR(-ENOTCONN);
   2557
   2558	hlen = L2CAP_HDR_SIZE;
   2559
   2560	if (sdulen)
   2561		hlen += L2CAP_SDULEN_SIZE;
   2562
   2563	count = min_t(unsigned int, (conn->mtu - hlen), len);
   2564
   2565	skb = chan->ops->alloc_skb(chan, hlen, count,
   2566				   msg->msg_flags & MSG_DONTWAIT);
   2567	if (IS_ERR(skb))
   2568		return skb;
   2569
   2570	/* Create L2CAP header */
   2571	lh = skb_put(skb, L2CAP_HDR_SIZE);
   2572	lh->cid = cpu_to_le16(chan->dcid);
   2573	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
   2574
   2575	if (sdulen)
   2576		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
   2577
   2578	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
   2579	if (unlikely(err < 0)) {
   2580		kfree_skb(skb);
   2581		return ERR_PTR(err);
   2582	}
   2583
   2584	return skb;
   2585}
   2586
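/* Segment an SDU for LE (credit based) flow control.  Only the first PDU
 * carries the 2-byte SDU length field, so once it has been emitted the
 * following PDUs may use those two bytes for payload again.
 */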
   2587static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
   2588				struct sk_buff_head *seg_queue,
   2589				struct msghdr *msg, size_t len)
   2590{
   2591	struct sk_buff *skb;
   2592	size_t pdu_len;
   2593	u16 sdu_len;
   2594
   2595	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
   2596
   2597	sdu_len = len;
   2598	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
   2599
   2600	while (len > 0) {
   2601		if (len <= pdu_len)
   2602			pdu_len = len;
   2603
   2604		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
   2605		if (IS_ERR(skb)) {
   2606			__skb_queue_purge(seg_queue);
   2607			return PTR_ERR(skb);
   2608		}
   2609
   2610		__skb_queue_tail(seg_queue, skb);
   2611
   2612		len -= pdu_len;
   2613
   2614		if (sdu_len) {
   2615			sdu_len = 0;
   2616			pdu_len += L2CAP_SDULEN_SIZE;
   2617		}
   2618	}
   2619
   2620	return 0;
   2621}
   2622
   2623static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
   2624{
   2625	int sent = 0;
   2626
   2627	BT_DBG("chan %p", chan);
   2628
   2629	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
   2630		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
   2631		chan->tx_credits--;
   2632		sent++;
   2633	}
   2634
   2635	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
   2636	       skb_queue_len(&chan->tx_q));
   2637}
   2638
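/* Entry point for outbound data on a channel.  Connectionless channels
 * send a single PDU; LE/extended flow control segments the SDU and sends
 * as many PDUs as there are credits; basic mode sends one PDU; ERTM and
 * streaming mode segment first and then hand the fragments to the TX
 * state machine or the streaming sender.
 */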
   2639int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
   2640{
   2641	struct sk_buff *skb;
   2642	int err;
   2643	struct sk_buff_head seg_queue;
   2644
   2645	if (!chan->conn)
   2646		return -ENOTCONN;
   2647
   2648	/* Connectionless channel */
   2649	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
   2650		skb = l2cap_create_connless_pdu(chan, msg, len);
   2651		if (IS_ERR(skb))
   2652			return PTR_ERR(skb);
   2653
   2654		/* Channel lock is released before requesting new skb and then
    2655		 * reacquired, so we need to recheck the channel state.
   2656		 */
   2657		if (chan->state != BT_CONNECTED) {
   2658			kfree_skb(skb);
   2659			return -ENOTCONN;
   2660		}
   2661
   2662		l2cap_do_send(chan, skb);
   2663		return len;
   2664	}
   2665
   2666	switch (chan->mode) {
   2667	case L2CAP_MODE_LE_FLOWCTL:
   2668	case L2CAP_MODE_EXT_FLOWCTL:
   2669		/* Check outgoing MTU */
   2670		if (len > chan->omtu)
   2671			return -EMSGSIZE;
   2672
   2673		__skb_queue_head_init(&seg_queue);
   2674
   2675		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
   2676
   2677		if (chan->state != BT_CONNECTED) {
   2678			__skb_queue_purge(&seg_queue);
   2679			err = -ENOTCONN;
   2680		}
   2681
   2682		if (err)
   2683			return err;
   2684
   2685		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
   2686
   2687		l2cap_le_flowctl_send(chan);
   2688
   2689		if (!chan->tx_credits)
   2690			chan->ops->suspend(chan);
   2691
   2692		err = len;
   2693
   2694		break;
   2695
   2696	case L2CAP_MODE_BASIC:
   2697		/* Check outgoing MTU */
   2698		if (len > chan->omtu)
   2699			return -EMSGSIZE;
   2700
   2701		/* Create a basic PDU */
   2702		skb = l2cap_create_basic_pdu(chan, msg, len);
   2703		if (IS_ERR(skb))
   2704			return PTR_ERR(skb);
   2705
   2706		/* Channel lock is released before requesting new skb and then
    2707		 * reacquired, so we need to recheck the channel state.
   2708		 */
   2709		if (chan->state != BT_CONNECTED) {
   2710			kfree_skb(skb);
   2711			return -ENOTCONN;
   2712		}
   2713
   2714		l2cap_do_send(chan, skb);
   2715		err = len;
   2716		break;
   2717
   2718	case L2CAP_MODE_ERTM:
   2719	case L2CAP_MODE_STREAMING:
   2720		/* Check outgoing MTU */
   2721		if (len > chan->omtu) {
   2722			err = -EMSGSIZE;
   2723			break;
   2724		}
   2725
   2726		__skb_queue_head_init(&seg_queue);
   2727
   2728		/* Do segmentation before calling in to the state machine,
   2729		 * since it's possible to block while waiting for memory
   2730		 * allocation.
   2731		 */
   2732		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
   2733
   2734		/* The channel could have been closed while segmenting,
   2735		 * check that it is still connected.
   2736		 */
   2737		if (chan->state != BT_CONNECTED) {
   2738			__skb_queue_purge(&seg_queue);
   2739			err = -ENOTCONN;
   2740		}
   2741
   2742		if (err)
   2743			break;
   2744
   2745		if (chan->mode == L2CAP_MODE_ERTM)
   2746			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
   2747		else
   2748			l2cap_streaming_send(chan, &seg_queue);
   2749
   2750		err = len;
   2751
   2752		/* If the skbs were not queued for sending, they'll still be in
   2753		 * seg_queue and need to be purged.
   2754		 */
   2755		__skb_queue_purge(&seg_queue);
   2756		break;
   2757
   2758	default:
    2759		BT_DBG("bad mode %1.1x", chan->mode);
   2760		err = -EBADFD;
   2761	}
   2762
   2763	return err;
   2764}
   2765EXPORT_SYMBOL_GPL(l2cap_chan_send);
   2766
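/* Send an SREJ for every sequence number between expected_tx_seq and the
 * newly received txseq that is not already buffered in srej_q, remembering
 * each requested sequence number on srej_list.
 */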
   2767static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
   2768{
   2769	struct l2cap_ctrl control;
   2770	u16 seq;
   2771
   2772	BT_DBG("chan %p, txseq %u", chan, txseq);
   2773
   2774	memset(&control, 0, sizeof(control));
   2775	control.sframe = 1;
   2776	control.super = L2CAP_SUPER_SREJ;
   2777
   2778	for (seq = chan->expected_tx_seq; seq != txseq;
   2779	     seq = __next_seq(chan, seq)) {
   2780		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
   2781			control.reqseq = seq;
   2782			l2cap_send_sframe(chan, &control);
   2783			l2cap_seq_list_append(&chan->srej_list, seq);
   2784		}
   2785	}
   2786
   2787	chan->expected_tx_seq = __next_seq(chan, txseq);
   2788}
   2789
   2790static void l2cap_send_srej_tail(struct l2cap_chan *chan)
   2791{
   2792	struct l2cap_ctrl control;
   2793
   2794	BT_DBG("chan %p", chan);
   2795
   2796	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
   2797		return;
   2798
   2799	memset(&control, 0, sizeof(control));
   2800	control.sframe = 1;
   2801	control.super = L2CAP_SUPER_SREJ;
   2802	control.reqseq = chan->srej_list.tail;
   2803	l2cap_send_sframe(chan, &control);
   2804}
   2805
   2806static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
   2807{
   2808	struct l2cap_ctrl control;
   2809	u16 initial_head;
   2810	u16 seq;
   2811
   2812	BT_DBG("chan %p, txseq %u", chan, txseq);
   2813
   2814	memset(&control, 0, sizeof(control));
   2815	control.sframe = 1;
   2816	control.super = L2CAP_SUPER_SREJ;
   2817
   2818	/* Capture initial list head to allow only one pass through the list. */
   2819	initial_head = chan->srej_list.head;
   2820
   2821	do {
   2822		seq = l2cap_seq_list_pop(&chan->srej_list);
   2823		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
   2824			break;
   2825
   2826		control.reqseq = seq;
   2827		l2cap_send_sframe(chan, &control);
   2828		l2cap_seq_list_append(&chan->srej_list, seq);
   2829	} while (chan->srej_list.head != initial_head);
   2830}
   2831
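/* Process an incoming reqseq acknowledgment: drop every acked frame up to
 * (but not including) reqseq from the TX queue and stop the retransmission
 * timer once nothing is left unacknowledged.
 */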
   2832static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
   2833{
   2834	struct sk_buff *acked_skb;
   2835	u16 ackseq;
   2836
   2837	BT_DBG("chan %p, reqseq %u", chan, reqseq);
   2838
   2839	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
   2840		return;
   2841
   2842	BT_DBG("expected_ack_seq %u, unacked_frames %u",
   2843	       chan->expected_ack_seq, chan->unacked_frames);
   2844
   2845	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
   2846	     ackseq = __next_seq(chan, ackseq)) {
   2847
   2848		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
   2849		if (acked_skb) {
   2850			skb_unlink(acked_skb, &chan->tx_q);
   2851			kfree_skb(acked_skb);
   2852			chan->unacked_frames--;
   2853		}
   2854	}
   2855
   2856	chan->expected_ack_seq = reqseq;
   2857
   2858	if (chan->unacked_frames == 0)
   2859		__clear_retrans_timer(chan);
   2860
   2861	BT_DBG("unacked_frames %u", chan->unacked_frames);
   2862}
   2863
   2864static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
   2865{
   2866	BT_DBG("chan %p", chan);
   2867
   2868	chan->expected_tx_seq = chan->buffer_seq;
   2869	l2cap_seq_list_clear(&chan->srej_list);
   2870	skb_queue_purge(&chan->srej_q);
   2871	chan->rx_state = L2CAP_RX_STATE_RECV;
   2872}
   2873
   2874static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
   2875				struct l2cap_ctrl *control,
   2876				struct sk_buff_head *skbs, u8 event)
   2877{
   2878	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
   2879	       event);
   2880
   2881	switch (event) {
   2882	case L2CAP_EV_DATA_REQUEST:
   2883		if (chan->tx_send_head == NULL)
   2884			chan->tx_send_head = skb_peek(skbs);
   2885
   2886		skb_queue_splice_tail_init(skbs, &chan->tx_q);
   2887		l2cap_ertm_send(chan);
   2888		break;
   2889	case L2CAP_EV_LOCAL_BUSY_DETECTED:
   2890		BT_DBG("Enter LOCAL_BUSY");
   2891		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
   2892
   2893		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
   2894			/* The SREJ_SENT state must be aborted if we are to
   2895			 * enter the LOCAL_BUSY state.
   2896			 */
   2897			l2cap_abort_rx_srej_sent(chan);
   2898		}
   2899
   2900		l2cap_send_ack(chan);
   2901
   2902		break;
   2903	case L2CAP_EV_LOCAL_BUSY_CLEAR:
   2904		BT_DBG("Exit LOCAL_BUSY");
   2905		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
   2906
   2907		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
   2908			struct l2cap_ctrl local_control;
   2909
   2910			memset(&local_control, 0, sizeof(local_control));
   2911			local_control.sframe = 1;
   2912			local_control.super = L2CAP_SUPER_RR;
   2913			local_control.poll = 1;
   2914			local_control.reqseq = chan->buffer_seq;
   2915			l2cap_send_sframe(chan, &local_control);
   2916
   2917			chan->retry_count = 1;
   2918			__set_monitor_timer(chan);
   2919			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
   2920		}
   2921		break;
   2922	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
   2923		l2cap_process_reqseq(chan, control->reqseq);
   2924		break;
   2925	case L2CAP_EV_EXPLICIT_POLL:
   2926		l2cap_send_rr_or_rnr(chan, 1);
   2927		chan->retry_count = 1;
   2928		__set_monitor_timer(chan);
   2929		__clear_ack_timer(chan);
   2930		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
   2931		break;
   2932	case L2CAP_EV_RETRANS_TO:
   2933		l2cap_send_rr_or_rnr(chan, 1);
   2934		chan->retry_count = 1;
   2935		__set_monitor_timer(chan);
   2936		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
   2937		break;
   2938	case L2CAP_EV_RECV_FBIT:
   2939		/* Nothing to process */
   2940		break;
   2941	default:
   2942		break;
   2943	}
   2944}
   2945
   2946static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
   2947				  struct l2cap_ctrl *control,
   2948				  struct sk_buff_head *skbs, u8 event)
   2949{
   2950	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
   2951	       event);
   2952
   2953	switch (event) {
   2954	case L2CAP_EV_DATA_REQUEST:
   2955		if (chan->tx_send_head == NULL)
   2956			chan->tx_send_head = skb_peek(skbs);
   2957		/* Queue data, but don't send. */
   2958		skb_queue_splice_tail_init(skbs, &chan->tx_q);
   2959		break;
   2960	case L2CAP_EV_LOCAL_BUSY_DETECTED:
   2961		BT_DBG("Enter LOCAL_BUSY");
   2962		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
   2963
   2964		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
   2965			/* The SREJ_SENT state must be aborted if we are to
   2966			 * enter the LOCAL_BUSY state.
   2967			 */
   2968			l2cap_abort_rx_srej_sent(chan);
   2969		}
   2970
   2971		l2cap_send_ack(chan);
   2972
   2973		break;
   2974	case L2CAP_EV_LOCAL_BUSY_CLEAR:
   2975		BT_DBG("Exit LOCAL_BUSY");
   2976		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
   2977
   2978		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
   2979			struct l2cap_ctrl local_control;
   2980			memset(&local_control, 0, sizeof(local_control));
   2981			local_control.sframe = 1;
   2982			local_control.super = L2CAP_SUPER_RR;
   2983			local_control.poll = 1;
   2984			local_control.reqseq = chan->buffer_seq;
   2985			l2cap_send_sframe(chan, &local_control);
   2986
   2987			chan->retry_count = 1;
   2988			__set_monitor_timer(chan);
   2989			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
   2990		}
   2991		break;
   2992	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
   2993		l2cap_process_reqseq(chan, control->reqseq);
   2994		fallthrough;
   2995
   2996	case L2CAP_EV_RECV_FBIT:
   2997		if (control && control->final) {
   2998			__clear_monitor_timer(chan);
   2999			if (chan->unacked_frames > 0)
   3000				__set_retrans_timer(chan);
   3001			chan->retry_count = 0;
   3002			chan->tx_state = L2CAP_TX_STATE_XMIT;
    3003			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
   3004		}
   3005		break;
   3006	case L2CAP_EV_EXPLICIT_POLL:
   3007		/* Ignore */
   3008		break;
   3009	case L2CAP_EV_MONITOR_TO:
   3010		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
   3011			l2cap_send_rr_or_rnr(chan, 1);
   3012			__set_monitor_timer(chan);
   3013			chan->retry_count++;
   3014		} else {
   3015			l2cap_send_disconn_req(chan, ECONNABORTED);
   3016		}
   3017		break;
   3018	default:
   3019		break;
   3020	}
   3021}
   3022
   3023static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
   3024		     struct sk_buff_head *skbs, u8 event)
   3025{
   3026	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
   3027	       chan, control, skbs, event, chan->tx_state);
   3028
   3029	switch (chan->tx_state) {
   3030	case L2CAP_TX_STATE_XMIT:
   3031		l2cap_tx_state_xmit(chan, control, skbs, event);
   3032		break;
   3033	case L2CAP_TX_STATE_WAIT_F:
   3034		l2cap_tx_state_wait_f(chan, control, skbs, event);
   3035		break;
   3036	default:
   3037		/* Ignore event */
   3038		break;
   3039	}
   3040}
   3041
   3042static void l2cap_pass_to_tx(struct l2cap_chan *chan,
   3043			     struct l2cap_ctrl *control)
   3044{
   3045	BT_DBG("chan %p, control %p", chan, control);
   3046	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
   3047}
   3048
   3049static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
   3050				  struct l2cap_ctrl *control)
   3051{
   3052	BT_DBG("chan %p, control %p", chan, control);
   3053	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
   3054}
   3055
   3056/* Copy frame to all raw sockets on that connection */
   3057static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
   3058{
   3059	struct sk_buff *nskb;
   3060	struct l2cap_chan *chan;
   3061
   3062	BT_DBG("conn %p", conn);
   3063
   3064	mutex_lock(&conn->chan_lock);
   3065
   3066	list_for_each_entry(chan, &conn->chan_l, list) {
   3067		if (chan->chan_type != L2CAP_CHAN_RAW)
   3068			continue;
   3069
   3070		/* Don't send frame to the channel it came from */
   3071		if (bt_cb(skb)->l2cap.chan == chan)
   3072			continue;
   3073
   3074		nskb = skb_clone(skb, GFP_KERNEL);
   3075		if (!nskb)
   3076			continue;
   3077		if (chan->ops->recv(chan, nskb))
   3078			kfree_skb(nskb);
   3079	}
   3080
   3081	mutex_unlock(&conn->chan_lock);
   3082}
   3083
   3084/* ---- L2CAP signalling commands ---- */
   3085static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
   3086				       u8 ident, u16 dlen, void *data)
   3087{
   3088	struct sk_buff *skb, **frag;
   3089	struct l2cap_cmd_hdr *cmd;
   3090	struct l2cap_hdr *lh;
   3091	int len, count;
   3092
   3093	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
   3094	       conn, code, ident, dlen);
   3095
   3096	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
   3097		return NULL;
   3098
   3099	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
   3100	count = min_t(unsigned int, conn->mtu, len);
   3101
   3102	skb = bt_skb_alloc(count, GFP_KERNEL);
   3103	if (!skb)
   3104		return NULL;
   3105
   3106	lh = skb_put(skb, L2CAP_HDR_SIZE);
   3107	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
   3108
   3109	if (conn->hcon->type == LE_LINK)
   3110		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
   3111	else
   3112		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
   3113
   3114	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
   3115	cmd->code  = code;
   3116	cmd->ident = ident;
   3117	cmd->len   = cpu_to_le16(dlen);
   3118
   3119	if (dlen) {
   3120		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
   3121		skb_put_data(skb, data, count);
   3122		data += count;
   3123	}
   3124
   3125	len -= skb->len;
   3126
   3127	/* Continuation fragments (no L2CAP header) */
   3128	frag = &skb_shinfo(skb)->frag_list;
   3129	while (len) {
   3130		count = min_t(unsigned int, conn->mtu, len);
   3131
   3132		*frag = bt_skb_alloc(count, GFP_KERNEL);
   3133		if (!*frag)
   3134			goto fail;
   3135
   3136		skb_put_data(*frag, data, count);
   3137
   3138		len  -= count;
   3139		data += count;
   3140
   3141		frag = &(*frag)->next;
   3142	}
   3143
   3144	return skb;
   3145
   3146fail:
   3147	kfree_skb(skb);
   3148	return NULL;
   3149}
   3150
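/* Parse one configuration option (type/length/value) at *ptr, advance
 * *ptr past it and return the option's total length.  1-byte values are
 * read directly, 2- and 4-byte values are decoded from little endian;
 * longer options are returned as a pointer to the raw value.
 */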
   3151static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
   3152				     unsigned long *val)
   3153{
   3154	struct l2cap_conf_opt *opt = *ptr;
   3155	int len;
   3156
   3157	len = L2CAP_CONF_OPT_SIZE + opt->len;
   3158	*ptr += len;
   3159
   3160	*type = opt->type;
   3161	*olen = opt->len;
   3162
   3163	switch (opt->len) {
   3164	case 1:
   3165		*val = *((u8 *) opt->val);
   3166		break;
   3167
   3168	case 2:
   3169		*val = get_unaligned_le16(opt->val);
   3170		break;
   3171
   3172	case 4:
   3173		*val = get_unaligned_le32(opt->val);
   3174		break;
   3175
   3176	default:
   3177		*val = (unsigned long) opt->val;
   3178		break;
   3179	}
   3180
   3181	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
   3182	return len;
   3183}
   3184
   3185static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
   3186{
   3187	struct l2cap_conf_opt *opt = *ptr;
   3188
   3189	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
   3190
   3191	if (size < L2CAP_CONF_OPT_SIZE + len)
   3192		return;
   3193
   3194	opt->type = type;
   3195	opt->len  = len;
   3196
   3197	switch (len) {
   3198	case 1:
   3199		*((u8 *) opt->val)  = val;
   3200		break;
   3201
   3202	case 2:
   3203		put_unaligned_le16(val, opt->val);
   3204		break;
   3205
   3206	case 4:
   3207		put_unaligned_le32(val, opt->val);
   3208		break;
   3209
   3210	default:
   3211		memcpy(opt->val, (void *) val, len);
   3212		break;
   3213	}
   3214
   3215	*ptr += L2CAP_CONF_OPT_SIZE + len;
   3216}
   3217
   3218static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
   3219{
   3220	struct l2cap_conf_efs efs;
   3221
   3222	switch (chan->mode) {
   3223	case L2CAP_MODE_ERTM:
   3224		efs.id		= chan->local_id;
   3225		efs.stype	= chan->local_stype;
   3226		efs.msdu	= cpu_to_le16(chan->local_msdu);
   3227		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
   3228		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
   3229		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
   3230		break;
   3231
   3232	case L2CAP_MODE_STREAMING:
   3233		efs.id		= 1;
   3234		efs.stype	= L2CAP_SERV_BESTEFFORT;
   3235		efs.msdu	= cpu_to_le16(chan->local_msdu);
   3236		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
   3237		efs.acc_lat	= 0;
   3238		efs.flush_to	= 0;
   3239		break;
   3240
   3241	default:
   3242		return;
   3243	}
   3244
   3245	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
   3246			   (unsigned long) &efs, size);
   3247}
   3248
   3249static void l2cap_ack_timeout(struct work_struct *work)
   3250{
   3251	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
   3252					       ack_timer.work);
   3253	u16 frames_to_ack;
   3254
   3255	BT_DBG("chan %p", chan);
   3256
   3257	l2cap_chan_lock(chan);
   3258
   3259	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
   3260				     chan->last_acked_seq);
   3261
   3262	if (frames_to_ack)
   3263		l2cap_send_rr_or_rnr(chan, 0);
   3264
   3265	l2cap_chan_unlock(chan);
   3266	l2cap_chan_put(chan);
   3267}
   3268
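/* Reset the per-channel ERTM/streaming state: sequence counters, SDU
 * reassembly and the TX queue.  For ERTM mode the RX/TX state machines,
 * the retransmission, monitor and ack timers and the SREJ/retransmit
 * sequence lists are initialised as well.
 */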
   3269int l2cap_ertm_init(struct l2cap_chan *chan)
   3270{
   3271	int err;
   3272
   3273	chan->next_tx_seq = 0;
   3274	chan->expected_tx_seq = 0;
   3275	chan->expected_ack_seq = 0;
   3276	chan->unacked_frames = 0;
   3277	chan->buffer_seq = 0;
   3278	chan->frames_sent = 0;
   3279	chan->last_acked_seq = 0;
   3280	chan->sdu = NULL;
   3281	chan->sdu_last_frag = NULL;
   3282	chan->sdu_len = 0;
   3283
   3284	skb_queue_head_init(&chan->tx_q);
   3285
   3286	chan->local_amp_id = AMP_ID_BREDR;
   3287	chan->move_id = AMP_ID_BREDR;
   3288	chan->move_state = L2CAP_MOVE_STABLE;
   3289	chan->move_role = L2CAP_MOVE_ROLE_NONE;
   3290
   3291	if (chan->mode != L2CAP_MODE_ERTM)
   3292		return 0;
   3293
   3294	chan->rx_state = L2CAP_RX_STATE_RECV;
   3295	chan->tx_state = L2CAP_TX_STATE_XMIT;
   3296
   3297	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
   3298	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
   3299	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
   3300
   3301	skb_queue_head_init(&chan->srej_q);
   3302
   3303	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
   3304	if (err < 0)
   3305		return err;
   3306
   3307	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
   3308	if (err < 0)
   3309		l2cap_seq_list_free(&chan->srej_list);
   3310
   3311	return err;
   3312}
   3313
   3314static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
   3315{
   3316	switch (mode) {
   3317	case L2CAP_MODE_STREAMING:
   3318	case L2CAP_MODE_ERTM:
   3319		if (l2cap_mode_supported(mode, remote_feat_mask))
   3320			return mode;
   3321		fallthrough;
   3322	default:
   3323		return L2CAP_MODE_BASIC;
   3324	}
   3325}
   3326
   3327static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
   3328{
   3329	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
   3330		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
   3331}
   3332
   3333static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
   3334{
   3335	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
   3336		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
   3337}
   3338
   3339static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
   3340				      struct l2cap_conf_rfc *rfc)
   3341{
   3342	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
   3343		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
   3344
    3345		 * Class 1 devices must have ERTM timeouts
   3346		 * exceeding the Link Supervision Timeout.  The
   3347		 * default Link Supervision Timeout for AMP
   3348		 * controllers is 10 seconds.
   3349		 *
   3350		 * Class 1 devices use 0xffffffff for their
   3351		 * best-effort flush timeout, so the clamping logic
   3352		 * will result in a timeout that meets the above
   3353		 * requirement.  ERTM timeouts are 16-bit values, so
   3354		 * the maximum timeout is 65.535 seconds.
   3355		 */
   3356
   3357		/* Convert timeout to milliseconds and round */
   3358		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
   3359
   3360		/* This is the recommended formula for class 2 devices
   3361		 * that start ERTM timers when packets are sent to the
   3362		 * controller.
   3363		 */
   3364		ertm_to = 3 * ertm_to + 500;
   3365
   3366		if (ertm_to > 0xffff)
   3367			ertm_to = 0xffff;
   3368
   3369		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
   3370		rfc->monitor_timeout = rfc->retrans_timeout;
   3371	} else {
   3372		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
   3373		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
   3374	}
   3375}
   3376
   3377static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
   3378{
   3379	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
   3380	    __l2cap_ews_supported(chan->conn)) {
   3381		/* use extended control field */
   3382		set_bit(FLAG_EXT_CTRL, &chan->flags);
   3383		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
   3384	} else {
   3385		chan->tx_win = min_t(u16, chan->tx_win,
   3386				     L2CAP_DEFAULT_TX_WINDOW);
   3387		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
   3388	}
   3389	chan->ack_win = chan->tx_win;
   3390}
   3391
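/* Choose an incoming MTU automatically: start from L2CAP_DEFAULT_MIN_MTU
 * and raise it to the user payload of the largest 2-DH/3-DH packet that
 * is not excluded in the connection's pkt_type.
 */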
   3392static void l2cap_mtu_auto(struct l2cap_chan *chan)
   3393{
   3394	struct hci_conn *conn = chan->conn->hcon;
   3395
   3396	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
   3397
   3398	/* The 2-DH1 packet has between 2 and 56 information bytes
   3399	 * (including the 2-byte payload header)
   3400	 */
   3401	if (!(conn->pkt_type & HCI_2DH1))
   3402		chan->imtu = 54;
   3403
   3404	/* The 3-DH1 packet has between 2 and 85 information bytes
   3405	 * (including the 2-byte payload header)
   3406	 */
   3407	if (!(conn->pkt_type & HCI_3DH1))
   3408		chan->imtu = 83;
   3409
   3410	/* The 2-DH3 packet has between 2 and 369 information bytes
   3411	 * (including the 2-byte payload header)
   3412	 */
   3413	if (!(conn->pkt_type & HCI_2DH3))
   3414		chan->imtu = 367;
   3415
   3416	/* The 3-DH3 packet has between 2 and 554 information bytes
   3417	 * (including the 2-byte payload header)
   3418	 */
   3419	if (!(conn->pkt_type & HCI_3DH3))
   3420		chan->imtu = 552;
   3421
   3422	/* The 2-DH5 packet has between 2 and 681 information bytes
   3423	 * (including the 2-byte payload header)
   3424	 */
   3425	if (!(conn->pkt_type & HCI_2DH5))
   3426		chan->imtu = 679;
   3427
   3428	/* The 3-DH5 packet has between 2 and 1023 information bytes
   3429	 * (including the 2-byte payload header)
   3430	 */
   3431	if (!(conn->pkt_type & HCI_3DH5))
   3432		chan->imtu = 1021;
   3433}
   3434
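/* Build a Configuration Request for this channel into data (at most
 * data_size bytes).  On the first request a mode is selected, then MTU,
 * RFC, EFS, extended window and FCS options are appended as appropriate
 * for basic, ERTM or streaming mode.  Returns the length of the request.
 */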
   3435static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
   3436{
   3437	struct l2cap_conf_req *req = data;
   3438	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
   3439	void *ptr = req->data;
   3440	void *endptr = data + data_size;
   3441	u16 size;
   3442
   3443	BT_DBG("chan %p", chan);
   3444
   3445	if (chan->num_conf_req || chan->num_conf_rsp)
   3446		goto done;
   3447
   3448	switch (chan->mode) {
   3449	case L2CAP_MODE_STREAMING:
   3450	case L2CAP_MODE_ERTM:
   3451		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
   3452			break;
   3453
   3454		if (__l2cap_efs_supported(chan->conn))
   3455			set_bit(FLAG_EFS_ENABLE, &chan->flags);
   3456
   3457		fallthrough;
   3458	default:
   3459		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
   3460		break;
   3461	}
   3462
   3463done:
   3464	if (chan->imtu != L2CAP_DEFAULT_MTU) {
   3465		if (!chan->imtu)
   3466			l2cap_mtu_auto(chan);
   3467		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
   3468				   endptr - ptr);
   3469	}
   3470
   3471	switch (chan->mode) {
   3472	case L2CAP_MODE_BASIC:
   3473		if (disable_ertm)
   3474			break;
   3475
   3476		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
   3477		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
   3478			break;
   3479
   3480		rfc.mode            = L2CAP_MODE_BASIC;
   3481		rfc.txwin_size      = 0;
   3482		rfc.max_transmit    = 0;
   3483		rfc.retrans_timeout = 0;
   3484		rfc.monitor_timeout = 0;
   3485		rfc.max_pdu_size    = 0;
   3486
   3487		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
   3488				   (unsigned long) &rfc, endptr - ptr);
   3489		break;
   3490
   3491	case L2CAP_MODE_ERTM:
   3492		rfc.mode            = L2CAP_MODE_ERTM;
   3493		rfc.max_transmit    = chan->max_tx;
   3494
   3495		__l2cap_set_ertm_timeouts(chan, &rfc);
   3496
   3497		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
   3498			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
   3499			     L2CAP_FCS_SIZE);
   3500		rfc.max_pdu_size = cpu_to_le16(size);
   3501
   3502		l2cap_txwin_setup(chan);
   3503
   3504		rfc.txwin_size = min_t(u16, chan->tx_win,
   3505				       L2CAP_DEFAULT_TX_WINDOW);
   3506
   3507		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
   3508				   (unsigned long) &rfc, endptr - ptr);
   3509
   3510		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
   3511			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
   3512
   3513		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
   3514			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
   3515					   chan->tx_win, endptr - ptr);
   3516
   3517		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
   3518			if (chan->fcs == L2CAP_FCS_NONE ||
   3519			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
   3520				chan->fcs = L2CAP_FCS_NONE;
   3521				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
   3522						   chan->fcs, endptr - ptr);
   3523			}
   3524		break;
   3525
   3526	case L2CAP_MODE_STREAMING:
   3527		l2cap_txwin_setup(chan);
   3528		rfc.mode            = L2CAP_MODE_STREAMING;
   3529		rfc.txwin_size      = 0;
   3530		rfc.max_transmit    = 0;
   3531		rfc.retrans_timeout = 0;
   3532		rfc.monitor_timeout = 0;
   3533
   3534		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
   3535			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
   3536			     L2CAP_FCS_SIZE);
   3537		rfc.max_pdu_size = cpu_to_le16(size);
   3538
   3539		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
   3540				   (unsigned long) &rfc, endptr - ptr);
   3541
   3542		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
   3543			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
   3544
   3545		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
   3546			if (chan->fcs == L2CAP_FCS_NONE ||
   3547			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
   3548				chan->fcs = L2CAP_FCS_NONE;
   3549				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
   3550						   chan->fcs, endptr - ptr);
   3551			}
   3552		break;
   3553	}
   3554
   3555	req->dcid  = cpu_to_le16(chan->dcid);
   3556	req->flags = cpu_to_le16(0);
   3557
   3558	return ptr - data;
   3559}
   3560
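/* Parse the peer's Configuration Request (stored in chan->conf_req) and
 * build our Configuration Response into data: unknown non-hint options
 * are rejected, the requested MTU, flush timeout, RFC, FCS, EFS and
 * extended window options are checked against our capabilities, and the
 * agreed values are echoed back.  Returns the response length or a
 * negative error such as -ECONNREFUSED.
 */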
   3561static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
   3562{
   3563	struct l2cap_conf_rsp *rsp = data;
   3564	void *ptr = rsp->data;
   3565	void *endptr = data + data_size;
   3566	void *req = chan->conf_req;
   3567	int len = chan->conf_len;
   3568	int type, hint, olen;
   3569	unsigned long val;
   3570	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
   3571	struct l2cap_conf_efs efs;
   3572	u8 remote_efs = 0;
   3573	u16 mtu = L2CAP_DEFAULT_MTU;
   3574	u16 result = L2CAP_CONF_SUCCESS;
   3575	u16 size;
   3576
   3577	BT_DBG("chan %p", chan);
   3578
   3579	while (len >= L2CAP_CONF_OPT_SIZE) {
   3580		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
   3581		if (len < 0)
   3582			break;
   3583
   3584		hint  = type & L2CAP_CONF_HINT;
   3585		type &= L2CAP_CONF_MASK;
   3586
   3587		switch (type) {
   3588		case L2CAP_CONF_MTU:
   3589			if (olen != 2)
   3590				break;
   3591			mtu = val;
   3592			break;
   3593
   3594		case L2CAP_CONF_FLUSH_TO:
   3595			if (olen != 2)
   3596				break;
   3597			chan->flush_to = val;
   3598			break;
   3599
   3600		case L2CAP_CONF_QOS:
   3601			break;
   3602
   3603		case L2CAP_CONF_RFC:
   3604			if (olen != sizeof(rfc))
   3605				break;
   3606			memcpy(&rfc, (void *) val, olen);
   3607			break;
   3608
   3609		case L2CAP_CONF_FCS:
   3610			if (olen != 1)
   3611				break;
   3612			if (val == L2CAP_FCS_NONE)
   3613				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
   3614			break;
   3615
   3616		case L2CAP_CONF_EFS:
   3617			if (olen != sizeof(efs))
   3618				break;
   3619			remote_efs = 1;
   3620			memcpy(&efs, (void *) val, olen);
   3621			break;
   3622
   3623		case L2CAP_CONF_EWS:
   3624			if (olen != 2)
   3625				break;
   3626			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
   3627				return -ECONNREFUSED;
   3628			set_bit(FLAG_EXT_CTRL, &chan->flags);
   3629			set_bit(CONF_EWS_RECV, &chan->conf_state);
   3630			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
   3631			chan->remote_tx_win = val;
   3632			break;
   3633
   3634		default:
   3635			if (hint)
   3636				break;
   3637			result = L2CAP_CONF_UNKNOWN;
   3638			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
   3639			break;
   3640		}
   3641	}
   3642
   3643	if (chan->num_conf_rsp || chan->num_conf_req > 1)
   3644		goto done;
   3645
   3646	switch (chan->mode) {
   3647	case L2CAP_MODE_STREAMING:
   3648	case L2CAP_MODE_ERTM:
   3649		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
   3650			chan->mode = l2cap_select_mode(rfc.mode,
   3651						       chan->conn->feat_mask);
   3652			break;
   3653		}
   3654
   3655		if (remote_efs) {
   3656			if (__l2cap_efs_supported(chan->conn))
   3657				set_bit(FLAG_EFS_ENABLE, &chan->flags);
   3658			else
   3659				return -ECONNREFUSED;
   3660		}
   3661
   3662		if (chan->mode != rfc.mode)
   3663			return -ECONNREFUSED;
   3664
   3665		break;
   3666	}
   3667
   3668done:
   3669	if (chan->mode != rfc.mode) {
   3670		result = L2CAP_CONF_UNACCEPT;
   3671		rfc.mode = chan->mode;
   3672
   3673		if (chan->num_conf_rsp == 1)
   3674			return -ECONNREFUSED;
   3675
   3676		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
   3677				   (unsigned long) &rfc, endptr - ptr);
   3678	}
   3679
   3680	if (result == L2CAP_CONF_SUCCESS) {
   3681		/* Configure output options and let the other side know
   3682		 * which ones we don't like. */
   3683
   3684		if (mtu < L2CAP_DEFAULT_MIN_MTU)
   3685			result = L2CAP_CONF_UNACCEPT;
   3686		else {
   3687			chan->omtu = mtu;
   3688			set_bit(CONF_MTU_DONE, &chan->conf_state);
   3689		}
   3690		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
   3691
   3692		if (remote_efs) {
   3693			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
   3694			    efs.stype != L2CAP_SERV_NOTRAFIC &&
   3695			    efs.stype != chan->local_stype) {
   3696
   3697				result = L2CAP_CONF_UNACCEPT;
   3698
   3699				if (chan->num_conf_req >= 1)
   3700					return -ECONNREFUSED;
   3701
   3702				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
   3703						   sizeof(efs),
   3704						   (unsigned long) &efs, endptr - ptr);
   3705			} else {
   3706				/* Send PENDING Conf Rsp */
   3707				result = L2CAP_CONF_PENDING;
   3708				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
   3709			}
   3710		}
   3711
   3712		switch (rfc.mode) {
   3713		case L2CAP_MODE_BASIC:
   3714			chan->fcs = L2CAP_FCS_NONE;
   3715			set_bit(CONF_MODE_DONE, &chan->conf_state);
   3716			break;
   3717
   3718		case L2CAP_MODE_ERTM:
   3719			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
   3720				chan->remote_tx_win = rfc.txwin_size;
   3721			else
   3722				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
   3723
   3724			chan->remote_max_tx = rfc.max_transmit;
   3725
   3726			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
   3727				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
   3728				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
   3729			rfc.max_pdu_size = cpu_to_le16(size);
   3730			chan->remote_mps = size;
   3731
   3732			__l2cap_set_ertm_timeouts(chan, &rfc);
   3733
   3734			set_bit(CONF_MODE_DONE, &chan->conf_state);
   3735
   3736			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
   3737					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
   3738
   3739			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
   3740				chan->remote_id = efs.id;
   3741				chan->remote_stype = efs.stype;
   3742				chan->remote_msdu = le16_to_cpu(efs.msdu);
   3743				chan->remote_flush_to =
   3744					le32_to_cpu(efs.flush_to);
   3745				chan->remote_acc_lat =
   3746					le32_to_cpu(efs.acc_lat);
   3747				chan->remote_sdu_itime =
   3748					le32_to_cpu(efs.sdu_itime);
   3749				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
   3750						   sizeof(efs),
   3751						   (unsigned long) &efs, endptr - ptr);
   3752			}
   3753			break;
   3754
   3755		case L2CAP_MODE_STREAMING:
   3756			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
   3757				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
   3758				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
   3759			rfc.max_pdu_size = cpu_to_le16(size);
   3760			chan->remote_mps = size;
   3761
   3762			set_bit(CONF_MODE_DONE, &chan->conf_state);
   3763
   3764			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
   3765					   (unsigned long) &rfc, endptr - ptr);
   3766
   3767			break;
   3768
   3769		default:
   3770			result = L2CAP_CONF_UNACCEPT;
   3771
   3772			memset(&rfc, 0, sizeof(rfc));
   3773			rfc.mode = chan->mode;
   3774		}
   3775
   3776		if (result == L2CAP_CONF_SUCCESS)
   3777			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
   3778	}
   3779	rsp->scid   = cpu_to_le16(chan->dcid);
   3780	rsp->result = cpu_to_le16(result);
   3781	rsp->flags  = cpu_to_le16(0);
   3782
   3783	return ptr - data;
   3784}
   3785
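        /* Process the options in a Configuration Response and build the
         * follow-up Configuration Request in data, updating *result as
         * needed. Returns the request length or a negative error.
         */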
   3786static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
   3787				void *data, size_t size, u16 *result)
   3788{
   3789	struct l2cap_conf_req *req = data;
   3790	void *ptr = req->data;
   3791	void *endptr = data + size;
   3792	int type, olen;
   3793	unsigned long val;
   3794	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
   3795	struct l2cap_conf_efs efs;
   3796
   3797	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
   3798
   3799	while (len >= L2CAP_CONF_OPT_SIZE) {
   3800		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
   3801		if (len < 0)
   3802			break;
   3803
   3804		switch (type) {
   3805		case L2CAP_CONF_MTU:
   3806			if (olen != 2)
   3807				break;
   3808			if (val < L2CAP_DEFAULT_MIN_MTU) {
   3809				*result = L2CAP_CONF_UNACCEPT;
   3810				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
   3811			} else
   3812				chan->imtu = val;
   3813			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
   3814					   endptr - ptr);
   3815			break;
   3816
   3817		case L2CAP_CONF_FLUSH_TO:
   3818			if (olen != 2)
   3819				break;
   3820			chan->flush_to = val;
   3821			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
   3822					   chan->flush_to, endptr - ptr);
   3823			break;
   3824
   3825		case L2CAP_CONF_RFC:
   3826			if (olen != sizeof(rfc))
   3827				break;
   3828			memcpy(&rfc, (void *)val, olen);
   3829			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
   3830			    rfc.mode != chan->mode)
   3831				return -ECONNREFUSED;
   3832			chan->fcs = 0;
   3833			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
   3834					   (unsigned long) &rfc, endptr - ptr);
   3835			break;
   3836
   3837		case L2CAP_CONF_EWS:
   3838			if (olen != 2)
   3839				break;
   3840			chan->ack_win = min_t(u16, val, chan->ack_win);
   3841			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
   3842					   chan->tx_win, endptr - ptr);
   3843			break;
   3844
   3845		case L2CAP_CONF_EFS:
   3846			if (olen != sizeof(efs))
   3847				break;
   3848			memcpy(&efs, (void *)val, olen);
   3849			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
   3850			    efs.stype != L2CAP_SERV_NOTRAFIC &&
   3851			    efs.stype != chan->local_stype)
   3852				return -ECONNREFUSED;
   3853			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
   3854					   (unsigned long) &efs, endptr - ptr);
   3855			break;
   3856
   3857		case L2CAP_CONF_FCS:
   3858			if (olen != 1)
   3859				break;
   3860			if (*result == L2CAP_CONF_PENDING)
   3861				if (val == L2CAP_FCS_NONE)
   3862					set_bit(CONF_RECV_NO_FCS,
   3863						&chan->conf_state);
   3864			break;
   3865		}
   3866	}
   3867
   3868	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
   3869		return -ECONNREFUSED;
   3870
   3871	chan->mode = rfc.mode;
   3872
   3873	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
   3874		switch (rfc.mode) {
   3875		case L2CAP_MODE_ERTM:
   3876			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
   3877			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
   3878			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
   3879			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
   3880				chan->ack_win = min_t(u16, chan->ack_win,
   3881						      rfc.txwin_size);
   3882
   3883			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
   3884				chan->local_msdu = le16_to_cpu(efs.msdu);
   3885				chan->local_sdu_itime =
   3886					le32_to_cpu(efs.sdu_itime);
   3887				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
   3888				chan->local_flush_to =
   3889					le32_to_cpu(efs.flush_to);
   3890			}
   3891			break;
   3892
   3893		case L2CAP_MODE_STREAMING:
   3894			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
   3895		}
   3896	}
   3897
   3898	req->dcid   = cpu_to_le16(chan->dcid);
   3899	req->flags  = cpu_to_le16(0);
   3900
   3901	return ptr - data;
   3902}
   3903
   3904static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
   3905				u16 result, u16 flags)
   3906{
   3907	struct l2cap_conf_rsp *rsp = data;
   3908	void *ptr = rsp->data;
   3909
   3910	BT_DBG("chan %p", chan);
   3911
   3912	rsp->scid   = cpu_to_le16(chan->dcid);
   3913	rsp->result = cpu_to_le16(result);
   3914	rsp->flags  = cpu_to_le16(flags);
   3915
   3916	return ptr - data;
   3917}
   3918
   3919void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
   3920{
   3921	struct l2cap_le_conn_rsp rsp;
   3922	struct l2cap_conn *conn = chan->conn;
   3923
   3924	BT_DBG("chan %p", chan);
   3925
   3926	rsp.dcid    = cpu_to_le16(chan->scid);
   3927	rsp.mtu     = cpu_to_le16(chan->imtu);
   3928	rsp.mps     = cpu_to_le16(chan->mps);
   3929	rsp.credits = cpu_to_le16(chan->rx_credits);
   3930	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
   3931
   3932	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
   3933		       &rsp);
   3934}
   3935
   3936void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
   3937{
   3938	struct {
   3939		struct l2cap_ecred_conn_rsp rsp;
   3940		__le16 dcid[5];
   3941	} __packed pdu;
   3942	struct l2cap_conn *conn = chan->conn;
   3943	u16 ident = chan->ident;
   3944	int i = 0;
   3945
   3946	if (!ident)
   3947		return;
   3948
   3949	BT_DBG("chan %p ident %d", chan, ident);
   3950
   3951	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
   3952	pdu.rsp.mps     = cpu_to_le16(chan->mps);
   3953	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
   3954	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
   3955
   3956	mutex_lock(&conn->chan_lock);
   3957
   3958	list_for_each_entry(chan, &conn->chan_l, list) {
   3959		if (chan->ident != ident)
   3960			continue;
   3961
   3962		/* Reset ident so only one response is sent */
   3963		chan->ident = 0;
   3964
   3965		/* Include all channels pending with the same ident */
   3966		pdu.dcid[i++] = cpu_to_le16(chan->scid);
   3967	}
   3968
   3969	mutex_unlock(&conn->chan_lock);
   3970
   3971	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
   3972			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
   3973}
   3974
   3975void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
   3976{
   3977	struct l2cap_conn_rsp rsp;
   3978	struct l2cap_conn *conn = chan->conn;
   3979	u8 buf[128];
   3980	u8 rsp_code;
   3981
   3982	rsp.scid   = cpu_to_le16(chan->dcid);
   3983	rsp.dcid   = cpu_to_le16(chan->scid);
   3984	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
   3985	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
   3986
   3987	if (chan->hs_hcon)
   3988		rsp_code = L2CAP_CREATE_CHAN_RSP;
   3989	else
   3990		rsp_code = L2CAP_CONN_RSP;
   3991
   3992	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
   3993
   3994	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
   3995
   3996	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
   3997		return;
   3998
   3999	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
   4000		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
   4001	chan->num_conf_req++;
   4002}
   4003
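        /* Extract the RFC and extended window size options from a successful
         * Configuration Response and apply the ERTM/streaming parameters to
         * the channel.
         */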
   4004static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
   4005{
   4006	int type, olen;
   4007	unsigned long val;
   4008	/* Use sane default values in case a misbehaving remote device
   4009	 * did not send an RFC or extended window size option.
   4010	 */
   4011	u16 txwin_ext = chan->ack_win;
   4012	struct l2cap_conf_rfc rfc = {
   4013		.mode = chan->mode,
   4014		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
   4015		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
   4016		.max_pdu_size = cpu_to_le16(chan->imtu),
   4017		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
   4018	};
   4019
   4020	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
   4021
   4022	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
   4023		return;
   4024
   4025	while (len >= L2CAP_CONF_OPT_SIZE) {
   4026		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
   4027		if (len < 0)
   4028			break;
   4029
   4030		switch (type) {
   4031		case L2CAP_CONF_RFC:
   4032			if (olen != sizeof(rfc))
   4033				break;
   4034			memcpy(&rfc, (void *)val, olen);
   4035			break;
   4036		case L2CAP_CONF_EWS:
   4037			if (olen != 2)
   4038				break;
   4039			txwin_ext = val;
   4040			break;
   4041		}
   4042	}
   4043
   4044	switch (rfc.mode) {
   4045	case L2CAP_MODE_ERTM:
   4046		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
   4047		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
   4048		chan->mps = le16_to_cpu(rfc.max_pdu_size);
   4049		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
   4050			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
   4051		else
   4052			chan->ack_win = min_t(u16, chan->ack_win,
   4053					      rfc.txwin_size);
   4054		break;
   4055	case L2CAP_MODE_STREAMING:
   4056		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
   4057	}
   4058}
   4059
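        /* Handle an incoming Command Reject. If it rejects our pending
         * Information Request, stop the info timer and start the connection
         * procedure anyway.
         */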
   4060static inline int l2cap_command_rej(struct l2cap_conn *conn,
   4061				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   4062				    u8 *data)
   4063{
   4064	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
   4065
   4066	if (cmd_len < sizeof(*rej))
   4067		return -EPROTO;
   4068
   4069	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
   4070		return 0;
   4071
   4072	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
   4073	    cmd->ident == conn->info_ident) {
   4074		cancel_delayed_work(&conn->info_timer);
   4075
   4076		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
   4077		conn->info_ident = 0;
   4078
   4079		l2cap_conn_start(conn);
   4080	}
   4081
   4082	return 0;
   4083}
   4084
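        /* Common handler for Connection Request and Create Channel Request:
         * validate the PSM and source CID, allocate a channel from the
         * matching listening channel and send the response given by rsp_code.
         */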
   4085static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
   4086					struct l2cap_cmd_hdr *cmd,
   4087					u8 *data, u8 rsp_code, u8 amp_id)
   4088{
   4089	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
   4090	struct l2cap_conn_rsp rsp;
   4091	struct l2cap_chan *chan = NULL, *pchan;
   4092	int result, status = L2CAP_CS_NO_INFO;
   4093
   4094	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
   4095	__le16 psm = req->psm;
   4096
   4097	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
   4098
    4099	/* Check if we have a socket listening on this PSM */
   4100	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
   4101					 &conn->hcon->dst, ACL_LINK);
   4102	if (!pchan) {
   4103		result = L2CAP_CR_BAD_PSM;
   4104		goto sendresp;
   4105	}
   4106
   4107	mutex_lock(&conn->chan_lock);
   4108	l2cap_chan_lock(pchan);
   4109
   4110	/* Check if the ACL is secure enough (if not SDP) */
   4111	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
   4112	    !hci_conn_check_link_mode(conn->hcon)) {
   4113		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
   4114		result = L2CAP_CR_SEC_BLOCK;
   4115		goto response;
   4116	}
   4117
   4118	result = L2CAP_CR_NO_MEM;
   4119
   4120	/* Check for valid dynamic CID range (as per Erratum 3253) */
   4121	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
   4122		result = L2CAP_CR_INVALID_SCID;
   4123		goto response;
   4124	}
   4125
    4126	/* Check if we already have a channel with that dcid */
   4127	if (__l2cap_get_chan_by_dcid(conn, scid)) {
   4128		result = L2CAP_CR_SCID_IN_USE;
   4129		goto response;
   4130	}
   4131
   4132	chan = pchan->ops->new_connection(pchan);
   4133	if (!chan)
   4134		goto response;
   4135
    4136	/* For certain devices (e.g. an HID mouse), support for authentication,
    4137	 * pairing and bonding is optional. For such devices, in order to avoid
    4138	 * keeping the ACL alive for too long after L2CAP disconnection, reset the
    4139	 * ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
    4140	 */
   4141	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
   4142
   4143	bacpy(&chan->src, &conn->hcon->src);
   4144	bacpy(&chan->dst, &conn->hcon->dst);
   4145	chan->src_type = bdaddr_src_type(conn->hcon);
   4146	chan->dst_type = bdaddr_dst_type(conn->hcon);
   4147	chan->psm  = psm;
   4148	chan->dcid = scid;
   4149	chan->local_amp_id = amp_id;
   4150
   4151	__l2cap_chan_add(conn, chan);
   4152
   4153	dcid = chan->scid;
   4154
   4155	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
   4156
   4157	chan->ident = cmd->ident;
   4158
   4159	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
   4160		if (l2cap_chan_check_security(chan, false)) {
   4161			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
   4162				l2cap_state_change(chan, BT_CONNECT2);
   4163				result = L2CAP_CR_PEND;
   4164				status = L2CAP_CS_AUTHOR_PEND;
   4165				chan->ops->defer(chan);
   4166			} else {
   4167				/* Force pending result for AMP controllers.
   4168				 * The connection will succeed after the
   4169				 * physical link is up.
   4170				 */
   4171				if (amp_id == AMP_ID_BREDR) {
   4172					l2cap_state_change(chan, BT_CONFIG);
   4173					result = L2CAP_CR_SUCCESS;
   4174				} else {
   4175					l2cap_state_change(chan, BT_CONNECT2);
   4176					result = L2CAP_CR_PEND;
   4177				}
   4178				status = L2CAP_CS_NO_INFO;
   4179			}
   4180		} else {
   4181			l2cap_state_change(chan, BT_CONNECT2);
   4182			result = L2CAP_CR_PEND;
   4183			status = L2CAP_CS_AUTHEN_PEND;
   4184		}
   4185	} else {
   4186		l2cap_state_change(chan, BT_CONNECT2);
   4187		result = L2CAP_CR_PEND;
   4188		status = L2CAP_CS_NO_INFO;
   4189	}
   4190
   4191response:
   4192	l2cap_chan_unlock(pchan);
   4193	mutex_unlock(&conn->chan_lock);
   4194	l2cap_chan_put(pchan);
   4195
   4196sendresp:
   4197	rsp.scid   = cpu_to_le16(scid);
   4198	rsp.dcid   = cpu_to_le16(dcid);
   4199	rsp.result = cpu_to_le16(result);
   4200	rsp.status = cpu_to_le16(status);
   4201	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
   4202
   4203	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
   4204		struct l2cap_info_req info;
   4205		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
   4206
   4207		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
   4208		conn->info_ident = l2cap_get_ident(conn);
   4209
   4210		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
   4211
   4212		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
   4213			       sizeof(info), &info);
   4214	}
   4215
   4216	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
   4217	    result == L2CAP_CR_SUCCESS) {
   4218		u8 buf[128];
   4219		set_bit(CONF_REQ_SENT, &chan->conf_state);
   4220		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
   4221			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
   4222		chan->num_conf_req++;
   4223	}
   4224
   4225	return chan;
   4226}
   4227
   4228static int l2cap_connect_req(struct l2cap_conn *conn,
   4229			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
   4230{
   4231	struct hci_dev *hdev = conn->hcon->hdev;
   4232	struct hci_conn *hcon = conn->hcon;
   4233
   4234	if (cmd_len < sizeof(struct l2cap_conn_req))
   4235		return -EPROTO;
   4236
   4237	hci_dev_lock(hdev);
   4238	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
   4239	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
   4240		mgmt_device_connected(hdev, hcon, NULL, 0);
   4241	hci_dev_unlock(hdev);
   4242
   4243	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
   4244	return 0;
   4245}
   4246
   4247static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
   4248				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   4249				    u8 *data)
   4250{
   4251	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
   4252	u16 scid, dcid, result, status;
   4253	struct l2cap_chan *chan;
   4254	u8 req[128];
   4255	int err;
   4256
   4257	if (cmd_len < sizeof(*rsp))
   4258		return -EPROTO;
   4259
   4260	scid   = __le16_to_cpu(rsp->scid);
   4261	dcid   = __le16_to_cpu(rsp->dcid);
   4262	result = __le16_to_cpu(rsp->result);
   4263	status = __le16_to_cpu(rsp->status);
   4264
   4265	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
   4266	       dcid, scid, result, status);
   4267
   4268	mutex_lock(&conn->chan_lock);
   4269
   4270	if (scid) {
   4271		chan = __l2cap_get_chan_by_scid(conn, scid);
   4272		if (!chan) {
   4273			err = -EBADSLT;
   4274			goto unlock;
   4275		}
   4276	} else {
   4277		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
   4278		if (!chan) {
   4279			err = -EBADSLT;
   4280			goto unlock;
   4281		}
   4282	}
   4283
   4284	err = 0;
   4285
   4286	l2cap_chan_lock(chan);
   4287
   4288	switch (result) {
   4289	case L2CAP_CR_SUCCESS:
   4290		l2cap_state_change(chan, BT_CONFIG);
   4291		chan->ident = 0;
   4292		chan->dcid = dcid;
   4293		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
   4294
   4295		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
   4296			break;
   4297
   4298		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
   4299			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
   4300		chan->num_conf_req++;
   4301		break;
   4302
   4303	case L2CAP_CR_PEND:
   4304		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
   4305		break;
   4306
   4307	default:
   4308		l2cap_chan_del(chan, ECONNREFUSED);
   4309		break;
   4310	}
   4311
   4312	l2cap_chan_unlock(chan);
   4313
   4314unlock:
   4315	mutex_unlock(&conn->chan_lock);
   4316
   4317	return err;
   4318}
   4319
   4320static inline void set_default_fcs(struct l2cap_chan *chan)
   4321{
   4322	/* FCS is enabled only in ERTM or streaming mode, if one or both
   4323	 * sides request it.
   4324	 */
   4325	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
   4326		chan->fcs = L2CAP_FCS_NONE;
   4327	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
   4328		chan->fcs = L2CAP_FCS_CRC16;
   4329}
   4330
   4331static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
   4332				    u8 ident, u16 flags)
   4333{
   4334	struct l2cap_conn *conn = chan->conn;
   4335
   4336	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
   4337	       flags);
   4338
   4339	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
   4340	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
   4341
   4342	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
   4343		       l2cap_build_conf_rsp(chan, data,
   4344					    L2CAP_CONF_SUCCESS, flags), data);
   4345}
   4346
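        /* Send a Command Reject with reason Invalid CID for the given
         * source/destination CIDs.
         */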
   4347static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
   4348				   u16 scid, u16 dcid)
   4349{
   4350	struct l2cap_cmd_rej_cid rej;
   4351
   4352	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
   4353	rej.scid = __cpu_to_le16(scid);
   4354	rej.dcid = __cpu_to_le16(dcid);
   4355
   4356	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
   4357}
   4358
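        /* Handle a Configuration Request: accumulate continuation fragments in
         * chan->conf_req and, once the request is complete, parse it, send the
         * response and finish channel setup if both directions are done.
         */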
   4359static inline int l2cap_config_req(struct l2cap_conn *conn,
   4360				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   4361				   u8 *data)
   4362{
   4363	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
   4364	u16 dcid, flags;
   4365	u8 rsp[64];
   4366	struct l2cap_chan *chan;
   4367	int len, err = 0;
   4368
   4369	if (cmd_len < sizeof(*req))
   4370		return -EPROTO;
   4371
   4372	dcid  = __le16_to_cpu(req->dcid);
   4373	flags = __le16_to_cpu(req->flags);
   4374
   4375	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
   4376
   4377	chan = l2cap_get_chan_by_scid(conn, dcid);
   4378	if (!chan) {
   4379		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
   4380		return 0;
   4381	}
   4382
   4383	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
   4384	    chan->state != BT_CONNECTED) {
   4385		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
   4386				       chan->dcid);
   4387		goto unlock;
   4388	}
   4389
   4390	/* Reject if config buffer is too small. */
   4391	len = cmd_len - sizeof(*req);
   4392	if (chan->conf_len + len > sizeof(chan->conf_req)) {
   4393		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
   4394			       l2cap_build_conf_rsp(chan, rsp,
   4395			       L2CAP_CONF_REJECT, flags), rsp);
   4396		goto unlock;
   4397	}
   4398
   4399	/* Store config. */
   4400	memcpy(chan->conf_req + chan->conf_len, req->data, len);
   4401	chan->conf_len += len;
   4402
   4403	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
   4404		/* Incomplete config. Send empty response. */
   4405		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
   4406			       l2cap_build_conf_rsp(chan, rsp,
   4407			       L2CAP_CONF_SUCCESS, flags), rsp);
   4408		goto unlock;
   4409	}
   4410
   4411	/* Complete config. */
   4412	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
   4413	if (len < 0) {
   4414		l2cap_send_disconn_req(chan, ECONNRESET);
   4415		goto unlock;
   4416	}
   4417
   4418	chan->ident = cmd->ident;
   4419	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
   4420	chan->num_conf_rsp++;
   4421
   4422	/* Reset config buffer. */
   4423	chan->conf_len = 0;
   4424
   4425	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
   4426		goto unlock;
   4427
   4428	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
   4429		set_default_fcs(chan);
   4430
   4431		if (chan->mode == L2CAP_MODE_ERTM ||
   4432		    chan->mode == L2CAP_MODE_STREAMING)
   4433			err = l2cap_ertm_init(chan);
   4434
   4435		if (err < 0)
   4436			l2cap_send_disconn_req(chan, -err);
   4437		else
   4438			l2cap_chan_ready(chan);
   4439
   4440		goto unlock;
   4441	}
   4442
   4443	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
   4444		u8 buf[64];
   4445		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
   4446			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
   4447		chan->num_conf_req++;
   4448	}
   4449
    4450	/* Got a Conf Rsp PENDING from the remote side and assume we sent
    4451	 * a Conf Rsp PENDING in the code above. */
   4452	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
   4453	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
   4454
   4455		/* check compatibility */
   4456
   4457		/* Send rsp for BR/EDR channel */
   4458		if (!chan->hs_hcon)
   4459			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
   4460		else
   4461			chan->ident = cmd->ident;
   4462	}
   4463
   4464unlock:
   4465	l2cap_chan_unlock(chan);
   4466	return err;
   4467}
   4468
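        /* Handle a Configuration Response: on success record the negotiated
         * parameters, on unacceptable or unknown options retry with a new
         * request, and finish channel setup once both directions are done.
         */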
   4469static inline int l2cap_config_rsp(struct l2cap_conn *conn,
   4470				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   4471				   u8 *data)
   4472{
   4473	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
   4474	u16 scid, flags, result;
   4475	struct l2cap_chan *chan;
   4476	int len = cmd_len - sizeof(*rsp);
   4477	int err = 0;
   4478
   4479	if (cmd_len < sizeof(*rsp))
   4480		return -EPROTO;
   4481
   4482	scid   = __le16_to_cpu(rsp->scid);
   4483	flags  = __le16_to_cpu(rsp->flags);
   4484	result = __le16_to_cpu(rsp->result);
   4485
   4486	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
   4487	       result, len);
   4488
   4489	chan = l2cap_get_chan_by_scid(conn, scid);
   4490	if (!chan)
   4491		return 0;
   4492
   4493	switch (result) {
   4494	case L2CAP_CONF_SUCCESS:
   4495		l2cap_conf_rfc_get(chan, rsp->data, len);
   4496		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
   4497		break;
   4498
   4499	case L2CAP_CONF_PENDING:
   4500		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
   4501
   4502		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
   4503			char buf[64];
   4504
   4505			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
   4506						   buf, sizeof(buf), &result);
   4507			if (len < 0) {
   4508				l2cap_send_disconn_req(chan, ECONNRESET);
   4509				goto done;
   4510			}
   4511
   4512			if (!chan->hs_hcon) {
   4513				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
   4514							0);
   4515			} else {
   4516				if (l2cap_check_efs(chan)) {
   4517					amp_create_logical_link(chan);
   4518					chan->ident = cmd->ident;
   4519				}
   4520			}
   4521		}
   4522		goto done;
   4523
   4524	case L2CAP_CONF_UNKNOWN:
   4525	case L2CAP_CONF_UNACCEPT:
   4526		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
   4527			char req[64];
   4528
   4529			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
   4530				l2cap_send_disconn_req(chan, ECONNRESET);
   4531				goto done;
   4532			}
   4533
   4534			/* throw out any old stored conf requests */
   4535			result = L2CAP_CONF_SUCCESS;
   4536			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
   4537						   req, sizeof(req), &result);
   4538			if (len < 0) {
   4539				l2cap_send_disconn_req(chan, ECONNRESET);
   4540				goto done;
   4541			}
   4542
   4543			l2cap_send_cmd(conn, l2cap_get_ident(conn),
   4544				       L2CAP_CONF_REQ, len, req);
   4545			chan->num_conf_req++;
   4546			if (result != L2CAP_CONF_SUCCESS)
   4547				goto done;
   4548			break;
   4549		}
   4550		fallthrough;
   4551
   4552	default:
   4553		l2cap_chan_set_err(chan, ECONNRESET);
   4554
   4555		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
   4556		l2cap_send_disconn_req(chan, ECONNRESET);
   4557		goto done;
   4558	}
   4559
   4560	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
   4561		goto done;
   4562
   4563	set_bit(CONF_INPUT_DONE, &chan->conf_state);
   4564
   4565	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
   4566		set_default_fcs(chan);
   4567
   4568		if (chan->mode == L2CAP_MODE_ERTM ||
   4569		    chan->mode == L2CAP_MODE_STREAMING)
   4570			err = l2cap_ertm_init(chan);
   4571
   4572		if (err < 0)
   4573			l2cap_send_disconn_req(chan, -err);
   4574		else
   4575			l2cap_chan_ready(chan);
   4576	}
   4577
   4578done:
   4579	l2cap_chan_unlock(chan);
   4580	return err;
   4581}
   4582
   4583static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
   4584				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   4585				       u8 *data)
   4586{
   4587	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
   4588	struct l2cap_disconn_rsp rsp;
   4589	u16 dcid, scid;
   4590	struct l2cap_chan *chan;
   4591
   4592	if (cmd_len != sizeof(*req))
   4593		return -EPROTO;
   4594
   4595	scid = __le16_to_cpu(req->scid);
   4596	dcid = __le16_to_cpu(req->dcid);
   4597
   4598	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
   4599
   4600	mutex_lock(&conn->chan_lock);
   4601
   4602	chan = __l2cap_get_chan_by_scid(conn, dcid);
   4603	if (!chan) {
   4604		mutex_unlock(&conn->chan_lock);
   4605		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
   4606		return 0;
   4607	}
   4608
   4609	l2cap_chan_hold(chan);
   4610	l2cap_chan_lock(chan);
   4611
   4612	rsp.dcid = cpu_to_le16(chan->scid);
   4613	rsp.scid = cpu_to_le16(chan->dcid);
   4614	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
   4615
   4616	chan->ops->set_shutdown(chan);
   4617
   4618	l2cap_chan_del(chan, ECONNRESET);
   4619
   4620	chan->ops->close(chan);
   4621
   4622	l2cap_chan_unlock(chan);
   4623	l2cap_chan_put(chan);
   4624
   4625	mutex_unlock(&conn->chan_lock);
   4626
   4627	return 0;
   4628}
   4629
   4630static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
   4631				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   4632				       u8 *data)
   4633{
   4634	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
   4635	u16 dcid, scid;
   4636	struct l2cap_chan *chan;
   4637
   4638	if (cmd_len != sizeof(*rsp))
   4639		return -EPROTO;
   4640
   4641	scid = __le16_to_cpu(rsp->scid);
   4642	dcid = __le16_to_cpu(rsp->dcid);
   4643
   4644	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
   4645
   4646	mutex_lock(&conn->chan_lock);
   4647
   4648	chan = __l2cap_get_chan_by_scid(conn, scid);
   4649	if (!chan) {
   4650		mutex_unlock(&conn->chan_lock);
   4651		return 0;
   4652	}
   4653
   4654	l2cap_chan_hold(chan);
   4655	l2cap_chan_lock(chan);
   4656
   4657	if (chan->state != BT_DISCONN) {
   4658		l2cap_chan_unlock(chan);
   4659		l2cap_chan_put(chan);
   4660		mutex_unlock(&conn->chan_lock);
   4661		return 0;
   4662	}
   4663
   4664	l2cap_chan_del(chan, 0);
   4665
   4666	chan->ops->close(chan);
   4667
   4668	l2cap_chan_unlock(chan);
   4669	l2cap_chan_put(chan);
   4670
   4671	mutex_unlock(&conn->chan_lock);
   4672
   4673	return 0;
   4674}
   4675
   4676static inline int l2cap_information_req(struct l2cap_conn *conn,
   4677					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   4678					u8 *data)
   4679{
   4680	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
   4681	u16 type;
   4682
   4683	if (cmd_len != sizeof(*req))
   4684		return -EPROTO;
   4685
   4686	type = __le16_to_cpu(req->type);
   4687
   4688	BT_DBG("type 0x%4.4x", type);
   4689
   4690	if (type == L2CAP_IT_FEAT_MASK) {
   4691		u8 buf[8];
   4692		u32 feat_mask = l2cap_feat_mask;
   4693		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
   4694		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
   4695		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
   4696		if (!disable_ertm)
   4697			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
   4698				| L2CAP_FEAT_FCS;
   4699		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
   4700			feat_mask |= L2CAP_FEAT_EXT_FLOW
   4701				| L2CAP_FEAT_EXT_WINDOW;
   4702
   4703		put_unaligned_le32(feat_mask, rsp->data);
   4704		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
   4705			       buf);
   4706	} else if (type == L2CAP_IT_FIXED_CHAN) {
   4707		u8 buf[12];
   4708		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
   4709
   4710		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
   4711		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
   4712		rsp->data[0] = conn->local_fixed_chan;
   4713		memset(rsp->data + 1, 0, 7);
   4714		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
   4715			       buf);
   4716	} else {
   4717		struct l2cap_info_rsp rsp;
   4718		rsp.type   = cpu_to_le16(type);
   4719		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
   4720		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
   4721			       &rsp);
   4722	}
   4723
   4724	return 0;
   4725}
   4726
   4727static inline int l2cap_information_rsp(struct l2cap_conn *conn,
   4728					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   4729					u8 *data)
   4730{
   4731	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
   4732	u16 type, result;
   4733
   4734	if (cmd_len < sizeof(*rsp))
   4735		return -EPROTO;
   4736
   4737	type   = __le16_to_cpu(rsp->type);
   4738	result = __le16_to_cpu(rsp->result);
   4739
   4740	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
   4741
   4742	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
   4743	if (cmd->ident != conn->info_ident ||
   4744	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
   4745		return 0;
   4746
   4747	cancel_delayed_work(&conn->info_timer);
   4748
   4749	if (result != L2CAP_IR_SUCCESS) {
   4750		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
   4751		conn->info_ident = 0;
   4752
   4753		l2cap_conn_start(conn);
   4754
   4755		return 0;
   4756	}
   4757
   4758	switch (type) {
   4759	case L2CAP_IT_FEAT_MASK:
   4760		conn->feat_mask = get_unaligned_le32(rsp->data);
   4761
   4762		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
   4763			struct l2cap_info_req req;
   4764			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
   4765
   4766			conn->info_ident = l2cap_get_ident(conn);
   4767
   4768			l2cap_send_cmd(conn, conn->info_ident,
   4769				       L2CAP_INFO_REQ, sizeof(req), &req);
   4770		} else {
   4771			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
   4772			conn->info_ident = 0;
   4773
   4774			l2cap_conn_start(conn);
   4775		}
   4776		break;
   4777
   4778	case L2CAP_IT_FIXED_CHAN:
   4779		conn->remote_fixed_chan = rsp->data[0];
   4780		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
   4781		conn->info_ident = 0;
   4782
   4783		l2cap_conn_start(conn);
   4784		break;
   4785	}
   4786
   4787	return 0;
   4788}
   4789
   4790static int l2cap_create_channel_req(struct l2cap_conn *conn,
   4791				    struct l2cap_cmd_hdr *cmd,
   4792				    u16 cmd_len, void *data)
   4793{
   4794	struct l2cap_create_chan_req *req = data;
   4795	struct l2cap_create_chan_rsp rsp;
   4796	struct l2cap_chan *chan;
   4797	struct hci_dev *hdev;
   4798	u16 psm, scid;
   4799
   4800	if (cmd_len != sizeof(*req))
   4801		return -EPROTO;
   4802
   4803	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
   4804		return -EINVAL;
   4805
   4806	psm = le16_to_cpu(req->psm);
   4807	scid = le16_to_cpu(req->scid);
   4808
   4809	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
   4810
    4811	/* For controller id 0, make a BR/EDR connection */
   4812	if (req->amp_id == AMP_ID_BREDR) {
   4813		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
   4814			      req->amp_id);
   4815		return 0;
   4816	}
   4817
   4818	/* Validate AMP controller id */
   4819	hdev = hci_dev_get(req->amp_id);
   4820	if (!hdev)
   4821		goto error;
   4822
   4823	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
   4824		hci_dev_put(hdev);
   4825		goto error;
   4826	}
   4827
   4828	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
   4829			     req->amp_id);
   4830	if (chan) {
   4831		struct amp_mgr *mgr = conn->hcon->amp_mgr;
   4832		struct hci_conn *hs_hcon;
   4833
   4834		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
   4835						  &conn->hcon->dst);
   4836		if (!hs_hcon) {
   4837			hci_dev_put(hdev);
   4838			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
   4839					       chan->dcid);
   4840			return 0;
   4841		}
   4842
   4843		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
   4844
   4845		mgr->bredr_chan = chan;
   4846		chan->hs_hcon = hs_hcon;
   4847		chan->fcs = L2CAP_FCS_NONE;
   4848		conn->mtu = hdev->block_mtu;
   4849	}
   4850
   4851	hci_dev_put(hdev);
   4852
   4853	return 0;
   4854
   4855error:
   4856	rsp.dcid = 0;
   4857	rsp.scid = cpu_to_le16(scid);
   4858	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
   4859	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
   4860
   4861	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
   4862		       sizeof(rsp), &rsp);
   4863
   4864	return 0;
   4865}
   4866
   4867static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
   4868{
   4869	struct l2cap_move_chan_req req;
   4870	u8 ident;
   4871
   4872	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
   4873
   4874	ident = l2cap_get_ident(chan->conn);
   4875	chan->ident = ident;
   4876
   4877	req.icid = cpu_to_le16(chan->scid);
   4878	req.dest_amp_id = dest_amp_id;
   4879
   4880	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
   4881		       &req);
   4882
   4883	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
   4884}
   4885
   4886static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
   4887{
   4888	struct l2cap_move_chan_rsp rsp;
   4889
   4890	BT_DBG("chan %p, result 0x%4.4x", chan, result);
   4891
   4892	rsp.icid = cpu_to_le16(chan->dcid);
   4893	rsp.result = cpu_to_le16(result);
   4894
   4895	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
   4896		       sizeof(rsp), &rsp);
   4897}
   4898
   4899static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
   4900{
   4901	struct l2cap_move_chan_cfm cfm;
   4902
   4903	BT_DBG("chan %p, result 0x%4.4x", chan, result);
   4904
   4905	chan->ident = l2cap_get_ident(chan->conn);
   4906
   4907	cfm.icid = cpu_to_le16(chan->scid);
   4908	cfm.result = cpu_to_le16(result);
   4909
   4910	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
   4911		       sizeof(cfm), &cfm);
   4912
   4913	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
   4914}
   4915
   4916static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
   4917{
   4918	struct l2cap_move_chan_cfm cfm;
   4919
   4920	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
   4921
   4922	cfm.icid = cpu_to_le16(icid);
   4923	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
   4924
   4925	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
   4926		       sizeof(cfm), &cfm);
   4927}
   4928
   4929static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
   4930					 u16 icid)
   4931{
   4932	struct l2cap_move_chan_cfm_rsp rsp;
   4933
   4934	BT_DBG("icid 0x%4.4x", icid);
   4935
   4936	rsp.icid = cpu_to_le16(icid);
   4937	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
   4938}
   4939
   4940static void __release_logical_link(struct l2cap_chan *chan)
   4941{
   4942	chan->hs_hchan = NULL;
   4943	chan->hs_hcon = NULL;
   4944
   4945	/* Placeholder - release the logical link */
   4946}
   4947
   4948static void l2cap_logical_fail(struct l2cap_chan *chan)
   4949{
   4950	/* Logical link setup failed */
   4951	if (chan->state != BT_CONNECTED) {
   4952		/* Create channel failure, disconnect */
   4953		l2cap_send_disconn_req(chan, ECONNRESET);
   4954		return;
   4955	}
   4956
   4957	switch (chan->move_role) {
   4958	case L2CAP_MOVE_ROLE_RESPONDER:
   4959		l2cap_move_done(chan);
   4960		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
   4961		break;
   4962	case L2CAP_MOVE_ROLE_INITIATOR:
   4963		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
   4964		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
   4965			/* Remote has only sent pending or
   4966			 * success responses, clean up
   4967			 */
   4968			l2cap_move_done(chan);
   4969		}
   4970
   4971		/* Other amp move states imply that the move
   4972		 * has already aborted
   4973		 */
   4974		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
   4975		break;
   4976	}
   4977}
   4978
   4979static void l2cap_logical_finish_create(struct l2cap_chan *chan,
   4980					struct hci_chan *hchan)
   4981{
   4982	struct l2cap_conf_rsp rsp;
   4983
   4984	chan->hs_hchan = hchan;
   4985	chan->hs_hcon->l2cap_data = chan->conn;
   4986
   4987	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
   4988
   4989	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
   4990		int err;
   4991
   4992		set_default_fcs(chan);
   4993
   4994		err = l2cap_ertm_init(chan);
   4995		if (err < 0)
   4996			l2cap_send_disconn_req(chan, -err);
   4997		else
   4998			l2cap_chan_ready(chan);
   4999	}
   5000}
   5001
   5002static void l2cap_logical_finish_move(struct l2cap_chan *chan,
   5003				      struct hci_chan *hchan)
   5004{
   5005	chan->hs_hcon = hchan->conn;
   5006	chan->hs_hcon->l2cap_data = chan->conn;
   5007
   5008	BT_DBG("move_state %d", chan->move_state);
   5009
   5010	switch (chan->move_state) {
   5011	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
   5012		/* Move confirm will be sent after a success
   5013		 * response is received
   5014		 */
   5015		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
   5016		break;
   5017	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
   5018		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
   5019			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
   5020		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
   5021			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
   5022			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
   5023		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
   5024			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
   5025			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
   5026		}
   5027		break;
   5028	default:
   5029		/* Move was not in expected state, free the channel */
   5030		__release_logical_link(chan);
   5031
   5032		chan->move_state = L2CAP_MOVE_STABLE;
   5033	}
   5034}
   5035
   5036/* Call with chan locked */
   5037void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
   5038		       u8 status)
   5039{
   5040	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
   5041
   5042	if (status) {
   5043		l2cap_logical_fail(chan);
   5044		__release_logical_link(chan);
   5045		return;
   5046	}
   5047
   5048	if (chan->state != BT_CONNECTED) {
   5049		/* Ignore logical link if channel is on BR/EDR */
   5050		if (chan->local_amp_id != AMP_ID_BREDR)
   5051			l2cap_logical_finish_create(chan, hchan);
   5052	} else {
   5053		l2cap_logical_finish_move(chan, hchan);
   5054	}
   5055}
   5056
   5057void l2cap_move_start(struct l2cap_chan *chan)
   5058{
   5059	BT_DBG("chan %p", chan);
   5060
   5061	if (chan->local_amp_id == AMP_ID_BREDR) {
   5062		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
   5063			return;
   5064		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
   5065		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
   5066		/* Placeholder - start physical link setup */
   5067	} else {
   5068		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
   5069		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
   5070		chan->move_id = 0;
   5071		l2cap_move_setup(chan);
   5072		l2cap_send_move_chan_req(chan, 0);
   5073	}
   5074}
   5075
   5076static void l2cap_do_create(struct l2cap_chan *chan, int result,
   5077			    u8 local_amp_id, u8 remote_amp_id)
   5078{
   5079	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
   5080	       local_amp_id, remote_amp_id);
   5081
   5082	chan->fcs = L2CAP_FCS_NONE;
   5083
   5084	/* Outgoing channel on AMP */
   5085	if (chan->state == BT_CONNECT) {
   5086		if (result == L2CAP_CR_SUCCESS) {
   5087			chan->local_amp_id = local_amp_id;
   5088			l2cap_send_create_chan_req(chan, remote_amp_id);
   5089		} else {
   5090			/* Revert to BR/EDR connect */
   5091			l2cap_send_conn_req(chan);
   5092		}
   5093
   5094		return;
   5095	}
   5096
   5097	/* Incoming channel on AMP */
   5098	if (__l2cap_no_conn_pending(chan)) {
   5099		struct l2cap_conn_rsp rsp;
   5100		char buf[128];
   5101		rsp.scid = cpu_to_le16(chan->dcid);
   5102		rsp.dcid = cpu_to_le16(chan->scid);
   5103
   5104		if (result == L2CAP_CR_SUCCESS) {
   5105			/* Send successful response */
   5106			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
   5107			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
   5108		} else {
   5109			/* Send negative response */
   5110			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
   5111			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
   5112		}
   5113
   5114		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
   5115			       sizeof(rsp), &rsp);
   5116
   5117		if (result == L2CAP_CR_SUCCESS) {
   5118			l2cap_state_change(chan, BT_CONFIG);
   5119			set_bit(CONF_REQ_SENT, &chan->conf_state);
   5120			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
   5121				       L2CAP_CONF_REQ,
   5122				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
   5123			chan->num_conf_req++;
   5124		}
   5125	}
   5126}
   5127
   5128static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
   5129				   u8 remote_amp_id)
   5130{
   5131	l2cap_move_setup(chan);
   5132	chan->move_id = local_amp_id;
   5133	chan->move_state = L2CAP_MOVE_WAIT_RSP;
   5134
   5135	l2cap_send_move_chan_req(chan, remote_amp_id);
   5136}
   5137
   5138static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
   5139{
   5140	struct hci_chan *hchan = NULL;
   5141
   5142	/* Placeholder - get hci_chan for logical link */
   5143
   5144	if (hchan) {
   5145		if (hchan->state == BT_CONNECTED) {
   5146			/* Logical link is ready to go */
   5147			chan->hs_hcon = hchan->conn;
   5148			chan->hs_hcon->l2cap_data = chan->conn;
   5149			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
   5150			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
   5151
   5152			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
   5153		} else {
   5154			/* Wait for logical link to be ready */
   5155			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
   5156		}
   5157	} else {
   5158		/* Logical link not available */
   5159		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
   5160	}
   5161}
   5162
   5163static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
   5164{
   5165	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
   5166		u8 rsp_result;
   5167		if (result == -EINVAL)
   5168			rsp_result = L2CAP_MR_BAD_ID;
   5169		else
   5170			rsp_result = L2CAP_MR_NOT_ALLOWED;
   5171
   5172		l2cap_send_move_chan_rsp(chan, rsp_result);
   5173	}
   5174
   5175	chan->move_role = L2CAP_MOVE_ROLE_NONE;
   5176	chan->move_state = L2CAP_MOVE_STABLE;
   5177
   5178	/* Restart data transmission */
   5179	l2cap_ertm_send(chan);
   5180}
   5181
   5182/* Invoke with locked chan */
   5183void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
   5184{
   5185	u8 local_amp_id = chan->local_amp_id;
   5186	u8 remote_amp_id = chan->remote_amp_id;
   5187
   5188	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
   5189	       chan, result, local_amp_id, remote_amp_id);
   5190
   5191	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
   5192		return;
   5193
   5194	if (chan->state != BT_CONNECTED) {
   5195		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
   5196	} else if (result != L2CAP_MR_SUCCESS) {
   5197		l2cap_do_move_cancel(chan, result);
   5198	} else {
   5199		switch (chan->move_role) {
   5200		case L2CAP_MOVE_ROLE_INITIATOR:
   5201			l2cap_do_move_initiate(chan, local_amp_id,
   5202					       remote_amp_id);
   5203			break;
   5204		case L2CAP_MOVE_ROLE_RESPONDER:
   5205			l2cap_do_move_respond(chan, result);
   5206			break;
   5207		default:
   5208			l2cap_do_move_cancel(chan, result);
   5209			break;
   5210		}
   5211	}
   5212}
   5213
   5214static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
   5215					 struct l2cap_cmd_hdr *cmd,
   5216					 u16 cmd_len, void *data)
   5217{
   5218	struct l2cap_move_chan_req *req = data;
   5219	struct l2cap_move_chan_rsp rsp;
   5220	struct l2cap_chan *chan;
   5221	u16 icid = 0;
   5222	u16 result = L2CAP_MR_NOT_ALLOWED;
   5223
   5224	if (cmd_len != sizeof(*req))
   5225		return -EPROTO;
   5226
   5227	icid = le16_to_cpu(req->icid);
   5228
   5229	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
   5230
   5231	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
   5232		return -EINVAL;
   5233
   5234	chan = l2cap_get_chan_by_dcid(conn, icid);
   5235	if (!chan) {
   5236		rsp.icid = cpu_to_le16(icid);
   5237		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
   5238		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
   5239			       sizeof(rsp), &rsp);
   5240		return 0;
   5241	}
   5242
   5243	chan->ident = cmd->ident;
   5244
   5245	if (chan->scid < L2CAP_CID_DYN_START ||
   5246	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
   5247	    (chan->mode != L2CAP_MODE_ERTM &&
   5248	     chan->mode != L2CAP_MODE_STREAMING)) {
   5249		result = L2CAP_MR_NOT_ALLOWED;
   5250		goto send_move_response;
   5251	}
   5252
   5253	if (chan->local_amp_id == req->dest_amp_id) {
   5254		result = L2CAP_MR_SAME_ID;
   5255		goto send_move_response;
   5256	}
   5257
   5258	if (req->dest_amp_id != AMP_ID_BREDR) {
   5259		struct hci_dev *hdev;
   5260		hdev = hci_dev_get(req->dest_amp_id);
   5261		if (!hdev || hdev->dev_type != HCI_AMP ||
   5262		    !test_bit(HCI_UP, &hdev->flags)) {
   5263			if (hdev)
   5264				hci_dev_put(hdev);
   5265
   5266			result = L2CAP_MR_BAD_ID;
   5267			goto send_move_response;
   5268		}
   5269		hci_dev_put(hdev);
   5270	}
   5271
   5272	/* Detect a move collision.  Only send a collision response
   5273	 * if this side has "lost", otherwise proceed with the move.
   5274	 * The winner has the larger bd_addr.
   5275	 */
   5276	if ((__chan_is_moving(chan) ||
   5277	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
   5278	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
   5279		result = L2CAP_MR_COLLISION;
   5280		goto send_move_response;
   5281	}
   5282
   5283	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
   5284	l2cap_move_setup(chan);
   5285	chan->move_id = req->dest_amp_id;
   5286
   5287	if (req->dest_amp_id == AMP_ID_BREDR) {
   5288		/* Moving to BR/EDR */
   5289		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
   5290			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
   5291			result = L2CAP_MR_PEND;
   5292		} else {
   5293			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
   5294			result = L2CAP_MR_SUCCESS;
   5295		}
   5296	} else {
   5297		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
   5298		/* Placeholder - uncomment when amp functions are available */
   5299		/*amp_accept_physical(chan, req->dest_amp_id);*/
   5300		result = L2CAP_MR_PEND;
   5301	}
   5302
   5303send_move_response:
   5304	l2cap_send_move_chan_rsp(chan, result);
   5305
   5306	l2cap_chan_unlock(chan);
   5307
   5308	return 0;
   5309}
   5310
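        /* Advance the AMP channel-move state machine after a Move Channel
         * Response with a success or pending result.
         */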
   5311static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
   5312{
   5313	struct l2cap_chan *chan;
   5314	struct hci_chan *hchan = NULL;
   5315
   5316	chan = l2cap_get_chan_by_scid(conn, icid);
   5317	if (!chan) {
   5318		l2cap_send_move_chan_cfm_icid(conn, icid);
   5319		return;
   5320	}
   5321
   5322	__clear_chan_timer(chan);
   5323	if (result == L2CAP_MR_PEND)
   5324		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
   5325
   5326	switch (chan->move_state) {
   5327	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
   5328		/* Move confirm will be sent when logical link
   5329		 * is complete.
   5330		 */
   5331		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
   5332		break;
   5333	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
   5334		if (result == L2CAP_MR_PEND) {
   5335			break;
   5336		} else if (test_bit(CONN_LOCAL_BUSY,
   5337				    &chan->conn_state)) {
   5338			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
   5339		} else {
   5340			/* Logical link is up or moving to BR/EDR,
   5341			 * proceed with move
   5342			 */
   5343			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
   5344			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
   5345		}
   5346		break;
   5347	case L2CAP_MOVE_WAIT_RSP:
   5348		/* Moving to AMP */
   5349		if (result == L2CAP_MR_SUCCESS) {
   5350			/* Remote is ready, send confirm immediately
   5351			 * after logical link is ready
   5352			 */
   5353			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
   5354		} else {
   5355			/* Both logical link and move success
   5356			 * are required to confirm
   5357			 */
   5358			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
   5359		}
   5360
   5361		/* Placeholder - get hci_chan for logical link */
   5362		if (!hchan) {
   5363			/* Logical link not available */
   5364			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
   5365			break;
   5366		}
   5367
   5368		/* If the logical link is not yet connected, do not
   5369		 * send confirmation.
   5370		 */
   5371		if (hchan->state != BT_CONNECTED)
   5372			break;
   5373
   5374		/* Logical link is already ready to go */
   5375
   5376		chan->hs_hcon = hchan->conn;
   5377		chan->hs_hcon->l2cap_data = chan->conn;
   5378
   5379		if (result == L2CAP_MR_SUCCESS) {
   5380			/* Can confirm now */
   5381			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
   5382		} else {
   5383			/* Now only need move success
   5384			 * to confirm
   5385			 */
   5386			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
   5387		}
   5388
   5389		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
   5390		break;
   5391	default:
   5392		/* Any other amp move state means the move failed. */
   5393		chan->move_id = chan->local_amp_id;
   5394		l2cap_move_done(chan);
   5395		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
   5396	}
   5397
   5398	l2cap_chan_unlock(chan);
   5399}
   5400
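        /* Handle a failed Move Channel Response: on a collision the initiator
         * becomes the responder, otherwise the move is cancelled, and an
         * unconfirmed Move Channel Confirmation is sent.
         */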
   5401static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
   5402			    u16 result)
   5403{
   5404	struct l2cap_chan *chan;
   5405
   5406	chan = l2cap_get_chan_by_ident(conn, ident);
   5407	if (!chan) {
    5408		/* Could not locate the channel; the icid is our best guess */
   5409		l2cap_send_move_chan_cfm_icid(conn, icid);
   5410		return;
   5411	}
   5412
   5413	__clear_chan_timer(chan);
   5414
   5415	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
   5416		if (result == L2CAP_MR_COLLISION) {
   5417			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
   5418		} else {
   5419			/* Cleanup - cancel move */
   5420			chan->move_id = chan->local_amp_id;
   5421			l2cap_move_done(chan);
   5422		}
   5423	}
   5424
   5425	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
   5426
   5427	l2cap_chan_unlock(chan);
   5428}
   5429
   5430static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
   5431				  struct l2cap_cmd_hdr *cmd,
   5432				  u16 cmd_len, void *data)
   5433{
   5434	struct l2cap_move_chan_rsp *rsp = data;
   5435	u16 icid, result;
   5436
   5437	if (cmd_len != sizeof(*rsp))
   5438		return -EPROTO;
   5439
   5440	icid = le16_to_cpu(rsp->icid);
   5441	result = le16_to_cpu(rsp->result);
   5442
   5443	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
   5444
   5445	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
   5446		l2cap_move_continue(conn, icid, result);
   5447	else
   5448		l2cap_move_fail(conn, cmd->ident, icid, result);
   5449
   5450	return 0;
   5451}
   5452
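       /* Handle a Move Channel Confirmation.  The confirmed/unconfirmed
        * result decides whether the channel switches to the new controller or
        * falls back to the old one, and a confirmation response is always
        * sent, even when the ICID does not match any known channel.
        */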
   5453static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
   5454				      struct l2cap_cmd_hdr *cmd,
   5455				      u16 cmd_len, void *data)
   5456{
   5457	struct l2cap_move_chan_cfm *cfm = data;
   5458	struct l2cap_chan *chan;
   5459	u16 icid, result;
   5460
   5461	if (cmd_len != sizeof(*cfm))
   5462		return -EPROTO;
   5463
   5464	icid = le16_to_cpu(cfm->icid);
   5465	result = le16_to_cpu(cfm->result);
   5466
   5467	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
   5468
   5469	chan = l2cap_get_chan_by_dcid(conn, icid);
   5470	if (!chan) {
   5471		/* Spec requires a response even if the icid was not found */
   5472		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
   5473		return 0;
   5474	}
   5475
   5476	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
   5477		if (result == L2CAP_MC_CONFIRMED) {
   5478			chan->local_amp_id = chan->move_id;
   5479			if (chan->local_amp_id == AMP_ID_BREDR)
   5480				__release_logical_link(chan);
   5481		} else {
   5482			chan->move_id = chan->local_amp_id;
   5483		}
   5484
   5485		l2cap_move_done(chan);
   5486	}
   5487
   5488	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
   5489
   5490	l2cap_chan_unlock(chan);
   5491
   5492	return 0;
   5493}
   5494
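       /* Handle a Move Channel Confirmation Response, which finalizes the
        * move locally: the channel adopts the new controller id and, when
        * moving back to BR/EDR, the AMP logical link is released.  Unknown
        * ICIDs are silently ignored.
        */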
   5495static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
   5496						 struct l2cap_cmd_hdr *cmd,
   5497						 u16 cmd_len, void *data)
   5498{
   5499	struct l2cap_move_chan_cfm_rsp *rsp = data;
   5500	struct l2cap_chan *chan;
   5501	u16 icid;
   5502
   5503	if (cmd_len != sizeof(*rsp))
   5504		return -EPROTO;
   5505
   5506	icid = le16_to_cpu(rsp->icid);
   5507
   5508	BT_DBG("icid 0x%4.4x", icid);
   5509
   5510	chan = l2cap_get_chan_by_scid(conn, icid);
   5511	if (!chan)
   5512		return 0;
   5513
   5514	__clear_chan_timer(chan);
   5515
   5516	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
   5517		chan->local_amp_id = chan->move_id;
   5518
   5519		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
   5520			__release_logical_link(chan);
   5521
   5522		l2cap_move_done(chan);
   5523	}
   5524
   5525	l2cap_chan_unlock(chan);
   5526
   5527	return 0;
   5528}
   5529
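       /* Handle an LE Connection Parameter Update Request.  The request is
        * only honored when we are central (HCI_ROLE_MASTER); the parameters
        * are validated with hci_check_conn_params() and, if accepted, applied
        * via hci_le_conn_update() and reported through mgmt.
        */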
   5530static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
   5531					      struct l2cap_cmd_hdr *cmd,
   5532					      u16 cmd_len, u8 *data)
   5533{
   5534	struct hci_conn *hcon = conn->hcon;
   5535	struct l2cap_conn_param_update_req *req;
   5536	struct l2cap_conn_param_update_rsp rsp;
   5537	u16 min, max, latency, to_multiplier;
   5538	int err;
   5539
   5540	if (hcon->role != HCI_ROLE_MASTER)
   5541		return -EINVAL;
   5542
   5543	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
   5544		return -EPROTO;
   5545
   5546	req = (struct l2cap_conn_param_update_req *) data;
   5547	min		= __le16_to_cpu(req->min);
   5548	max		= __le16_to_cpu(req->max);
   5549	latency		= __le16_to_cpu(req->latency);
   5550	to_multiplier	= __le16_to_cpu(req->to_multiplier);
   5551
   5552	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
   5553	       min, max, latency, to_multiplier);
   5554
   5555	memset(&rsp, 0, sizeof(rsp));
   5556
   5557	err = hci_check_conn_params(min, max, latency, to_multiplier);
   5558	if (err)
   5559		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
   5560	else
   5561		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
   5562
   5563	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
   5564		       sizeof(rsp), &rsp);
   5565
   5566	if (!err) {
   5567		u8 store_hint;
   5568
   5569		store_hint = hci_le_conn_update(hcon, min, max, latency,
   5570						to_multiplier);
   5571		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
   5572				    store_hint, min, max, latency,
   5573				    to_multiplier);
   5574
   5575	}
   5576
   5577	return 0;
   5578}
   5579
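       /* Handle an LE Credit Based Connection Response.  On success the
        * channel adopts the peer's DCID, MTU, MPS and initial credits and
        * becomes ready.  Authentication/encryption failures raise the
        * required security level and kick off SMP so a new request can be
        * sent later, unless MITM protection is already in place; any other
        * result closes the channel.
        */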
   5580static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
   5581				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   5582				u8 *data)
   5583{
   5584	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
   5585	struct hci_conn *hcon = conn->hcon;
   5586	u16 dcid, mtu, mps, credits, result;
   5587	struct l2cap_chan *chan;
   5588	int err, sec_level;
   5589
   5590	if (cmd_len < sizeof(*rsp))
   5591		return -EPROTO;
   5592
   5593	dcid    = __le16_to_cpu(rsp->dcid);
   5594	mtu     = __le16_to_cpu(rsp->mtu);
   5595	mps     = __le16_to_cpu(rsp->mps);
   5596	credits = __le16_to_cpu(rsp->credits);
   5597	result  = __le16_to_cpu(rsp->result);
   5598
   5599	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
   5600					   dcid < L2CAP_CID_DYN_START ||
   5601					   dcid > L2CAP_CID_LE_DYN_END))
   5602		return -EPROTO;
   5603
   5604	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
   5605	       dcid, mtu, mps, credits, result);
   5606
   5607	mutex_lock(&conn->chan_lock);
   5608
   5609	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
   5610	if (!chan) {
   5611		err = -EBADSLT;
   5612		goto unlock;
   5613	}
   5614
   5615	err = 0;
   5616
   5617	l2cap_chan_lock(chan);
   5618
   5619	switch (result) {
   5620	case L2CAP_CR_LE_SUCCESS:
   5621		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
   5622			err = -EBADSLT;
   5623			break;
   5624		}
   5625
   5626		chan->ident = 0;
   5627		chan->dcid = dcid;
   5628		chan->omtu = mtu;
   5629		chan->remote_mps = mps;
   5630		chan->tx_credits = credits;
   5631		l2cap_chan_ready(chan);
   5632		break;
   5633
   5634	case L2CAP_CR_LE_AUTHENTICATION:
   5635	case L2CAP_CR_LE_ENCRYPTION:
   5636		/* If we already have MITM protection we can't do
   5637		 * anything.
   5638		 */
   5639		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
   5640			l2cap_chan_del(chan, ECONNREFUSED);
   5641			break;
   5642		}
   5643
   5644		sec_level = hcon->sec_level + 1;
   5645		if (chan->sec_level < sec_level)
   5646			chan->sec_level = sec_level;
   5647
   5648		/* We'll need to send a new Connect Request */
   5649		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
   5650
   5651		smp_conn_security(hcon, chan->sec_level);
   5652		break;
   5653
   5654	default:
   5655		l2cap_chan_del(chan, ECONNREFUSED);
   5656		break;
   5657	}
   5658
   5659	l2cap_chan_unlock(chan);
   5660
   5661unlock:
   5662	mutex_unlock(&conn->chan_lock);
   5663
   5664	return err;
   5665}
   5666
   5667static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
   5668				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   5669				      u8 *data)
   5670{
   5671	int err = 0;
   5672
   5673	switch (cmd->code) {
   5674	case L2CAP_COMMAND_REJ:
   5675		l2cap_command_rej(conn, cmd, cmd_len, data);
   5676		break;
   5677
   5678	case L2CAP_CONN_REQ:
   5679		err = l2cap_connect_req(conn, cmd, cmd_len, data);
   5680		break;
   5681
   5682	case L2CAP_CONN_RSP:
   5683	case L2CAP_CREATE_CHAN_RSP:
   5684		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
   5685		break;
   5686
   5687	case L2CAP_CONF_REQ:
   5688		err = l2cap_config_req(conn, cmd, cmd_len, data);
   5689		break;
   5690
   5691	case L2CAP_CONF_RSP:
   5692		l2cap_config_rsp(conn, cmd, cmd_len, data);
   5693		break;
   5694
   5695	case L2CAP_DISCONN_REQ:
   5696		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
   5697		break;
   5698
   5699	case L2CAP_DISCONN_RSP:
   5700		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
   5701		break;
   5702
   5703	case L2CAP_ECHO_REQ:
   5704		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
   5705		break;
   5706
   5707	case L2CAP_ECHO_RSP:
   5708		break;
   5709
   5710	case L2CAP_INFO_REQ:
   5711		err = l2cap_information_req(conn, cmd, cmd_len, data);
   5712		break;
   5713
   5714	case L2CAP_INFO_RSP:
   5715		l2cap_information_rsp(conn, cmd, cmd_len, data);
   5716		break;
   5717
   5718	case L2CAP_CREATE_CHAN_REQ:
   5719		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
   5720		break;
   5721
   5722	case L2CAP_MOVE_CHAN_REQ:
   5723		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
   5724		break;
   5725
   5726	case L2CAP_MOVE_CHAN_RSP:
   5727		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
   5728		break;
   5729
   5730	case L2CAP_MOVE_CHAN_CFM:
   5731		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
   5732		break;
   5733
   5734	case L2CAP_MOVE_CHAN_CFM_RSP:
   5735		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
   5736		break;
   5737
   5738	default:
   5739		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
   5740		err = -EINVAL;
   5741		break;
   5742	}
   5743
   5744	return err;
   5745}
   5746
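       /* Handle an LE Credit Based Connection Request.  A listening channel
        * for the PSM is looked up, security and the dynamic CID range are
        * checked, and a new channel is created.  With FLAG_DEFER_SETUP the
        * decision is deferred to userspace and no response is sent yet;
        * otherwise the channel is made ready and the response goes out
        * immediately.
        */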
   5747static int l2cap_le_connect_req(struct l2cap_conn *conn,
   5748				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   5749				u8 *data)
   5750{
   5751	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
   5752	struct l2cap_le_conn_rsp rsp;
   5753	struct l2cap_chan *chan, *pchan;
   5754	u16 dcid, scid, credits, mtu, mps;
   5755	__le16 psm;
   5756	u8 result;
   5757
   5758	if (cmd_len != sizeof(*req))
   5759		return -EPROTO;
   5760
   5761	scid = __le16_to_cpu(req->scid);
   5762	mtu  = __le16_to_cpu(req->mtu);
   5763	mps  = __le16_to_cpu(req->mps);
   5764	psm  = req->psm;
   5765	dcid = 0;
   5766	credits = 0;
   5767
   5768	if (mtu < 23 || mps < 23)
   5769		return -EPROTO;
   5770
   5771	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
   5772	       scid, mtu, mps);
   5773
    5774	/* Check if we have a socket listening on this psm */
   5775	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
   5776					 &conn->hcon->dst, LE_LINK);
   5777	if (!pchan) {
   5778		result = L2CAP_CR_LE_BAD_PSM;
   5779		chan = NULL;
   5780		goto response;
   5781	}
   5782
   5783	mutex_lock(&conn->chan_lock);
   5784	l2cap_chan_lock(pchan);
   5785
   5786	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
   5787				     SMP_ALLOW_STK)) {
   5788		result = L2CAP_CR_LE_AUTHENTICATION;
   5789		chan = NULL;
   5790		goto response_unlock;
   5791	}
   5792
   5793	/* Check for valid dynamic CID range */
   5794	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
   5795		result = L2CAP_CR_LE_INVALID_SCID;
   5796		chan = NULL;
   5797		goto response_unlock;
   5798	}
   5799
    5800	/* Check if we already have a channel with that dcid */
   5801	if (__l2cap_get_chan_by_dcid(conn, scid)) {
   5802		result = L2CAP_CR_LE_SCID_IN_USE;
   5803		chan = NULL;
   5804		goto response_unlock;
   5805	}
   5806
   5807	chan = pchan->ops->new_connection(pchan);
   5808	if (!chan) {
   5809		result = L2CAP_CR_LE_NO_MEM;
   5810		goto response_unlock;
   5811	}
   5812
   5813	bacpy(&chan->src, &conn->hcon->src);
   5814	bacpy(&chan->dst, &conn->hcon->dst);
   5815	chan->src_type = bdaddr_src_type(conn->hcon);
   5816	chan->dst_type = bdaddr_dst_type(conn->hcon);
   5817	chan->psm  = psm;
   5818	chan->dcid = scid;
   5819	chan->omtu = mtu;
   5820	chan->remote_mps = mps;
   5821
   5822	__l2cap_chan_add(conn, chan);
   5823
   5824	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
   5825
   5826	dcid = chan->scid;
   5827	credits = chan->rx_credits;
   5828
   5829	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
   5830
   5831	chan->ident = cmd->ident;
   5832
   5833	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
   5834		l2cap_state_change(chan, BT_CONNECT2);
   5835		/* The following result value is actually not defined
   5836		 * for LE CoC but we use it to let the function know
   5837		 * that it should bail out after doing its cleanup
   5838		 * instead of sending a response.
   5839		 */
   5840		result = L2CAP_CR_PEND;
   5841		chan->ops->defer(chan);
   5842	} else {
   5843		l2cap_chan_ready(chan);
   5844		result = L2CAP_CR_LE_SUCCESS;
   5845	}
   5846
   5847response_unlock:
   5848	l2cap_chan_unlock(pchan);
   5849	mutex_unlock(&conn->chan_lock);
   5850	l2cap_chan_put(pchan);
   5851
   5852	if (result == L2CAP_CR_PEND)
   5853		return 0;
   5854
   5855response:
   5856	if (chan) {
   5857		rsp.mtu = cpu_to_le16(chan->imtu);
   5858		rsp.mps = cpu_to_le16(chan->mps);
   5859	} else {
   5860		rsp.mtu = 0;
   5861		rsp.mps = 0;
   5862	}
   5863
   5864	rsp.dcid    = cpu_to_le16(dcid);
   5865	rsp.credits = cpu_to_le16(credits);
   5866	rsp.result  = cpu_to_le16(result);
   5867
   5868	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
   5869
   5870	return 0;
   5871}
   5872
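       /* Handle an LE Flow Control Credit packet.  Credits are additive and
        * the running total may never exceed LE_FLOWCTL_MAX_CREDITS (65535):
        * e.g. a peer that has already granted 65530 credits may add at most 5
        * more before the channel is disconnected for credit overflow.
        */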
   5873static inline int l2cap_le_credits(struct l2cap_conn *conn,
   5874				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   5875				   u8 *data)
   5876{
   5877	struct l2cap_le_credits *pkt;
   5878	struct l2cap_chan *chan;
   5879	u16 cid, credits, max_credits;
   5880
   5881	if (cmd_len != sizeof(*pkt))
   5882		return -EPROTO;
   5883
   5884	pkt = (struct l2cap_le_credits *) data;
   5885	cid	= __le16_to_cpu(pkt->cid);
   5886	credits	= __le16_to_cpu(pkt->credits);
   5887
   5888	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
   5889
   5890	chan = l2cap_get_chan_by_dcid(conn, cid);
   5891	if (!chan)
   5892		return -EBADSLT;
   5893
   5894	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
   5895	if (credits > max_credits) {
   5896		BT_ERR("LE credits overflow");
   5897		l2cap_send_disconn_req(chan, ECONNRESET);
   5898		l2cap_chan_unlock(chan);
   5899
   5900		/* Return 0 so that we don't trigger an unnecessary
   5901		 * command reject packet.
   5902		 */
   5903		return 0;
   5904	}
   5905
   5906	chan->tx_credits += credits;
   5907
   5908	/* Resume sending */
   5909	l2cap_le_flowctl_send(chan);
   5910
   5911	if (chan->tx_credits)
   5912		chan->ops->resume(chan);
   5913
   5914	l2cap_chan_unlock(chan);
   5915
   5916	return 0;
   5917}
   5918
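       /* Handle an Enhanced Credit Based Connection Request, which may open
        * up to L2CAP_ECRED_MAX_CID channels at once.  Each requested SCID is
        * validated individually; a failing entry is reported with a DCID of
        * 0x0000 while the remaining channels are still created.
        */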
   5919static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
   5920				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   5921				       u8 *data)
   5922{
   5923	struct l2cap_ecred_conn_req *req = (void *) data;
   5924	struct {
   5925		struct l2cap_ecred_conn_rsp rsp;
   5926		__le16 dcid[L2CAP_ECRED_MAX_CID];
   5927	} __packed pdu;
   5928	struct l2cap_chan *chan, *pchan;
   5929	u16 mtu, mps;
   5930	__le16 psm;
   5931	u8 result, len = 0;
   5932	int i, num_scid;
   5933	bool defer = false;
   5934
   5935	if (!enable_ecred)
   5936		return -EINVAL;
   5937
   5938	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
   5939		result = L2CAP_CR_LE_INVALID_PARAMS;
   5940		goto response;
   5941	}
   5942
   5943	cmd_len -= sizeof(*req);
   5944	num_scid = cmd_len / sizeof(u16);
   5945
   5946	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
   5947		result = L2CAP_CR_LE_INVALID_PARAMS;
   5948		goto response;
   5949	}
   5950
   5951	mtu  = __le16_to_cpu(req->mtu);
   5952	mps  = __le16_to_cpu(req->mps);
   5953
   5954	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
   5955		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
   5956		goto response;
   5957	}
   5958
   5959	psm  = req->psm;
   5960
   5961	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
   5962
   5963	memset(&pdu, 0, sizeof(pdu));
   5964
    5965	/* Check if we have a socket listening on this psm */
   5966	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
   5967					 &conn->hcon->dst, LE_LINK);
   5968	if (!pchan) {
   5969		result = L2CAP_CR_LE_BAD_PSM;
   5970		goto response;
   5971	}
   5972
   5973	mutex_lock(&conn->chan_lock);
   5974	l2cap_chan_lock(pchan);
   5975
   5976	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
   5977				     SMP_ALLOW_STK)) {
   5978		result = L2CAP_CR_LE_AUTHENTICATION;
   5979		goto unlock;
   5980	}
   5981
   5982	result = L2CAP_CR_LE_SUCCESS;
   5983
   5984	for (i = 0; i < num_scid; i++) {
   5985		u16 scid = __le16_to_cpu(req->scid[i]);
   5986
   5987		BT_DBG("scid[%d] 0x%4.4x", i, scid);
   5988
   5989		pdu.dcid[i] = 0x0000;
   5990		len += sizeof(*pdu.dcid);
   5991
   5992		/* Check for valid dynamic CID range */
   5993		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
   5994			result = L2CAP_CR_LE_INVALID_SCID;
   5995			continue;
   5996		}
   5997
    5998		/* Check if we already have a channel with that dcid */
   5999		if (__l2cap_get_chan_by_dcid(conn, scid)) {
   6000			result = L2CAP_CR_LE_SCID_IN_USE;
   6001			continue;
   6002		}
   6003
   6004		chan = pchan->ops->new_connection(pchan);
   6005		if (!chan) {
   6006			result = L2CAP_CR_LE_NO_MEM;
   6007			continue;
   6008		}
   6009
   6010		bacpy(&chan->src, &conn->hcon->src);
   6011		bacpy(&chan->dst, &conn->hcon->dst);
   6012		chan->src_type = bdaddr_src_type(conn->hcon);
   6013		chan->dst_type = bdaddr_dst_type(conn->hcon);
   6014		chan->psm  = psm;
   6015		chan->dcid = scid;
   6016		chan->omtu = mtu;
   6017		chan->remote_mps = mps;
   6018
   6019		__l2cap_chan_add(conn, chan);
   6020
   6021		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
   6022
   6023		/* Init response */
   6024		if (!pdu.rsp.credits) {
   6025			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
   6026			pdu.rsp.mps = cpu_to_le16(chan->mps);
   6027			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
   6028		}
   6029
   6030		pdu.dcid[i] = cpu_to_le16(chan->scid);
   6031
   6032		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
   6033
   6034		chan->ident = cmd->ident;
   6035
   6036		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
   6037			l2cap_state_change(chan, BT_CONNECT2);
   6038			defer = true;
   6039			chan->ops->defer(chan);
   6040		} else {
   6041			l2cap_chan_ready(chan);
   6042		}
   6043	}
   6044
   6045unlock:
   6046	l2cap_chan_unlock(pchan);
   6047	mutex_unlock(&conn->chan_lock);
   6048	l2cap_chan_put(pchan);
   6049
   6050response:
   6051	pdu.rsp.result = cpu_to_le16(result);
   6052
   6053	if (defer)
   6054		return 0;
   6055
   6056	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
   6057		       sizeof(pdu.rsp) + len, &pdu);
   6058
   6059	return 0;
   6060}
   6061
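       /* Handle an Enhanced Credit Based Connection Response.  Every local
        * channel created under this request identifier is matched against the
        * returned DCID list: valid entries become ready, an already used DCID
        * tears down both affected channels, and security failures trigger an
        * SMP upgrade so the request can be retried; other refusals close the
        * channel.
        */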
   6062static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
   6063				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   6064				       u8 *data)
   6065{
   6066	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
   6067	struct hci_conn *hcon = conn->hcon;
   6068	u16 mtu, mps, credits, result;
   6069	struct l2cap_chan *chan, *tmp;
   6070	int err = 0, sec_level;
   6071	int i = 0;
   6072
   6073	if (cmd_len < sizeof(*rsp))
   6074		return -EPROTO;
   6075
   6076	mtu     = __le16_to_cpu(rsp->mtu);
   6077	mps     = __le16_to_cpu(rsp->mps);
   6078	credits = __le16_to_cpu(rsp->credits);
   6079	result  = __le16_to_cpu(rsp->result);
   6080
   6081	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
   6082	       result);
   6083
   6084	mutex_lock(&conn->chan_lock);
   6085
   6086	cmd_len -= sizeof(*rsp);
   6087
   6088	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
   6089		u16 dcid;
   6090
   6091		if (chan->ident != cmd->ident ||
   6092		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
   6093		    chan->state == BT_CONNECTED)
   6094			continue;
   6095
   6096		l2cap_chan_lock(chan);
   6097
   6098		/* Check that there is a dcid for each pending channel */
   6099		if (cmd_len < sizeof(dcid)) {
   6100			l2cap_chan_del(chan, ECONNREFUSED);
   6101			l2cap_chan_unlock(chan);
   6102			continue;
   6103		}
   6104
   6105		dcid = __le16_to_cpu(rsp->dcid[i++]);
   6106		cmd_len -= sizeof(u16);
   6107
   6108		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
   6109
   6110		/* Check if dcid is already in use */
   6111		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
    6112			/* If a device receives an
   6113			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
   6114			 * already-assigned Destination CID, then both the
   6115			 * original channel and the new channel shall be
   6116			 * immediately discarded and not used.
   6117			 */
   6118			l2cap_chan_del(chan, ECONNREFUSED);
   6119			l2cap_chan_unlock(chan);
   6120			chan = __l2cap_get_chan_by_dcid(conn, dcid);
   6121			l2cap_chan_lock(chan);
   6122			l2cap_chan_del(chan, ECONNRESET);
   6123			l2cap_chan_unlock(chan);
   6124			continue;
   6125		}
   6126
   6127		switch (result) {
   6128		case L2CAP_CR_LE_AUTHENTICATION:
   6129		case L2CAP_CR_LE_ENCRYPTION:
   6130			/* If we already have MITM protection we can't do
   6131			 * anything.
   6132			 */
   6133			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
   6134				l2cap_chan_del(chan, ECONNREFUSED);
   6135				break;
   6136			}
   6137
   6138			sec_level = hcon->sec_level + 1;
   6139			if (chan->sec_level < sec_level)
   6140				chan->sec_level = sec_level;
   6141
   6142			/* We'll need to send a new Connect Request */
   6143			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
   6144
   6145			smp_conn_security(hcon, chan->sec_level);
   6146			break;
   6147
   6148		case L2CAP_CR_LE_BAD_PSM:
   6149			l2cap_chan_del(chan, ECONNREFUSED);
   6150			break;
   6151
   6152		default:
    6153			/* If dcid was not set it means the channel was refused */
   6154			if (!dcid) {
   6155				l2cap_chan_del(chan, ECONNREFUSED);
   6156				break;
   6157			}
   6158
   6159			chan->ident = 0;
   6160			chan->dcid = dcid;
   6161			chan->omtu = mtu;
   6162			chan->remote_mps = mps;
   6163			chan->tx_credits = credits;
   6164			l2cap_chan_ready(chan);
   6165			break;
   6166		}
   6167
   6168		l2cap_chan_unlock(chan);
   6169	}
   6170
   6171	mutex_unlock(&conn->chan_lock);
   6172
   6173	return err;
   6174}
   6175
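       /* Handle an Enhanced Credit Based Reconfigure Request.  The new MTU
        * and MPS are applied to every channel listed in the request; an
        * attempt to decrease the MTU of an existing channel is reported as
        * invalid.
        */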
   6176static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
   6177					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   6178					 u8 *data)
   6179{
   6180	struct l2cap_ecred_reconf_req *req = (void *) data;
   6181	struct l2cap_ecred_reconf_rsp rsp;
   6182	u16 mtu, mps, result;
   6183	struct l2cap_chan *chan;
   6184	int i, num_scid;
   6185
   6186	if (!enable_ecred)
   6187		return -EINVAL;
   6188
    6189	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
   6190		result = L2CAP_CR_LE_INVALID_PARAMS;
   6191		goto respond;
   6192	}
   6193
   6194	mtu = __le16_to_cpu(req->mtu);
   6195	mps = __le16_to_cpu(req->mps);
   6196
   6197	BT_DBG("mtu %u mps %u", mtu, mps);
   6198
   6199	if (mtu < L2CAP_ECRED_MIN_MTU) {
   6200		result = L2CAP_RECONF_INVALID_MTU;
   6201		goto respond;
   6202	}
   6203
   6204	if (mps < L2CAP_ECRED_MIN_MPS) {
   6205		result = L2CAP_RECONF_INVALID_MPS;
   6206		goto respond;
   6207	}
   6208
   6209	cmd_len -= sizeof(*req);
   6210	num_scid = cmd_len / sizeof(u16);
   6211	result = L2CAP_RECONF_SUCCESS;
   6212
   6213	for (i = 0; i < num_scid; i++) {
   6214		u16 scid;
   6215
   6216		scid = __le16_to_cpu(req->scid[i]);
   6217		if (!scid)
   6218			return -EPROTO;
   6219
   6220		chan = __l2cap_get_chan_by_dcid(conn, scid);
   6221		if (!chan)
   6222			continue;
   6223
   6224		/* If the MTU value is decreased for any of the included
   6225		 * channels, then the receiver shall disconnect all
   6226		 * included channels.
   6227		 */
   6228		if (chan->omtu > mtu) {
   6229			BT_ERR("chan %p decreased MTU %u -> %u", chan,
   6230			       chan->omtu, mtu);
   6231			result = L2CAP_RECONF_INVALID_MTU;
   6232		}
   6233
   6234		chan->omtu = mtu;
   6235		chan->remote_mps = mps;
   6236	}
   6237
   6238respond:
   6239	rsp.result = cpu_to_le16(result);
   6240
   6241	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
   6242		       &rsp);
   6243
   6244	return 0;
   6245}
   6246
   6247static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
   6248					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   6249					 u8 *data)
   6250{
   6251	struct l2cap_chan *chan, *tmp;
   6252	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
   6253	u16 result;
   6254
   6255	if (cmd_len < sizeof(*rsp))
   6256		return -EPROTO;
   6257
   6258	result = __le16_to_cpu(rsp->result);
   6259
   6260	BT_DBG("result 0x%4.4x", rsp->result);
   6261
   6262	if (!result)
   6263		return 0;
   6264
   6265	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
   6266		if (chan->ident != cmd->ident)
   6267			continue;
   6268
   6269		l2cap_chan_del(chan, ECONNRESET);
   6270	}
   6271
   6272	return 0;
   6273}
   6274
   6275static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
   6276				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   6277				       u8 *data)
   6278{
   6279	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
   6280	struct l2cap_chan *chan;
   6281
   6282	if (cmd_len < sizeof(*rej))
   6283		return -EPROTO;
   6284
   6285	mutex_lock(&conn->chan_lock);
   6286
   6287	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
   6288	if (!chan)
   6289		goto done;
   6290
   6291	l2cap_chan_lock(chan);
   6292	l2cap_chan_del(chan, ECONNREFUSED);
   6293	l2cap_chan_unlock(chan);
   6294
   6295done:
   6296	mutex_unlock(&conn->chan_lock);
   6297	return 0;
   6298}
   6299
   6300static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
   6301				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
   6302				   u8 *data)
   6303{
   6304	int err = 0;
   6305
   6306	switch (cmd->code) {
   6307	case L2CAP_COMMAND_REJ:
   6308		l2cap_le_command_rej(conn, cmd, cmd_len, data);
   6309		break;
   6310
   6311	case L2CAP_CONN_PARAM_UPDATE_REQ:
   6312		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
   6313		break;
   6314
   6315	case L2CAP_CONN_PARAM_UPDATE_RSP:
   6316		break;
   6317
   6318	case L2CAP_LE_CONN_RSP:
   6319		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
   6320		break;
   6321
   6322	case L2CAP_LE_CONN_REQ:
   6323		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
   6324		break;
   6325
   6326	case L2CAP_LE_CREDITS:
   6327		err = l2cap_le_credits(conn, cmd, cmd_len, data);
   6328		break;
   6329
   6330	case L2CAP_ECRED_CONN_REQ:
   6331		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
   6332		break;
   6333
   6334	case L2CAP_ECRED_CONN_RSP:
   6335		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
   6336		break;
   6337
   6338	case L2CAP_ECRED_RECONF_REQ:
   6339		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
   6340		break;
   6341
   6342	case L2CAP_ECRED_RECONF_RSP:
   6343		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
   6344		break;
   6345
   6346	case L2CAP_DISCONN_REQ:
   6347		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
   6348		break;
   6349
   6350	case L2CAP_DISCONN_RSP:
   6351		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
   6352		break;
   6353
   6354	default:
   6355		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
   6356		err = -EINVAL;
   6357		break;
   6358	}
   6359
   6360	return err;
   6361}
   6362
   6363static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
   6364					struct sk_buff *skb)
   6365{
   6366	struct hci_conn *hcon = conn->hcon;
   6367	struct l2cap_cmd_hdr *cmd;
   6368	u16 len;
   6369	int err;
   6370
   6371	if (hcon->type != LE_LINK)
   6372		goto drop;
   6373
   6374	if (skb->len < L2CAP_CMD_HDR_SIZE)
   6375		goto drop;
   6376
   6377	cmd = (void *) skb->data;
   6378	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
   6379
   6380	len = le16_to_cpu(cmd->len);
   6381
   6382	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
   6383
   6384	if (len != skb->len || !cmd->ident) {
   6385		BT_DBG("corrupted command");
   6386		goto drop;
   6387	}
   6388
   6389	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
   6390	if (err) {
   6391		struct l2cap_cmd_rej_unk rej;
   6392
   6393		BT_ERR("Wrong link type (%d)", err);
   6394
   6395		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
   6396		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
   6397			       sizeof(rej), &rej);
   6398	}
   6399
   6400drop:
   6401	kfree_skb(skb);
   6402}
   6403
   6404static inline void l2cap_sig_channel(struct l2cap_conn *conn,
   6405				     struct sk_buff *skb)
   6406{
   6407	struct hci_conn *hcon = conn->hcon;
   6408	struct l2cap_cmd_hdr *cmd;
   6409	int err;
   6410
   6411	l2cap_raw_recv(conn, skb);
   6412
   6413	if (hcon->type != ACL_LINK)
   6414		goto drop;
   6415
   6416	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
   6417		u16 len;
   6418
   6419		cmd = (void *) skb->data;
   6420		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
   6421
   6422		len = le16_to_cpu(cmd->len);
   6423
   6424		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
   6425		       cmd->ident);
   6426
   6427		if (len > skb->len || !cmd->ident) {
   6428			BT_DBG("corrupted command");
   6429			break;
   6430		}
   6431
   6432		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
   6433		if (err) {
   6434			struct l2cap_cmd_rej_unk rej;
   6435
   6436			BT_ERR("Wrong link type (%d)", err);
   6437
   6438			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
   6439			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
   6440				       sizeof(rej), &rej);
   6441		}
   6442
   6443		skb_pull(skb, len);
   6444	}
   6445
   6446drop:
   6447	kfree_skb(skb);
   6448}
   6449
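       /* Verify the CRC-16 FCS of a received ERTM/streaming frame.  The
        * checksum covers the L2CAP header and the payload; the trailing FCS
        * field is trimmed off before the locally computed value is compared
        * with it.  Channels that do not use an FCS accept the frame as-is.
        */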
   6450static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
   6451{
   6452	u16 our_fcs, rcv_fcs;
   6453	int hdr_size;
   6454
   6455	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
   6456		hdr_size = L2CAP_EXT_HDR_SIZE;
   6457	else
   6458		hdr_size = L2CAP_ENH_HDR_SIZE;
   6459
   6460	if (chan->fcs == L2CAP_FCS_CRC16) {
   6461		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
   6462		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
   6463		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
   6464
   6465		if (our_fcs != rcv_fcs)
   6466			return -EBADMSG;
   6467	}
   6468	return 0;
   6469}
   6470
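       /* Answer a poll (P=1) from the peer with F=1: send an RNR while
        * locally busy, transmit any pending I-frames, and fall back to an RR
        * S-frame if the F-bit has not been carried by one of those frames.
        */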
   6471static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
   6472{
   6473	struct l2cap_ctrl control;
   6474
   6475	BT_DBG("chan %p", chan);
   6476
   6477	memset(&control, 0, sizeof(control));
   6478	control.sframe = 1;
   6479	control.final = 1;
   6480	control.reqseq = chan->buffer_seq;
   6481	set_bit(CONN_SEND_FBIT, &chan->conn_state);
   6482
   6483	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
   6484		control.super = L2CAP_SUPER_RNR;
   6485		l2cap_send_sframe(chan, &control);
   6486	}
   6487
   6488	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
   6489	    chan->unacked_frames > 0)
   6490		__set_retrans_timer(chan);
   6491
   6492	/* Send pending iframes */
   6493	l2cap_ertm_send(chan);
   6494
   6495	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
   6496	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
   6497		/* F-bit wasn't sent in an s-frame or i-frame yet, so
   6498		 * send it now.
   6499		 */
   6500		control.super = L2CAP_SUPER_RR;
   6501		l2cap_send_sframe(chan, &control);
   6502	}
   6503}
   6504
   6505static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
   6506			    struct sk_buff **last_frag)
   6507{
   6508	/* skb->len reflects data in skb as well as all fragments
   6509	 * skb->data_len reflects only data in fragments
   6510	 */
   6511	if (!skb_has_frag_list(skb))
   6512		skb_shinfo(skb)->frag_list = new_frag;
   6513
   6514	new_frag->next = NULL;
   6515
   6516	(*last_frag)->next = new_frag;
   6517	*last_frag = new_frag;
   6518
   6519	skb->len += new_frag->len;
   6520	skb->data_len += new_frag->len;
   6521	skb->truesize += new_frag->truesize;
   6522}
   6523
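       /* Reassemble a segmented SDU from I-frames according to their SAR
        * bits: unsegmented frames are delivered directly, a start frame
        * carries the total SDU length, and continue/end frames are appended
        * as fragments.  Any error frees both the fragment and the partial
        * SDU.
        */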
   6524static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
   6525				struct l2cap_ctrl *control)
   6526{
   6527	int err = -EINVAL;
   6528
   6529	switch (control->sar) {
   6530	case L2CAP_SAR_UNSEGMENTED:
   6531		if (chan->sdu)
   6532			break;
   6533
   6534		err = chan->ops->recv(chan, skb);
   6535		break;
   6536
   6537	case L2CAP_SAR_START:
   6538		if (chan->sdu)
   6539			break;
   6540
   6541		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
   6542			break;
   6543
   6544		chan->sdu_len = get_unaligned_le16(skb->data);
   6545		skb_pull(skb, L2CAP_SDULEN_SIZE);
   6546
   6547		if (chan->sdu_len > chan->imtu) {
   6548			err = -EMSGSIZE;
   6549			break;
   6550		}
   6551
   6552		if (skb->len >= chan->sdu_len)
   6553			break;
   6554
   6555		chan->sdu = skb;
   6556		chan->sdu_last_frag = skb;
   6557
   6558		skb = NULL;
   6559		err = 0;
   6560		break;
   6561
   6562	case L2CAP_SAR_CONTINUE:
   6563		if (!chan->sdu)
   6564			break;
   6565
   6566		append_skb_frag(chan->sdu, skb,
   6567				&chan->sdu_last_frag);
   6568		skb = NULL;
   6569
   6570		if (chan->sdu->len >= chan->sdu_len)
   6571			break;
   6572
   6573		err = 0;
   6574		break;
   6575
   6576	case L2CAP_SAR_END:
   6577		if (!chan->sdu)
   6578			break;
   6579
   6580		append_skb_frag(chan->sdu, skb,
   6581				&chan->sdu_last_frag);
   6582		skb = NULL;
   6583
   6584		if (chan->sdu->len != chan->sdu_len)
   6585			break;
   6586
   6587		err = chan->ops->recv(chan, chan->sdu);
   6588
   6589		if (!err) {
   6590			/* Reassembly complete */
   6591			chan->sdu = NULL;
   6592			chan->sdu_last_frag = NULL;
   6593			chan->sdu_len = 0;
   6594		}
   6595		break;
   6596	}
   6597
   6598	if (err) {
   6599		kfree_skb(skb);
   6600		kfree_skb(chan->sdu);
   6601		chan->sdu = NULL;
   6602		chan->sdu_last_frag = NULL;
   6603		chan->sdu_len = 0;
   6604	}
   6605
   6606	return err;
   6607}
   6608
   6609static int l2cap_resegment(struct l2cap_chan *chan)
   6610{
   6611	/* Placeholder */
   6612	return 0;
   6613}
   6614
   6615void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
   6616{
   6617	u8 event;
   6618
   6619	if (chan->mode != L2CAP_MODE_ERTM)
   6620		return;
   6621
   6622	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
   6623	l2cap_tx(chan, NULL, NULL, event);
   6624}
   6625
   6626static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
   6627{
   6628	int err = 0;
   6629	/* Pass sequential frames to l2cap_reassemble_sdu()
   6630	 * until a gap is encountered.
   6631	 */
   6632
   6633	BT_DBG("chan %p", chan);
   6634
   6635	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
   6636		struct sk_buff *skb;
   6637		BT_DBG("Searching for skb with txseq %d (queue len %d)",
   6638		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
   6639
   6640		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
   6641
   6642		if (!skb)
   6643			break;
   6644
   6645		skb_unlink(skb, &chan->srej_q);
   6646		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
   6647		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
   6648		if (err)
   6649			break;
   6650	}
   6651
   6652	if (skb_queue_empty(&chan->srej_q)) {
   6653		chan->rx_state = L2CAP_RX_STATE_RECV;
   6654		l2cap_send_ack(chan);
   6655	}
   6656
   6657	return err;
   6658}
   6659
   6660static void l2cap_handle_srej(struct l2cap_chan *chan,
   6661			      struct l2cap_ctrl *control)
   6662{
   6663	struct sk_buff *skb;
   6664
   6665	BT_DBG("chan %p, control %p", chan, control);
   6666
   6667	if (control->reqseq == chan->next_tx_seq) {
   6668		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
   6669		l2cap_send_disconn_req(chan, ECONNRESET);
   6670		return;
   6671	}
   6672
   6673	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
   6674
   6675	if (skb == NULL) {
   6676		BT_DBG("Seq %d not available for retransmission",
   6677		       control->reqseq);
   6678		return;
   6679	}
   6680
   6681	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
   6682		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
   6683		l2cap_send_disconn_req(chan, ECONNRESET);
   6684		return;
   6685	}
   6686
   6687	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
   6688
   6689	if (control->poll) {
   6690		l2cap_pass_to_tx(chan, control);
   6691
   6692		set_bit(CONN_SEND_FBIT, &chan->conn_state);
   6693		l2cap_retransmit(chan, control);
   6694		l2cap_ertm_send(chan);
   6695
   6696		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
   6697			set_bit(CONN_SREJ_ACT, &chan->conn_state);
   6698			chan->srej_save_reqseq = control->reqseq;
   6699		}
   6700	} else {
   6701		l2cap_pass_to_tx_fbit(chan, control);
   6702
   6703		if (control->final) {
   6704			if (chan->srej_save_reqseq != control->reqseq ||
   6705			    !test_and_clear_bit(CONN_SREJ_ACT,
   6706						&chan->conn_state))
   6707				l2cap_retransmit(chan, control);
   6708		} else {
   6709			l2cap_retransmit(chan, control);
   6710			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
   6711				set_bit(CONN_SREJ_ACT, &chan->conn_state);
   6712				chan->srej_save_reqseq = control->reqseq;
   6713			}
   6714		}
   6715	}
   6716}
   6717
   6718static void l2cap_handle_rej(struct l2cap_chan *chan,
   6719			     struct l2cap_ctrl *control)
   6720{
   6721	struct sk_buff *skb;
   6722
   6723	BT_DBG("chan %p, control %p", chan, control);
   6724
   6725	if (control->reqseq == chan->next_tx_seq) {
   6726		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
   6727		l2cap_send_disconn_req(chan, ECONNRESET);
   6728		return;
   6729	}
   6730
   6731	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
   6732
   6733	if (chan->max_tx && skb &&
   6734	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
   6735		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
   6736		l2cap_send_disconn_req(chan, ECONNRESET);
   6737		return;
   6738	}
   6739
   6740	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
   6741
   6742	l2cap_pass_to_tx(chan, control);
   6743
   6744	if (control->final) {
   6745		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
   6746			l2cap_retransmit_all(chan, control);
   6747	} else {
   6748		l2cap_retransmit_all(chan, control);
   6749		l2cap_ertm_send(chan);
   6750		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
   6751			set_bit(CONN_REJ_ACT, &chan->conn_state);
   6752	}
   6753}
   6754
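       /* Classify the TxSeq of a received I-frame relative to the receive
        * window and any outstanding SREJ requests: expected, duplicate,
        * unexpected (a sequence gap) or invalid.  Invalid sequence numbers
        * are only ignored when the tx window is at most half the sequence
        * space, which rules out the "double poll" ambiguity described below.
        */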
   6755static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
   6756{
   6757	BT_DBG("chan %p, txseq %d", chan, txseq);
   6758
   6759	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
   6760	       chan->expected_tx_seq);
   6761
   6762	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
   6763		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
   6764		    chan->tx_win) {
   6765			/* See notes below regarding "double poll" and
   6766			 * invalid packets.
   6767			 */
   6768			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
   6769				BT_DBG("Invalid/Ignore - after SREJ");
   6770				return L2CAP_TXSEQ_INVALID_IGNORE;
   6771			} else {
   6772				BT_DBG("Invalid - in window after SREJ sent");
   6773				return L2CAP_TXSEQ_INVALID;
   6774			}
   6775		}
   6776
   6777		if (chan->srej_list.head == txseq) {
   6778			BT_DBG("Expected SREJ");
   6779			return L2CAP_TXSEQ_EXPECTED_SREJ;
   6780		}
   6781
   6782		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
   6783			BT_DBG("Duplicate SREJ - txseq already stored");
   6784			return L2CAP_TXSEQ_DUPLICATE_SREJ;
   6785		}
   6786
   6787		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
   6788			BT_DBG("Unexpected SREJ - not requested");
   6789			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
   6790		}
   6791	}
   6792
   6793	if (chan->expected_tx_seq == txseq) {
   6794		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
   6795		    chan->tx_win) {
   6796			BT_DBG("Invalid - txseq outside tx window");
   6797			return L2CAP_TXSEQ_INVALID;
   6798		} else {
   6799			BT_DBG("Expected");
   6800			return L2CAP_TXSEQ_EXPECTED;
   6801		}
   6802	}
   6803
   6804	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
   6805	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
   6806		BT_DBG("Duplicate - expected_tx_seq later than txseq");
   6807		return L2CAP_TXSEQ_DUPLICATE;
   6808	}
   6809
   6810	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
   6811		/* A source of invalid packets is a "double poll" condition,
   6812		 * where delays cause us to send multiple poll packets.  If
   6813		 * the remote stack receives and processes both polls,
   6814		 * sequence numbers can wrap around in such a way that a
   6815		 * resent frame has a sequence number that looks like new data
   6816		 * with a sequence gap.  This would trigger an erroneous SREJ
   6817		 * request.
   6818		 *
   6819		 * Fortunately, this is impossible with a tx window that's
   6820		 * less than half of the maximum sequence number, which allows
   6821		 * invalid frames to be safely ignored.
   6822		 *
   6823		 * With tx window sizes greater than half of the tx window
   6824		 * maximum, the frame is invalid and cannot be ignored.  This
   6825		 * causes a disconnect.
   6826		 */
   6827
   6828		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
   6829			BT_DBG("Invalid/Ignore - txseq outside tx window");
   6830			return L2CAP_TXSEQ_INVALID_IGNORE;
   6831		} else {
   6832			BT_DBG("Invalid - txseq outside tx window");
   6833			return L2CAP_TXSEQ_INVALID;
   6834		}
   6835	} else {
   6836		BT_DBG("Unexpected - txseq indicates missing frames");
   6837		return L2CAP_TXSEQ_UNEXPECTED;
   6838	}
   6839}
   6840
   6841static int l2cap_rx_state_recv(struct l2cap_chan *chan,
   6842			       struct l2cap_ctrl *control,
   6843			       struct sk_buff *skb, u8 event)
   6844{
   6845	int err = 0;
   6846	bool skb_in_use = false;
   6847
   6848	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
   6849	       event);
   6850
   6851	switch (event) {
   6852	case L2CAP_EV_RECV_IFRAME:
   6853		switch (l2cap_classify_txseq(chan, control->txseq)) {
   6854		case L2CAP_TXSEQ_EXPECTED:
   6855			l2cap_pass_to_tx(chan, control);
   6856
   6857			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
   6858				BT_DBG("Busy, discarding expected seq %d",
   6859				       control->txseq);
   6860				break;
   6861			}
   6862
   6863			chan->expected_tx_seq = __next_seq(chan,
   6864							   control->txseq);
   6865
   6866			chan->buffer_seq = chan->expected_tx_seq;
   6867			skb_in_use = true;
   6868
   6869			err = l2cap_reassemble_sdu(chan, skb, control);
   6870			if (err)
   6871				break;
   6872
   6873			if (control->final) {
   6874				if (!test_and_clear_bit(CONN_REJ_ACT,
   6875							&chan->conn_state)) {
   6876					control->final = 0;
   6877					l2cap_retransmit_all(chan, control);
   6878					l2cap_ertm_send(chan);
   6879				}
   6880			}
   6881
   6882			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
   6883				l2cap_send_ack(chan);
   6884			break;
   6885		case L2CAP_TXSEQ_UNEXPECTED:
   6886			l2cap_pass_to_tx(chan, control);
   6887
   6888			/* Can't issue SREJ frames in the local busy state.
    6889			 * Drop this frame; it will be seen as missing
   6890			 * when local busy is exited.
   6891			 */
   6892			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
   6893				BT_DBG("Busy, discarding unexpected seq %d",
   6894				       control->txseq);
   6895				break;
   6896			}
   6897
   6898			/* There was a gap in the sequence, so an SREJ
   6899			 * must be sent for each missing frame.  The
   6900			 * current frame is stored for later use.
   6901			 */
   6902			skb_queue_tail(&chan->srej_q, skb);
   6903			skb_in_use = true;
   6904			BT_DBG("Queued %p (queue len %d)", skb,
   6905			       skb_queue_len(&chan->srej_q));
   6906
   6907			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
   6908			l2cap_seq_list_clear(&chan->srej_list);
   6909			l2cap_send_srej(chan, control->txseq);
   6910
   6911			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
   6912			break;
   6913		case L2CAP_TXSEQ_DUPLICATE:
   6914			l2cap_pass_to_tx(chan, control);
   6915			break;
   6916		case L2CAP_TXSEQ_INVALID_IGNORE:
   6917			break;
   6918		case L2CAP_TXSEQ_INVALID:
   6919		default:
   6920			l2cap_send_disconn_req(chan, ECONNRESET);
   6921			break;
   6922		}
   6923		break;
   6924	case L2CAP_EV_RECV_RR:
   6925		l2cap_pass_to_tx(chan, control);
   6926		if (control->final) {
   6927			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
   6928
   6929			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
   6930			    !__chan_is_moving(chan)) {
   6931				control->final = 0;
   6932				l2cap_retransmit_all(chan, control);
   6933			}
   6934
   6935			l2cap_ertm_send(chan);
   6936		} else if (control->poll) {
   6937			l2cap_send_i_or_rr_or_rnr(chan);
   6938		} else {
   6939			if (test_and_clear_bit(CONN_REMOTE_BUSY,
   6940					       &chan->conn_state) &&
   6941			    chan->unacked_frames)
   6942				__set_retrans_timer(chan);
   6943
   6944			l2cap_ertm_send(chan);
   6945		}
   6946		break;
   6947	case L2CAP_EV_RECV_RNR:
   6948		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
   6949		l2cap_pass_to_tx(chan, control);
   6950		if (control && control->poll) {
   6951			set_bit(CONN_SEND_FBIT, &chan->conn_state);
   6952			l2cap_send_rr_or_rnr(chan, 0);
   6953		}
   6954		__clear_retrans_timer(chan);
   6955		l2cap_seq_list_clear(&chan->retrans_list);
   6956		break;
   6957	case L2CAP_EV_RECV_REJ:
   6958		l2cap_handle_rej(chan, control);
   6959		break;
   6960	case L2CAP_EV_RECV_SREJ:
   6961		l2cap_handle_srej(chan, control);
   6962		break;
   6963	default:
   6964		break;
   6965	}
   6966
   6967	if (skb && !skb_in_use) {
   6968		BT_DBG("Freeing %p", skb);
   6969		kfree_skb(skb);
   6970	}
   6971
   6972	return err;
   6973}
   6974
   6975static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
   6976				    struct l2cap_ctrl *control,
   6977				    struct sk_buff *skb, u8 event)
   6978{
   6979	int err = 0;
   6980	u16 txseq = control->txseq;
   6981	bool skb_in_use = false;
   6982
   6983	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
   6984	       event);
   6985
   6986	switch (event) {
   6987	case L2CAP_EV_RECV_IFRAME:
   6988		switch (l2cap_classify_txseq(chan, txseq)) {
   6989		case L2CAP_TXSEQ_EXPECTED:
   6990			/* Keep frame for reassembly later */
   6991			l2cap_pass_to_tx(chan, control);
   6992			skb_queue_tail(&chan->srej_q, skb);
   6993			skb_in_use = true;
   6994			BT_DBG("Queued %p (queue len %d)", skb,
   6995			       skb_queue_len(&chan->srej_q));
   6996
   6997			chan->expected_tx_seq = __next_seq(chan, txseq);
   6998			break;
   6999		case L2CAP_TXSEQ_EXPECTED_SREJ:
   7000			l2cap_seq_list_pop(&chan->srej_list);
   7001
   7002			l2cap_pass_to_tx(chan, control);
   7003			skb_queue_tail(&chan->srej_q, skb);
   7004			skb_in_use = true;
   7005			BT_DBG("Queued %p (queue len %d)", skb,
   7006			       skb_queue_len(&chan->srej_q));
   7007
   7008			err = l2cap_rx_queued_iframes(chan);
   7009			if (err)
   7010				break;
   7011
   7012			break;
   7013		case L2CAP_TXSEQ_UNEXPECTED:
   7014			/* Got a frame that can't be reassembled yet.
   7015			 * Save it for later, and send SREJs to cover
   7016			 * the missing frames.
   7017			 */
   7018			skb_queue_tail(&chan->srej_q, skb);
   7019			skb_in_use = true;
   7020			BT_DBG("Queued %p (queue len %d)", skb,
   7021			       skb_queue_len(&chan->srej_q));
   7022
   7023			l2cap_pass_to_tx(chan, control);
   7024			l2cap_send_srej(chan, control->txseq);
   7025			break;
   7026		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
   7027			/* This frame was requested with an SREJ, but
   7028			 * some expected retransmitted frames are
   7029			 * missing.  Request retransmission of missing
   7030			 * SREJ'd frames.
   7031			 */
   7032			skb_queue_tail(&chan->srej_q, skb);
   7033			skb_in_use = true;
   7034			BT_DBG("Queued %p (queue len %d)", skb,
   7035			       skb_queue_len(&chan->srej_q));
   7036
   7037			l2cap_pass_to_tx(chan, control);
   7038			l2cap_send_srej_list(chan, control->txseq);
   7039			break;
   7040		case L2CAP_TXSEQ_DUPLICATE_SREJ:
   7041			/* We've already queued this frame.  Drop this copy. */
   7042			l2cap_pass_to_tx(chan, control);
   7043			break;
   7044		case L2CAP_TXSEQ_DUPLICATE:
   7045			/* Expecting a later sequence number, so this frame
   7046			 * was already received.  Ignore it completely.
   7047			 */
   7048			break;
   7049		case L2CAP_TXSEQ_INVALID_IGNORE:
   7050			break;
   7051		case L2CAP_TXSEQ_INVALID:
   7052		default:
   7053			l2cap_send_disconn_req(chan, ECONNRESET);
   7054			break;
   7055		}
   7056		break;
   7057	case L2CAP_EV_RECV_RR:
   7058		l2cap_pass_to_tx(chan, control);
   7059		if (control->final) {
   7060			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
   7061
   7062			if (!test_and_clear_bit(CONN_REJ_ACT,
   7063						&chan->conn_state)) {
   7064				control->final = 0;
   7065				l2cap_retransmit_all(chan, control);
   7066			}
   7067
   7068			l2cap_ertm_send(chan);
   7069		} else if (control->poll) {
   7070			if (test_and_clear_bit(CONN_REMOTE_BUSY,
   7071					       &chan->conn_state) &&
   7072			    chan->unacked_frames) {
   7073				__set_retrans_timer(chan);
   7074			}
   7075
   7076			set_bit(CONN_SEND_FBIT, &chan->conn_state);
   7077			l2cap_send_srej_tail(chan);
   7078		} else {
   7079			if (test_and_clear_bit(CONN_REMOTE_BUSY,
   7080					       &chan->conn_state) &&
   7081			    chan->unacked_frames)
   7082				__set_retrans_timer(chan);
   7083
   7084			l2cap_send_ack(chan);
   7085		}
   7086		break;
   7087	case L2CAP_EV_RECV_RNR:
   7088		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
   7089		l2cap_pass_to_tx(chan, control);
   7090		if (control->poll) {
   7091			l2cap_send_srej_tail(chan);
   7092		} else {
   7093			struct l2cap_ctrl rr_control;
   7094			memset(&rr_control, 0, sizeof(rr_control));
   7095			rr_control.sframe = 1;
   7096			rr_control.super = L2CAP_SUPER_RR;
   7097			rr_control.reqseq = chan->buffer_seq;
   7098			l2cap_send_sframe(chan, &rr_control);
   7099		}
   7100
   7101		break;
   7102	case L2CAP_EV_RECV_REJ:
   7103		l2cap_handle_rej(chan, control);
   7104		break;
   7105	case L2CAP_EV_RECV_SREJ:
   7106		l2cap_handle_srej(chan, control);
   7107		break;
   7108	}
   7109
   7110	if (skb && !skb_in_use) {
   7111		BT_DBG("Freeing %p", skb);
   7112		kfree_skb(skb);
   7113	}
   7114
   7115	return err;
   7116}
   7117
   7118static int l2cap_finish_move(struct l2cap_chan *chan)
   7119{
   7120	BT_DBG("chan %p", chan);
   7121
   7122	chan->rx_state = L2CAP_RX_STATE_RECV;
   7123
   7124	if (chan->hs_hcon)
   7125		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
   7126	else
   7127		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
   7128
   7129	return l2cap_resegment(chan);
   7130}
   7131
   7132static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
   7133				 struct l2cap_ctrl *control,
   7134				 struct sk_buff *skb, u8 event)
   7135{
   7136	int err;
   7137
   7138	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
   7139	       event);
   7140
   7141	if (!control->poll)
   7142		return -EPROTO;
   7143
   7144	l2cap_process_reqseq(chan, control->reqseq);
   7145
   7146	if (!skb_queue_empty(&chan->tx_q))
   7147		chan->tx_send_head = skb_peek(&chan->tx_q);
   7148	else
   7149		chan->tx_send_head = NULL;
   7150
   7151	/* Rewind next_tx_seq to the point expected
   7152	 * by the receiver.
   7153	 */
   7154	chan->next_tx_seq = control->reqseq;
   7155	chan->unacked_frames = 0;
   7156
   7157	err = l2cap_finish_move(chan);
   7158	if (err)
   7159		return err;
   7160
   7161	set_bit(CONN_SEND_FBIT, &chan->conn_state);
   7162	l2cap_send_i_or_rr_or_rnr(chan);
   7163
   7164	if (event == L2CAP_EV_RECV_IFRAME)
   7165		return -EPROTO;
   7166
   7167	return l2cap_rx_state_recv(chan, control, NULL, event);
   7168}
   7169
   7170static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
   7171				 struct l2cap_ctrl *control,
   7172				 struct sk_buff *skb, u8 event)
   7173{
   7174	int err;
   7175
   7176	if (!control->final)
   7177		return -EPROTO;
   7178
   7179	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
   7180
   7181	chan->rx_state = L2CAP_RX_STATE_RECV;
   7182	l2cap_process_reqseq(chan, control->reqseq);
   7183
   7184	if (!skb_queue_empty(&chan->tx_q))
   7185		chan->tx_send_head = skb_peek(&chan->tx_q);
   7186	else
   7187		chan->tx_send_head = NULL;
   7188
   7189	/* Rewind next_tx_seq to the point expected
   7190	 * by the receiver.
   7191	 */
   7192	chan->next_tx_seq = control->reqseq;
   7193	chan->unacked_frames = 0;
   7194
   7195	if (chan->hs_hcon)
   7196		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
   7197	else
   7198		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
   7199
   7200	err = l2cap_resegment(chan);
   7201
   7202	if (!err)
   7203		err = l2cap_rx_state_recv(chan, control, skb, event);
   7204
   7205	return err;
   7206}
   7207
   7208static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
   7209{
   7210	/* Make sure reqseq is for a packet that has been sent but not acked */
   7211	u16 unacked;
   7212
   7213	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
   7214	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
   7215}
   7216
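       /* Run the ERTM receive state machine for one frame or event, after
        * checking that the acknowledged ReqSeq refers to a frame that has
        * been sent but not yet acked; an out-of-range ReqSeq disconnects the
        * channel.
        */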
   7217static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
   7218		    struct sk_buff *skb, u8 event)
   7219{
   7220	int err = 0;
   7221
   7222	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
   7223	       control, skb, event, chan->rx_state);
   7224
   7225	if (__valid_reqseq(chan, control->reqseq)) {
   7226		switch (chan->rx_state) {
   7227		case L2CAP_RX_STATE_RECV:
   7228			err = l2cap_rx_state_recv(chan, control, skb, event);
   7229			break;
   7230		case L2CAP_RX_STATE_SREJ_SENT:
   7231			err = l2cap_rx_state_srej_sent(chan, control, skb,
   7232						       event);
   7233			break;
   7234		case L2CAP_RX_STATE_WAIT_P:
   7235			err = l2cap_rx_state_wait_p(chan, control, skb, event);
   7236			break;
   7237		case L2CAP_RX_STATE_WAIT_F:
   7238			err = l2cap_rx_state_wait_f(chan, control, skb, event);
   7239			break;
   7240		default:
   7241			/* shut it down */
   7242			break;
   7243		}
   7244	} else {
   7245		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
   7246		       control->reqseq, chan->next_tx_seq,
   7247		       chan->expected_ack_seq);
   7248		l2cap_send_disconn_req(chan, ECONNRESET);
   7249	}
   7250
   7251	return err;
   7252}
   7253
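       /* Streaming mode receive: only the expected TxSeq is reassembled.  Any
        * other sequence number discards the frame along with whatever partial
        * SDU was being collected, and the expected sequence simply
        * resynchronizes to the received one.
        */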
   7254static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
   7255			   struct sk_buff *skb)
   7256{
   7257	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
   7258	       chan->rx_state);
   7259
   7260	if (l2cap_classify_txseq(chan, control->txseq) ==
   7261	    L2CAP_TXSEQ_EXPECTED) {
   7262		l2cap_pass_to_tx(chan, control);
   7263
   7264		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
   7265		       __next_seq(chan, chan->buffer_seq));
   7266
   7267		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
   7268
   7269		l2cap_reassemble_sdu(chan, skb, control);
   7270	} else {
   7271		if (chan->sdu) {
   7272			kfree_skb(chan->sdu);
   7273			chan->sdu = NULL;
   7274		}
   7275		chan->sdu_last_frag = NULL;
   7276		chan->sdu_len = 0;
   7277
   7278		if (skb) {
   7279			BT_DBG("Freeing %p", skb);
   7280			kfree_skb(skb);
   7281		}
   7282	}
   7283
   7284	chan->last_acked_seq = control->txseq;
   7285	chan->expected_tx_seq = __next_seq(chan, control->txseq);
   7286
   7287	return 0;
   7288}
   7289
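       /* Entry point for ERTM/streaming data on a channel: validate the FCS,
        * the length and the F/P bits, then feed I-frames into the rx state
        * machine (or the streaming receiver) and map S-frames to their
        * corresponding receive events.
        */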
   7290static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
   7291{
   7292	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
   7293	u16 len;
   7294	u8 event;
   7295
   7296	__unpack_control(chan, skb);
   7297
   7298	len = skb->len;
   7299
   7300	/*
   7301	 * We can just drop the corrupted I-frame here.
   7302	 * Receiver will miss it and start proper recovery
   7303	 * procedures and ask for retransmission.
   7304	 */
   7305	if (l2cap_check_fcs(chan, skb))
   7306		goto drop;
   7307
   7308	if (!control->sframe && control->sar == L2CAP_SAR_START)
   7309		len -= L2CAP_SDULEN_SIZE;
   7310
   7311	if (chan->fcs == L2CAP_FCS_CRC16)
   7312		len -= L2CAP_FCS_SIZE;
   7313
   7314	if (len > chan->mps) {
   7315		l2cap_send_disconn_req(chan, ECONNRESET);
   7316		goto drop;
   7317	}
   7318
   7319	if (chan->ops->filter) {
   7320		if (chan->ops->filter(chan, skb))
   7321			goto drop;
   7322	}
   7323
   7324	if (!control->sframe) {
   7325		int err;
   7326
   7327		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
   7328		       control->sar, control->reqseq, control->final,
   7329		       control->txseq);
   7330
   7331		/* Validate F-bit - F=0 always valid, F=1 only
   7332		 * valid in TX WAIT_F
   7333		 */
   7334		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
   7335			goto drop;
   7336
   7337		if (chan->mode != L2CAP_MODE_STREAMING) {
   7338			event = L2CAP_EV_RECV_IFRAME;
   7339			err = l2cap_rx(chan, control, skb, event);
   7340		} else {
   7341			err = l2cap_stream_rx(chan, control, skb);
   7342		}
   7343
   7344		if (err)
   7345			l2cap_send_disconn_req(chan, ECONNRESET);
   7346	} else {
   7347		const u8 rx_func_to_event[4] = {
   7348			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
   7349			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
   7350		};
   7351
   7352		/* Only I-frames are expected in streaming mode */
   7353		if (chan->mode == L2CAP_MODE_STREAMING)
   7354			goto drop;
   7355
   7356		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
   7357		       control->reqseq, control->final, control->poll,
   7358		       control->super);
   7359
   7360		if (len != 0) {
   7361			BT_ERR("Trailing bytes: %d in sframe", len);
   7362			l2cap_send_disconn_req(chan, ECONNRESET);
   7363			goto drop;
   7364		}
   7365
   7366		/* Validate F and P bits */
   7367		if (control->final && (control->poll ||
   7368				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
   7369			goto drop;
   7370
   7371		event = rx_func_to_event[control->super];
   7372		if (l2cap_rx(chan, control, skb, event))
   7373			l2cap_send_disconn_req(chan, ECONNRESET);
   7374	}
   7375
   7376	return 0;
   7377
   7378drop:
   7379	kfree_skb(skb);
   7380	return 0;
   7381}
   7382
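       /* Top the remote back up to (imtu / mps) + 1 receive credits by
        * sending an L2CAP_LE_CREDITS packet for the difference. Used by
        * both LE and enhanced credit based flow control channels.
        */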
   7383static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
   7384{
   7385	struct l2cap_conn *conn = chan->conn;
   7386	struct l2cap_le_credits pkt;
   7387	u16 return_credits;
   7388
   7389	return_credits = (chan->imtu / chan->mps) + 1;
   7390
   7391	if (chan->rx_credits >= return_credits)
   7392		return;
   7393
   7394	return_credits -= chan->rx_credits;
   7395
   7396	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
   7397
   7398	chan->rx_credits += return_credits;
   7399
   7400	pkt.cid     = cpu_to_le16(chan->scid);
   7401	pkt.credits = cpu_to_le16(return_credits);
   7402
   7403	chan->ident = l2cap_get_ident(conn);
   7404
   7405	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
   7406}
   7407
   7408static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
   7409{
   7410	int err;
   7411
   7412	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
   7413
   7414	/* Wait for recv to confirm reception before updating the credits */
   7415	err = chan->ops->recv(chan, skb);
   7416
   7417	/* Update credits whenever an SDU is received */
   7418	l2cap_chan_le_send_credits(chan);
   7419
   7420	return err;
   7421}
   7422
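       /* Receive one PDU on an LE or enhanced credit based flow control
        * channel: account for the consumed credit, strip the SDU length
        * from the first fragment, collect fragments in chan->sdu and
        * deliver the SDU once it is complete. Sending without credits
        * disconnects the channel; oversized or inconsistent SDUs are
        * dropped and the reassembly state is reset.
        */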
   7423static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
   7424{
   7425	int err;
   7426
   7427	if (!chan->rx_credits) {
   7428		BT_ERR("No credits to receive LE L2CAP data");
   7429		l2cap_send_disconn_req(chan, ECONNRESET);
   7430		return -ENOBUFS;
   7431	}
   7432
   7433	if (chan->imtu < skb->len) {
   7434		BT_ERR("Too big LE L2CAP PDU");
   7435		return -ENOBUFS;
   7436	}
   7437
   7438	chan->rx_credits--;
   7439	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
   7440
   7441	/* Update if the remote has run out of credits; this should only
   7442	 * happen if the remote is not using the entire MPS.
   7443	 */
   7444	if (!chan->rx_credits)
   7445		l2cap_chan_le_send_credits(chan);
   7446
   7447	err = 0;
   7448
   7449	if (!chan->sdu) {
   7450		u16 sdu_len;
   7451
   7452		sdu_len = get_unaligned_le16(skb->data);
   7453		skb_pull(skb, L2CAP_SDULEN_SIZE);
   7454
   7455		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
   7456		       sdu_len, skb->len, chan->imtu);
   7457
   7458		if (sdu_len > chan->imtu) {
   7459			BT_ERR("Too big LE L2CAP SDU length received");
   7460			err = -EMSGSIZE;
   7461			goto failed;
   7462		}
   7463
   7464		if (skb->len > sdu_len) {
   7465			BT_ERR("Too much LE L2CAP data received");
   7466			err = -EINVAL;
   7467			goto failed;
   7468		}
   7469
   7470		if (skb->len == sdu_len)
   7471			return l2cap_ecred_recv(chan, skb);
   7472
   7473		chan->sdu = skb;
   7474		chan->sdu_len = sdu_len;
   7475		chan->sdu_last_frag = skb;
   7476
   7477		/* Detect if remote is not able to use the selected MPS */
   7478		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
   7479			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
   7480
   7481			/* Adjust the number of credits */
   7482			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
   7483			chan->mps = mps_len;
   7484			l2cap_chan_le_send_credits(chan);
   7485		}
   7486
   7487		return 0;
   7488	}
   7489
   7490	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
   7491	       chan->sdu->len, skb->len, chan->sdu_len);
   7492
   7493	if (chan->sdu->len + skb->len > chan->sdu_len) {
   7494		BT_ERR("Too much LE L2CAP data received");
   7495		err = -EINVAL;
   7496		goto failed;
   7497	}
   7498
   7499	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
   7500	skb = NULL;
   7501
   7502	if (chan->sdu->len == chan->sdu_len) {
   7503		err = l2cap_ecred_recv(chan, chan->sdu);
   7504		if (!err) {
   7505			chan->sdu = NULL;
   7506			chan->sdu_last_frag = NULL;
   7507			chan->sdu_len = 0;
   7508		}
   7509	}
   7510
   7511failed:
   7512	if (err) {
   7513		kfree_skb(skb);
   7514		kfree_skb(chan->sdu);
   7515		chan->sdu = NULL;
   7516		chan->sdu_last_frag = NULL;
   7517		chan->sdu_len = 0;
   7518	}
   7519
   7520	/* We can't return an error here since we took care of the skb
   7521	 * freeing internally. An error return would cause the caller to
   7522	 * do a double-free of the skb.
   7523	 */
   7524	return 0;
   7525}
   7526
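       /* Deliver a frame received on a connection oriented (or A2MP)
        * channel: look the channel up by its source CID and feed the
        * payload to the receive path matching the channel mode.
        */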
   7527static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
   7528			       struct sk_buff *skb)
   7529{
   7530	struct l2cap_chan *chan;
   7531
   7532	chan = l2cap_get_chan_by_scid(conn, cid);
   7533	if (!chan) {
   7534		if (cid == L2CAP_CID_A2MP) {
   7535			chan = a2mp_channel_create(conn, skb);
   7536			if (!chan) {
   7537				kfree_skb(skb);
   7538				return;
   7539			}
   7540
   7541			l2cap_chan_lock(chan);
   7542		} else {
   7543			BT_DBG("unknown cid 0x%4.4x", cid);
   7544			/* Drop packet and return */
   7545			kfree_skb(skb);
   7546			return;
   7547		}
   7548	}
   7549
   7550	BT_DBG("chan %p, len %d", chan, skb->len);
   7551
   7552	/* If we receive data on a fixed channel before the info req/rsp
   7553	 * procedure is done, simply assume that the channel is supported
   7554	 * and mark it as ready.
   7555	 */
   7556	if (chan->chan_type == L2CAP_CHAN_FIXED)
   7557		l2cap_chan_ready(chan);
   7558
   7559	if (chan->state != BT_CONNECTED)
   7560		goto drop;
   7561
   7562	switch (chan->mode) {
   7563	case L2CAP_MODE_LE_FLOWCTL:
   7564	case L2CAP_MODE_EXT_FLOWCTL:
   7565		if (l2cap_ecred_data_rcv(chan, skb) < 0)
   7566			goto drop;
   7567
   7568		goto done;
   7569
   7570	case L2CAP_MODE_BASIC:
   7571		/* If the socket receive buffer overflows we drop data here,
   7572		 * which is *bad* because L2CAP has to be reliable.
   7573		 * But we don't have any other choice: L2CAP doesn't
   7574		 * provide a flow control mechanism. */
   7575
   7576		if (chan->imtu < skb->len) {
   7577			BT_ERR("Dropping L2CAP data: receive buffer overflow");
   7578			goto drop;
   7579		}
   7580
   7581		if (!chan->ops->recv(chan, skb))
   7582			goto done;
   7583		break;
   7584
   7585	case L2CAP_MODE_ERTM:
   7586	case L2CAP_MODE_STREAMING:
   7587		l2cap_data_rcv(chan, skb);
   7588		goto done;
   7589
   7590	default:
   7591		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
   7592		break;
   7593	}
   7594
   7595drop:
   7596	kfree_skb(skb);
   7597
   7598done:
   7599	l2cap_chan_unlock(chan);
   7600}
   7601
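       /* Deliver a connectionless (G-frame) payload to the channel bound
        * to the given PSM, recording the remote address and PSM so the
        * socket layer can report them in msg_name.
        */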
   7602static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
   7603				  struct sk_buff *skb)
   7604{
   7605	struct hci_conn *hcon = conn->hcon;
   7606	struct l2cap_chan *chan;
   7607
   7608	if (hcon->type != ACL_LINK)
   7609		goto free_skb;
   7610
   7611	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
   7612					ACL_LINK);
   7613	if (!chan)
   7614		goto free_skb;
   7615
   7616	BT_DBG("chan %p, len %d", chan, skb->len);
   7617
   7618	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
   7619		goto drop;
   7620
   7621	if (chan->imtu < skb->len)
   7622		goto drop;
   7623
   7624	/* Store remote BD_ADDR and PSM for msg_name */
   7625	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
   7626	bt_cb(skb)->l2cap.psm = psm;
   7627
   7628	if (!chan->ops->recv(chan, skb)) {
   7629		l2cap_chan_put(chan);
   7630		return;
   7631	}
   7632
   7633drop:
   7634	l2cap_chan_put(chan);
   7635free_skb:
   7636	kfree_skb(skb);
   7637}
   7638
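       /* Handle one complete L2CAP frame: frames arriving before the HCI
        * connection is fully established are queued, the basic header is
        * validated, and the payload is demultiplexed by CID to the
        * signalling, connectionless or data channel handlers.
        */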
   7639static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
   7640{
   7641	struct l2cap_hdr *lh = (void *) skb->data;
   7642	struct hci_conn *hcon = conn->hcon;
   7643	u16 cid, len;
   7644	__le16 psm;
   7645
   7646	if (hcon->state != BT_CONNECTED) {
   7647		BT_DBG("queueing pending rx skb");
   7648		skb_queue_tail(&conn->pending_rx, skb);
   7649		return;
   7650	}
   7651
   7652	skb_pull(skb, L2CAP_HDR_SIZE);
   7653	cid = __le16_to_cpu(lh->cid);
   7654	len = __le16_to_cpu(lh->len);
   7655
   7656	if (len != skb->len) {
   7657		kfree_skb(skb);
   7658		return;
   7659	}
   7660
   7661	/* Since we can't actively block incoming LE connections, we must
   7662	 * at least ensure that we ignore incoming data from them.
   7663	 */
   7664	if (hcon->type == LE_LINK &&
   7665	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
   7666				   bdaddr_dst_type(hcon))) {
   7667		kfree_skb(skb);
   7668		return;
   7669	}
   7670
   7671	BT_DBG("len %d, cid 0x%4.4x", len, cid);
   7672
   7673	switch (cid) {
   7674	case L2CAP_CID_SIGNALING:
   7675		l2cap_sig_channel(conn, skb);
   7676		break;
   7677
   7678	case L2CAP_CID_CONN_LESS:
   7679		psm = get_unaligned((__le16 *) skb->data);
   7680		skb_pull(skb, L2CAP_PSMLEN_SIZE);
   7681		l2cap_conless_channel(conn, psm, skb);
   7682		break;
   7683
   7684	case L2CAP_CID_LE_SIGNALING:
   7685		l2cap_le_sig_channel(conn, skb);
   7686		break;
   7687
   7688	default:
   7689		l2cap_data_channel(conn, cid, skb);
   7690		break;
   7691	}
   7692}
   7693
   7694static void process_pending_rx(struct work_struct *work)
   7695{
   7696	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
   7697					       pending_rx_work);
   7698	struct sk_buff *skb;
   7699
   7700	BT_DBG("");
   7701
   7702	while ((skb = skb_dequeue(&conn->pending_rx)))
   7703		l2cap_recv_frame(conn, skb);
   7704}
   7705
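       /* Create (or return the existing) L2CAP connection state for an
        * HCI connection: allocate the l2cap_conn, attach an hci_chan,
        * pick the MTU from the controller (LE or ACL), record the locally
        * supported fixed channels and initialise the locks, channel list
        * and deferred work used by the connection.
        */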
   7706static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
   7707{
   7708	struct l2cap_conn *conn = hcon->l2cap_data;
   7709	struct hci_chan *hchan;
   7710
   7711	if (conn)
   7712		return conn;
   7713
   7714	hchan = hci_chan_create(hcon);
   7715	if (!hchan)
   7716		return NULL;
   7717
   7718	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
   7719	if (!conn) {
   7720		hci_chan_del(hchan);
   7721		return NULL;
   7722	}
   7723
   7724	kref_init(&conn->ref);
   7725	hcon->l2cap_data = conn;
   7726	conn->hcon = hci_conn_get(hcon);
   7727	conn->hchan = hchan;
   7728
   7729	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
   7730
   7731	switch (hcon->type) {
   7732	case LE_LINK:
   7733		if (hcon->hdev->le_mtu) {
   7734			conn->mtu = hcon->hdev->le_mtu;
   7735			break;
   7736		}
   7737		fallthrough;
   7738	default:
   7739		conn->mtu = hcon->hdev->acl_mtu;
   7740		break;
   7741	}
   7742
   7743	conn->feat_mask = 0;
   7744
   7745	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
   7746
   7747	if (hcon->type == ACL_LINK &&
   7748	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
   7749		conn->local_fixed_chan |= L2CAP_FC_A2MP;
   7750
   7751	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
   7752	    (bredr_sc_enabled(hcon->hdev) ||
   7753	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
   7754		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
   7755
   7756	mutex_init(&conn->ident_lock);
   7757	mutex_init(&conn->chan_lock);
   7758
   7759	INIT_LIST_HEAD(&conn->chan_l);
   7760	INIT_LIST_HEAD(&conn->users);
   7761
   7762	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
   7763
   7764	skb_queue_head_init(&conn->pending_rx);
   7765	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
   7766	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
   7767
   7768	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
   7769
   7770	return conn;
   7771}
   7772
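       /* A PSM of zero is never valid. LE PSMs are limited to a single
        * octet; BR/EDR PSMs must be odd with the least significant bit
        * of the most significant octet cleared.
        */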
   7773static bool is_valid_psm(u16 psm, u8 dst_type)
   7774{
   7775	if (!psm)
   7776		return false;
   7777
   7778	if (bdaddr_type_is_le(dst_type))
   7779		return (psm <= 0x00ff);
   7780
   7781	/* PSM must be odd and lsb of upper byte must be 0 */
   7782	return ((psm & 0x0101) == 0x0001);
   7783}
   7784
   7785struct l2cap_chan_data {
   7786	struct l2cap_chan *chan;
   7787	struct pid *pid;
   7788	int count;
   7789};
   7790
   7791static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
   7792{
   7793	struct l2cap_chan_data *d = data;
   7794	struct pid *pid;
   7795
   7796	if (chan == d->chan)
   7797		return;
   7798
   7799	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
   7800		return;
   7801
   7802	pid = chan->ops->get_peer_pid(chan);
   7803
   7804	/* Only count deferred channels with the same PID/PSM */
   7805	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
   7806	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
   7807		return;
   7808
   7809	d->count++;
   7810}
   7811
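       /* Initiate an outgoing connection on a channel: validate the
        * PSM/CID and requested mode, create or reuse the underlying ACL
        * or LE link, bind the channel to the resulting l2cap_conn and
        * either start the L2CAP connect/configuration sequence right away
        * or wait for the link to come up.
        */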
   7812int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
   7813		       bdaddr_t *dst, u8 dst_type)
   7814{
   7815	struct l2cap_conn *conn;
   7816	struct hci_conn *hcon;
   7817	struct hci_dev *hdev;
   7818	int err;
   7819
   7820	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
   7821	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
   7822
   7823	hdev = hci_get_route(dst, &chan->src, chan->src_type);
   7824	if (!hdev)
   7825		return -EHOSTUNREACH;
   7826
   7827	hci_dev_lock(hdev);
   7828
   7829	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
   7830	    chan->chan_type != L2CAP_CHAN_RAW) {
   7831		err = -EINVAL;
   7832		goto done;
   7833	}
   7834
   7835	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
   7836		err = -EINVAL;
   7837		goto done;
   7838	}
   7839
   7840	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
   7841		err = -EINVAL;
   7842		goto done;
   7843	}
   7844
   7845	switch (chan->mode) {
   7846	case L2CAP_MODE_BASIC:
   7847		break;
   7848	case L2CAP_MODE_LE_FLOWCTL:
   7849		break;
   7850	case L2CAP_MODE_EXT_FLOWCTL:
   7851		if (!enable_ecred) {
   7852			err = -EOPNOTSUPP;
   7853			goto done;
   7854		}
   7855		break;
   7856	case L2CAP_MODE_ERTM:
   7857	case L2CAP_MODE_STREAMING:
   7858		if (!disable_ertm)
   7859			break;
   7860		fallthrough;
   7861	default:
   7862		err = -EOPNOTSUPP;
   7863		goto done;
   7864	}
   7865
   7866	switch (chan->state) {
   7867	case BT_CONNECT:
   7868	case BT_CONNECT2:
   7869	case BT_CONFIG:
   7870		/* Already connecting */
   7871		err = 0;
   7872		goto done;
   7873
   7874	case BT_CONNECTED:
   7875		/* Already connected */
   7876		err = -EISCONN;
   7877		goto done;
   7878
   7879	case BT_OPEN:
   7880	case BT_BOUND:
   7881		/* Can connect */
   7882		break;
   7883
   7884	default:
   7885		err = -EBADFD;
   7886		goto done;
   7887	}
   7888
   7889	/* Set destination address and psm */
   7890	bacpy(&chan->dst, dst);
   7891	chan->dst_type = dst_type;
   7892
   7893	chan->psm = psm;
   7894	chan->dcid = cid;
   7895
   7896	if (bdaddr_type_is_le(dst_type)) {
   7897		/* Convert from L2CAP channel address type to HCI address type
   7898		 */
   7899		if (dst_type == BDADDR_LE_PUBLIC)
   7900			dst_type = ADDR_LE_DEV_PUBLIC;
   7901		else
   7902			dst_type = ADDR_LE_DEV_RANDOM;
   7903
   7904		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
   7905			hcon = hci_connect_le(hdev, dst, dst_type, false,
   7906					      chan->sec_level,
   7907					      HCI_LE_CONN_TIMEOUT,
   7908					      HCI_ROLE_SLAVE);
   7909		else
   7910			hcon = hci_connect_le_scan(hdev, dst, dst_type,
   7911						   chan->sec_level,
   7912						   HCI_LE_CONN_TIMEOUT,
   7913						   CONN_REASON_L2CAP_CHAN);
   7914
   7915	} else {
   7916		u8 auth_type = l2cap_get_auth_type(chan);
   7917		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
   7918				       CONN_REASON_L2CAP_CHAN);
   7919	}
   7920
   7921	if (IS_ERR(hcon)) {
   7922		err = PTR_ERR(hcon);
   7923		goto done;
   7924	}
   7925
   7926	conn = l2cap_conn_add(hcon);
   7927	if (!conn) {
   7928		hci_conn_drop(hcon);
   7929		err = -ENOMEM;
   7930		goto done;
   7931	}
   7932
   7933	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
   7934		struct l2cap_chan_data data;
   7935
   7936		data.chan = chan;
   7937		data.pid = chan->ops->get_peer_pid(chan);
   7938		data.count = 1;
   7939
   7940		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
   7941
   7942		/* Check that there aren't too many channels being connected */
   7943		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
   7944			hci_conn_drop(hcon);
   7945			err = -EPROTO;
   7946			goto done;
   7947		}
   7948	}
   7949
   7950	mutex_lock(&conn->chan_lock);
   7951	l2cap_chan_lock(chan);
   7952
   7953	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
   7954		hci_conn_drop(hcon);
   7955		err = -EBUSY;
   7956		goto chan_unlock;
   7957	}
   7958
   7959	/* Update source addr of the socket */
   7960	bacpy(&chan->src, &hcon->src);
   7961	chan->src_type = bdaddr_src_type(hcon);
   7962
   7963	__l2cap_chan_add(conn, chan);
   7964
   7965	/* l2cap_chan_add takes its own ref so we can drop this one */
   7966	hci_conn_drop(hcon);
   7967
   7968	l2cap_state_change(chan, BT_CONNECT);
   7969	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
   7970
   7971	/* Release chan->sport so that it can be reused by other
   7972	 * sockets (as it's only used for listening sockets).
   7973	 */
   7974	write_lock(&chan_list_lock);
   7975	chan->sport = 0;
   7976	write_unlock(&chan_list_lock);
   7977
   7978	if (hcon->state == BT_CONNECTED) {
   7979		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
   7980			__clear_chan_timer(chan);
   7981			if (l2cap_chan_check_security(chan, true))
   7982				l2cap_state_change(chan, BT_CONNECTED);
   7983		} else
   7984			l2cap_do_start(chan);
   7985	}
   7986
   7987	err = 0;
   7988
   7989chan_unlock:
   7990	l2cap_chan_unlock(chan);
   7991	mutex_unlock(&conn->chan_lock);
   7992done:
   7993	hci_dev_unlock(hdev);
   7994	hci_dev_put(hdev);
   7995	return err;
   7996}
   7997EXPORT_SYMBOL_GPL(l2cap_chan_connect);
   7998
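       /* Ask the peer to adopt the channel's current MTU and MPS by
        * sending an L2CAP_ECRED_RECONF_REQ for this channel's source CID.
        */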
   7999static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
   8000{
   8001	struct l2cap_conn *conn = chan->conn;
   8002	struct {
   8003		struct l2cap_ecred_reconf_req req;
   8004		__le16 scid;
   8005	} pdu;
   8006
   8007	pdu.req.mtu = cpu_to_le16(chan->imtu);
   8008	pdu.req.mps = cpu_to_le16(chan->mps);
   8009	pdu.scid    = cpu_to_le16(chan->scid);
   8010
   8011	chan->ident = l2cap_get_ident(conn);
   8012
   8013	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
   8014		       sizeof(pdu), &pdu);
   8015}
   8016
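       /* Request a larger incoming MTU on an enhanced credit based
        * channel; the MTU can only be increased, never reduced.
        */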
   8017int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
   8018{
   8019	if (chan->imtu > mtu)
   8020		return -EINVAL;
   8021
   8022	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
   8023
   8024	chan->imtu = mtu;
   8025
   8026	l2cap_ecred_reconfigure(chan);
   8027
   8028	return 0;
   8029}
   8030
   8031/* ---- L2CAP interface with lower layer (HCI) ---- */
   8032
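       /* Called by the HCI core for an incoming BR/EDR connection
        * request: scan the listening channels and build the link mode
        * mask (accept, role switch) used to answer it, preferring
        * channels bound to this controller's own address over wildcard
        * ones.
        */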
   8033int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
   8034{
   8035	int exact = 0, lm1 = 0, lm2 = 0;
   8036	struct l2cap_chan *c;
   8037
   8038	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
   8039
   8040	/* Find listening sockets and check their link_mode */
   8041	read_lock(&chan_list_lock);
   8042	list_for_each_entry(c, &chan_list, global_l) {
   8043		if (c->state != BT_LISTEN)
   8044			continue;
   8045
   8046		if (!bacmp(&c->src, &hdev->bdaddr)) {
   8047			lm1 |= HCI_LM_ACCEPT;
   8048			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
   8049				lm1 |= HCI_LM_MASTER;
   8050			exact++;
   8051		} else if (!bacmp(&c->src, BDADDR_ANY)) {
   8052			lm2 |= HCI_LM_ACCEPT;
   8053			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
   8054				lm2 |= HCI_LM_MASTER;
   8055		}
   8056	}
   8057	read_unlock(&chan_list_lock);
   8058
   8059	return exact ? lm1 : lm2;
   8060}
   8061
   8062/* Find the next fixed channel in BT_LISTEN state, continuing iteration
   8063 * from an existing channel in the list or from the beginning of the
   8064 * global list (by passing NULL as the first parameter).
   8065 */
   8066static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
   8067						  struct hci_conn *hcon)
   8068{
   8069	u8 src_type = bdaddr_src_type(hcon);
   8070
   8071	read_lock(&chan_list_lock);
   8072
   8073	if (c)
   8074		c = list_next_entry(c, global_l);
   8075	else
   8076		c = list_entry(chan_list.next, typeof(*c), global_l);
   8077
   8078	list_for_each_entry_from(c, &chan_list, global_l) {
   8079		if (c->chan_type != L2CAP_CHAN_FIXED)
   8080			continue;
   8081		if (c->state != BT_LISTEN)
   8082			continue;
   8083		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
   8084			continue;
   8085		if (src_type != c->src_type)
   8086			continue;
   8087
   8088		l2cap_chan_hold(c);
   8089		read_unlock(&chan_list_lock);
   8090		return c;
   8091	}
   8092
   8093	read_unlock(&chan_list_lock);
   8094
   8095	return NULL;
   8096}
   8097
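       /* HCI callback for a completed connection attempt: tear the
        * connection down on failure, otherwise set up the l2cap_conn
        * and, unless the remote device is on the reject list, offer the
        * new link to every listening fixed channel and kick off setup
        * for channels waiting on this connection.
        */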
   8098static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
   8099{
   8100	struct hci_dev *hdev = hcon->hdev;
   8101	struct l2cap_conn *conn;
   8102	struct l2cap_chan *pchan;
   8103	u8 dst_type;
   8104
   8105	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
   8106		return;
   8107
   8108	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
   8109
   8110	if (status) {
   8111		l2cap_conn_del(hcon, bt_to_errno(status));
   8112		return;
   8113	}
   8114
   8115	conn = l2cap_conn_add(hcon);
   8116	if (!conn)
   8117		return;
   8118
   8119	dst_type = bdaddr_dst_type(hcon);
   8120
   8121	/* If device is blocked, do not create channels for it */
   8122	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
   8123		return;
   8124
   8125	/* Find fixed channels and notify them of the new connection. We
   8126	 * use multiple individual lookups, continuing each time from where
   8127	 * we left off, because the list lock would prevent calling the
   8128	 * potentially sleeping l2cap_chan_lock() function.
   8129	 */
   8130	pchan = l2cap_global_fixed_chan(NULL, hcon);
   8131	while (pchan) {
   8132		struct l2cap_chan *chan, *next;
   8133
   8134		/* Client fixed channels should override server ones */
   8135		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
   8136			goto next;
   8137
   8138		l2cap_chan_lock(pchan);
   8139		chan = pchan->ops->new_connection(pchan);
   8140		if (chan) {
   8141			bacpy(&chan->src, &hcon->src);
   8142			bacpy(&chan->dst, &hcon->dst);
   8143			chan->src_type = bdaddr_src_type(hcon);
   8144			chan->dst_type = dst_type;
   8145
   8146			__l2cap_chan_add(conn, chan);
   8147		}
   8148
   8149		l2cap_chan_unlock(pchan);
   8150next:
   8151		next = l2cap_global_fixed_chan(pchan, hcon);
   8152		l2cap_chan_put(pchan);
   8153		pchan = next;
   8154	}
   8155
   8156	l2cap_conn_ready(conn);
   8157}
   8158
   8159int l2cap_disconn_ind(struct hci_conn *hcon)
   8160{
   8161	struct l2cap_conn *conn = hcon->l2cap_data;
   8162
   8163	BT_DBG("hcon %p", hcon);
   8164
   8165	if (!conn)
   8166		return HCI_ERROR_REMOTE_USER_TERM;
   8167	return conn->disc_reason;
   8168}
   8169
   8170static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
   8171{
   8172	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
   8173		return;
   8174
   8175	BT_DBG("hcon %p reason %d", hcon, reason);
   8176
   8177	l2cap_conn_del(hcon, bt_to_errno(reason));
   8178}
   8179
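       /* React to a change of link encryption on a connection oriented
        * channel: losing encryption arms the encryption timer for medium
        * security and closes high/FIPS security channels, while regaining
        * it clears the pending timer.
        */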
   8180static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
   8181{
   8182	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
   8183		return;
   8184
   8185	if (encrypt == 0x00) {
   8186		if (chan->sec_level == BT_SECURITY_MEDIUM) {
   8187			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
   8188		} else if (chan->sec_level == BT_SECURITY_HIGH ||
   8189			   chan->sec_level == BT_SECURITY_FIPS)
   8190			l2cap_chan_close(chan, ECONNREFUSED);
   8191	} else {
   8192		if (chan->sec_level == BT_SECURITY_MEDIUM)
   8193			__clear_chan_timer(chan);
   8194	}
   8195}
   8196
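       /* HCI callback for authentication/encryption changes: walk every
        * channel on the connection, resume channels that were waiting for
        * security, continue setup for channels in BT_CONNECT/BT_CONNECT2
        * and answer deferred incoming connection requests according to
        * the result and the encryption key size.
        */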
   8197static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
   8198{
   8199	struct l2cap_conn *conn = hcon->l2cap_data;
   8200	struct l2cap_chan *chan;
   8201
   8202	if (!conn)
   8203		return;
   8204
   8205	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
   8206
   8207	mutex_lock(&conn->chan_lock);
   8208
   8209	list_for_each_entry(chan, &conn->chan_l, list) {
   8210		l2cap_chan_lock(chan);
   8211
   8212		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
   8213		       state_to_string(chan->state));
   8214
   8215		if (chan->scid == L2CAP_CID_A2MP) {
   8216			l2cap_chan_unlock(chan);
   8217			continue;
   8218		}
   8219
   8220		if (!status && encrypt)
   8221			chan->sec_level = hcon->sec_level;
   8222
   8223		if (!__l2cap_no_conn_pending(chan)) {
   8224			l2cap_chan_unlock(chan);
   8225			continue;
   8226		}
   8227
   8228		if (!status && (chan->state == BT_CONNECTED ||
   8229				chan->state == BT_CONFIG)) {
   8230			chan->ops->resume(chan);
   8231			l2cap_check_encryption(chan, encrypt);
   8232			l2cap_chan_unlock(chan);
   8233			continue;
   8234		}
   8235
   8236		if (chan->state == BT_CONNECT) {
   8237			if (!status && l2cap_check_enc_key_size(hcon))
   8238				l2cap_start_connection(chan);
   8239			else
   8240				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
   8241		} else if (chan->state == BT_CONNECT2 &&
   8242			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
   8243			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
   8244			struct l2cap_conn_rsp rsp;
   8245			__u16 res, stat;
   8246
   8247			if (!status && l2cap_check_enc_key_size(hcon)) {
   8248				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
   8249					res = L2CAP_CR_PEND;
   8250					stat = L2CAP_CS_AUTHOR_PEND;
   8251					chan->ops->defer(chan);
   8252				} else {
   8253					l2cap_state_change(chan, BT_CONFIG);
   8254					res = L2CAP_CR_SUCCESS;
   8255					stat = L2CAP_CS_NO_INFO;
   8256				}
   8257			} else {
   8258				l2cap_state_change(chan, BT_DISCONN);
   8259				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
   8260				res = L2CAP_CR_SEC_BLOCK;
   8261				stat = L2CAP_CS_NO_INFO;
   8262			}
   8263
   8264			rsp.scid   = cpu_to_le16(chan->dcid);
   8265			rsp.dcid   = cpu_to_le16(chan->scid);
   8266			rsp.result = cpu_to_le16(res);
   8267			rsp.status = cpu_to_le16(stat);
   8268			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
   8269				       sizeof(rsp), &rsp);
   8270
   8271			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
   8272			    res == L2CAP_CR_SUCCESS) {
   8273				char buf[128];
   8274				set_bit(CONF_REQ_SENT, &chan->conf_state);
   8275				l2cap_send_cmd(conn, l2cap_get_ident(conn),
   8276					       L2CAP_CONF_REQ,
   8277					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
   8278					       buf);
   8279				chan->num_conf_req++;
   8280			}
   8281		}
   8282
   8283		l2cap_chan_unlock(chan);
   8284	}
   8285
   8286	mutex_unlock(&conn->chan_lock);
   8287}
   8288
   8289/* Append a fragment into the frame, respecting the maximum length of rx_skb */
   8290static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
   8291			   u16 len)
   8292{
   8293	if (!conn->rx_skb) {
   8294		/* Allocate skb for the complete frame (with header) */
   8295		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
   8296		if (!conn->rx_skb)
   8297			return -ENOMEM;
   8298		/* Init rx_len */
   8299		conn->rx_len = len;
   8300	}
   8301
   8302	/* Copy as much as the rx_skb can hold */
   8303	len = min_t(u16, len, skb->len);
   8304	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
   8305	skb_pull(skb, len);
   8306	conn->rx_len -= len;
   8307
   8308	return len;
   8309}
   8310
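       /* Complete the L2CAP basic header length field from a
        * continuation fragment. Once the length is known, keep the
        * current rx_skb if it is large enough for the full PDU or
        * reallocate it with the exact expected size.
        */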
   8311static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
   8312{
   8313	struct sk_buff *rx_skb;
   8314	int len;
   8315
   8316	/* Append just enough to complete the header */
   8317	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
   8318
   8319	/* If the header could not be read, just continue */
   8320	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
   8321		return len;
   8322
   8323	rx_skb = conn->rx_skb;
   8324	len = get_unaligned_le16(rx_skb->data);
   8325
   8326	/* Check if rx_skb has enough space to receive all fragments */
   8327	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
   8328		/* Update expected len */
   8329		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
   8330		return L2CAP_LEN_SIZE;
   8331	}
   8332
   8333	/* Reset conn->rx_skb since it will need to be reallocated in order to
   8334	 * fit all fragments.
   8335	 */
   8336	conn->rx_skb = NULL;
   8337
   8338	/* Reallocate rx_skb using the exact expected length */
   8339	len = l2cap_recv_frag(conn, rx_skb,
   8340			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
   8341	kfree_skb(rx_skb);
   8342
   8343	return len;
   8344}
   8345
   8346static void l2cap_recv_reset(struct l2cap_conn *conn)
   8347{
   8348	kfree_skb(conn->rx_skb);
   8349	conn->rx_skb = NULL;
   8350	conn->rx_len = 0;
   8351}
   8352
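       /* Entry point for ACL data from the HCI core: reassemble ACL
        * fragments into complete L2CAP frames in conn->rx_skb, guard
        * against missing, oversized or unexpected fragments and hand
        * every completed frame to l2cap_recv_frame().
        */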
   8353void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
   8354{
   8355	struct l2cap_conn *conn = hcon->l2cap_data;
   8356	int len;
   8357
   8358	/* For an AMP controller do not create an l2cap conn */
   8359	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
   8360		goto drop;
   8361
   8362	if (!conn)
   8363		conn = l2cap_conn_add(hcon);
   8364
   8365	if (!conn)
   8366		goto drop;
   8367
   8368	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
   8369
   8370	switch (flags) {
   8371	case ACL_START:
   8372	case ACL_START_NO_FLUSH:
   8373	case ACL_COMPLETE:
   8374		if (conn->rx_skb) {
   8375			BT_ERR("Unexpected start frame (len %d)", skb->len);
   8376			l2cap_recv_reset(conn);
   8377			l2cap_conn_unreliable(conn, ECOMM);
   8378		}
   8379
   8380		/* The start fragment may not contain the L2CAP length, so just
   8381		 * copy the initial byte when that happens and use conn->mtu as
   8382		 * the expected length.
   8383		 */
   8384		if (skb->len < L2CAP_LEN_SIZE) {
   8385			if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
   8386				goto drop;
   8387			return;
   8388		}
   8389
   8390		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
   8391
   8392		if (len == skb->len) {
   8393			/* Complete frame received */
   8394			l2cap_recv_frame(conn, skb);
   8395			return;
   8396		}
   8397
   8398		BT_DBG("Start: total len %d, frag len %u", len, skb->len);
   8399
   8400		if (skb->len > len) {
   8401			BT_ERR("Frame is too long (len %u, expected len %d)",
   8402			       skb->len, len);
   8403			l2cap_conn_unreliable(conn, ECOMM);
   8404			goto drop;
   8405		}
   8406
   8407		/* Append fragment into frame (with header) */
   8408		if (l2cap_recv_frag(conn, skb, len) < 0)
   8409			goto drop;
   8410
   8411		break;
   8412
   8413	case ACL_CONT:
   8414		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
   8415
   8416		if (!conn->rx_skb) {
   8417			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
   8418			l2cap_conn_unreliable(conn, ECOMM);
   8419			goto drop;
   8420		}
   8421
   8422		/* Complete the L2CAP length if it has not been read */
   8423		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
   8424			if (l2cap_recv_len(conn, skb) < 0) {
   8425				l2cap_conn_unreliable(conn, ECOMM);
   8426				goto drop;
   8427			}
   8428
   8429		/* If the header still could not be read, just continue */
   8430			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
   8431				return;
   8432		}
   8433
   8434		if (skb->len > conn->rx_len) {
   8435			BT_ERR("Fragment is too long (len %u, expected %u)",
   8436			       skb->len, conn->rx_len);
   8437			l2cap_recv_reset(conn);
   8438			l2cap_conn_unreliable(conn, ECOMM);
   8439			goto drop;
   8440		}
   8441
   8442		/* Append fragment into frame (with header) */
   8443		l2cap_recv_frag(conn, skb, skb->len);
   8444
   8445		if (!conn->rx_len) {
   8446			/* Complete frame received. l2cap_recv_frame
   8447			 * takes ownership of the skb, so set the global
   8448			 * rx_skb pointer to NULL first.
   8449			 */
   8450			struct sk_buff *rx_skb = conn->rx_skb;
   8451			conn->rx_skb = NULL;
   8452			l2cap_recv_frame(conn, rx_skb);
   8453		}
   8454		break;
   8455	}
   8456
   8457drop:
   8458	kfree_skb(skb);
   8459}
   8460
   8461static struct hci_cb l2cap_cb = {
   8462	.name		= "L2CAP",
   8463	.connect_cfm	= l2cap_connect_cfm,
   8464	.disconn_cfm	= l2cap_disconn_cfm,
   8465	.security_cfm	= l2cap_security_cfm,
   8466};
   8467
   8468static int l2cap_debugfs_show(struct seq_file *f, void *p)
   8469{
   8470	struct l2cap_chan *c;
   8471
   8472	read_lock(&chan_list_lock);
   8473
   8474	list_for_each_entry(c, &chan_list, global_l) {
   8475		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
   8476			   &c->src, c->src_type, &c->dst, c->dst_type,
   8477			   c->state, __le16_to_cpu(c->psm),
   8478			   c->scid, c->dcid, c->imtu, c->omtu,
   8479			   c->sec_level, c->mode);
   8480	}
   8481
   8482	read_unlock(&chan_list_lock);
   8483
   8484	return 0;
   8485}
   8486
   8487DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
   8488
   8489static struct dentry *l2cap_debugfs;
   8490
   8491int __init l2cap_init(void)
   8492{
   8493	int err;
   8494
   8495	err = l2cap_init_sockets();
   8496	if (err < 0)
   8497		return err;
   8498
   8499	hci_register_cb(&l2cap_cb);
   8500
   8501	if (IS_ERR_OR_NULL(bt_debugfs))
   8502		return 0;
   8503
   8504	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
   8505					    NULL, &l2cap_debugfs_fops);
   8506
   8507	return 0;
   8508}
   8509
   8510void l2cap_exit(void)
   8511{
   8512	debugfs_remove(l2cap_debugfs);
   8513	hci_unregister_cb(&l2cap_cb);
   8514	l2cap_cleanup_sockets();
   8515}
   8516
   8517module_param(disable_ertm, bool, 0644);
   8518MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
   8519
   8520module_param(enable_ecred, bool, 0644);
   8521MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");