cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hci_h5.c (26055B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Bluetooth HCI Three-wire UART driver
 *
 *  Copyright (C) 2012  Intel Corporation
 */

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btrtl.h"
#include "hci_uart.h"

#define SUSPEND_TIMEOUT_MS	6000

#define HCI_3WIRE_ACK_PKT	0
#define HCI_3WIRE_LINK_PKT	15

/* Sliding window size */
#define H5_TX_WIN_MAX		4

#define H5_ACK_TIMEOUT	msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT	msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 *     4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))
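
/*
 * Layout of the 4-byte packet header (built in h5_prepare_pkt()):
 *
 *   hdr[0]: bits 0-2 seq, bits 3-5 ack, bit 6 CRC present, bit 7 reliable
 *   hdr[1]: bits 0-3 packet type, bits 4-7 low nibble of the payload length
 *   hdr[2]: upper 8 bits of the 12-bit payload length
 *   hdr[3]: header checksum, chosen so that the four bytes sum to 0xff
 *           modulo 256 (checked in h5_rx_3wire_hdr())
 *
 * For example, a reliable HCI command (packet type 0x01) carrying a 3-byte
 * payload with seq 2 and ack 5 is encoded as hdr = { 0xaa, 0x31, 0x00, 0x24 }.
 */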

#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd
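
/*
 * Every packet is SLIP-framed on the wire: the frame is wrapped in 0xc0
 * delimiters, and any 0xc0 or 0xdb byte inside it is escaped as the two-byte
 * sequence 0xdb 0xdc or 0xdb 0xdd respectively (h5_slip_one_byte() on
 * transmit, h5_unslip_one_byte() on receive).
 */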

/* H5 state flags */
enum {
	H5_RX_ESC,		/* SLIP escape mode */
	H5_TX_ACK_REQ,		/* Pending ack to send */
	H5_WAKEUP_DISABLE,	/* Device cannot wake host */
	H5_HW_FLOW_CONTROL,	/* Use HW flow control */
};

struct h5 {
	/* Must be the first member, hci_serdev.c expects this. */
	struct hci_uart		serdev_hu;

	struct sk_buff_head	unack;		/* Unack'ed packets queue */
	struct sk_buff_head	rel;		/* Reliable packets queue */
	struct sk_buff_head	unrel;		/* Unreliable packets queue */

	unsigned long		flags;

	struct sk_buff		*rx_skb;	/* Receive buffer */
	size_t			rx_pending;	/* Expecting more bytes */
	u8			rx_ack;		/* Last ack number received */

	int			(*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list	timer;		/* Retransmission timer */
	struct hci_uart		*hu;		/* Parent HCI UART */

	u8			tx_seq;		/* Next seq number to send */
	u8			tx_ack;		/* Next ack number to send */
	u8			tx_win;		/* Sliding window size */

	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;

	const struct h5_vnd *vnd;
	const char *id;

	struct gpio_desc *enable_gpio;
	struct gpio_desc *device_wake_gpio;
};

enum h5_driver_info {
	H5_INFO_WAKEUP_DISABLE = BIT(0),
};

struct h5_vnd {
	int (*setup)(struct h5 *h5);
	void (*open)(struct h5 *h5);
	void (*close)(struct h5 *h5);
	int (*suspend)(struct h5 *h5);
	int (*resume)(struct h5 *h5);
	const struct acpi_gpio_mapping *acpi_gpio_map;
};

struct h5_device_data {
	uint32_t driver_info;
	struct h5_vnd *vnd;
};

static void h5_reset_rx(struct h5 *h5);

static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;

	nskb = alloc_skb(3, GFP_ATOMIC);
	if (!nskb)
		return;

	hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;

	skb_put_data(nskb, data, len);

	skb_queue_tail(&h5->unrel, nskb);
}

static u8 h5_cfg_field(struct h5 *h5)
{
	/* Sliding window size (first 3 bits) */
	return h5->tx_win & 0x07;
}

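/*
 * Shared timer callback: before the link is active it periodically re-sends
 * the sync/config link-establishment messages (H5_SYNC_TIMEOUT); once the
 * link is active it acts as the ack timeout, moving everything still in the
 * unack queue back to the front of the reliable queue (rewinding tx_seq) so
 * that it gets retransmitted.
 */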
static void h5_timed_event(struct timer_list *t)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct h5 *h5 = from_timer(h5, t, timer);
	struct hci_uart *hu = h5->hu;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}

static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	bt_dev_err(hu->hdev, "Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}

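/*
 * The proto can be attached either through a serdev device, in which case
 * struct h5 was already allocated in h5_serdev_probe() and is looked up via
 * drvdata, or through the HCI UART line discipline (hci_ldisc.c), in which
 * case it is allocated here and freed again in h5_close().
 */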
static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

	if (hu->serdev) {
		h5 = serdev_device_get_drvdata(hu->serdev);
	} else {
		h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
		if (!h5)
			return -ENOMEM;
	}

	hu->priv = h5;
	h5->hu = hu;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	timer_setup(&h5->timer, h5_timed_event, 0);

	h5->tx_win = H5_TX_WIN_MAX;

	if (h5->vnd && h5->vnd->open)
		h5->vnd->open(h5);

	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}

static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	del_timer_sync(&h5->timer);

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	kfree_skb(h5->rx_skb);
	h5->rx_skb = NULL;

	if (h5->vnd && h5->vnd->close)
		h5->vnd->close(h5);

	if (!hu->serdev)
		kfree(h5);

	return 0;
}

static int h5_setup(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	if (h5->vnd && h5->vnd->setup)
		return h5->vnd->setup(h5);

	return 0;
}

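/*
 * Remove acknowledged packets from the head of the unack queue. The peer's
 * ack number (rx_ack) is matched against the outstanding sequence numbers by
 * stepping backwards from tx_seq; everything older than the ack has been
 * received by the peer and is freed. Once the queue is empty the
 * retransmission timer is no longer needed and is stopped.
 */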
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}

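/*
 * Handle HCI_3WIRE_LINK_PKT messages, which carry the link-establishment
 * handshake (sync req/rsp, config req/rsp) as well as the low-power
 * sleep/wakeup/woken notifications. A sync request received while the link
 * is already active means the peer rebooted, so the local state is reset.
 */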
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	hci_uart_tx_wakeup(hu);
}

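/*
 * Called once a full frame (and its CRC, if present) has been received.
 * Reliable frames advance tx_ack and schedule an ack transmission; the ack
 * number carried in the header is then used to cull the unack queue. Data
 * packets are stripped of the 4-byte H5 header and handed to the HCI core,
 * anything else goes through the link-control handler above.
 */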
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}

static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}

static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_CRC(hdr)) {
		h5->rx_func = h5_rx_crc;
		h5->rx_pending = 2;
	} else {
		h5_complete_rx_pkt(hu);
	}

	return 0;
}

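/*
 * Validate a freshly received 4-byte header: the bytes must sum to 0xff
 * modulo 256 (hdr[3] is the one's complement of the other three), reliable
 * packets must carry the expected sequence number, and only link packets are
 * accepted before the link is active. On success, the payload length from
 * the header determines how many more bytes to collect.
 */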
static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		bt_dev_err(hu->hdev, "Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		bt_dev_err(hu->hdev, "Out-of-order packet arrived (%u != %u)",
			   H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		bt_dev_err(hu->hdev, "Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}

static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		bt_dev_err(hu->hdev, "Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *)hu->hdev;

	return 0;
}

static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}

static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
		set_bit(H5_RX_ESC, &h5->flags);
		return;
	}

	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}
	}

	skb_put_data(h5->rx_skb, byte, 1);
	h5->rx_pending--;

	BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}

static void h5_reset_rx(struct h5 *h5)
{
	if (h5->rx_skb) {
		kfree_skb(h5->rx_skb);
		h5->rx_skb = NULL;
	}

	h5->rx_func = h5_rx_delimiter;
	h5->rx_pending = 0;
	clear_bit(H5_RX_ESC, &h5->flags);
}

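/*
 * Receive path entry point. Incoming bytes drive a small state machine via
 * the rx_func pointer: h5_rx_delimiter waits for a frame delimiter,
 * h5_rx_pkt_start allocates the skb, h5_rx_3wire_hdr validates the header,
 * and h5_rx_payload/h5_rx_crc collect the rest. While rx_pending is non-zero,
 * bytes are SLIP-unescaped into the current skb; a stray delimiter in that
 * state means the frame was truncated and reception is reset.
 */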
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				bt_dev_err(hu->hdev, "Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	if (hu->serdev) {
		pm_runtime_get(&hu->serdev->dev);
		pm_runtime_mark_last_busy(&hu->serdev->dev);
		pm_runtime_put_autosuspend(&hu->serdev->dev);
	}

	return 0;
}

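/*
 * Queue a packet from the HCI core for transmission. Commands and ACL data
 * are sent reliably (acked and retransmitted), SCO and ISO data are sent
 * unreliably. Payloads larger than 4095 bytes cannot be represented in the
 * 12-bit length field and are dropped, as is any traffic while the link is
 * not yet active.
 */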
static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	if (skb->len > 0xfff) {
		bt_dev_err(hu->hdev, "Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (h5->state != H5_ACTIVE) {
		bt_dev_err(hu->hdev, "Ignoring HCI data in non-active state");
		kfree_skb(skb);
		return 0;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		bt_dev_err(hu->hdev, "Unknown packet type %u", hci_skb_pkt_type(skb));
		kfree_skb(skb);
		break;
	}

	if (hu->serdev) {
		pm_runtime_get_sync(&hu->serdev->dev);
		pm_runtime_mark_last_busy(&hu->serdev->dev);
		pm_runtime_put_autosuspend(&hu->serdev->dev);
	}

	return 0;
}

static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	skb_put_data(skb, &delim, 1);
}

static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
	const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
	const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

	switch (c) {
	case SLIP_DELIMITER:
		skb_put_data(skb, &esc_delim, 2);
		break;
	case SLIP_ESC:
		skb_put_data(skb, &esc_esc, 2);
		break;
	default:
		skb_put_data(skb, &c, 1);
	}
}

static bool valid_packet_type(u8 type)
{
	switch (type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
	case HCI_3WIRE_LINK_PKT:
	case HCI_3WIRE_ACK_PKT:
		return true;
	default:
		return false;
	}
}

static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		bt_dev_err(hu->hdev, "Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}

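/*
 * Pick the next frame to put on the wire. If the peer is asleep, only a
 * wakeup request is sent (with a short timer so it gets retried). Otherwise
 * unreliable packets go out first, then reliable packets as long as fewer
 * than tx_win of them are awaiting an ack; a sent reliable packet is parked
 * on the unack queue with the ack timeout armed. If nothing else is pending
 * but an ack is owed, a bare HCI_3WIRE_ACK_PKT is generated.
 */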
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;
		BT_DBG("Sending wakeup request");

		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		skb_queue_head(&h5->unrel, skb);
		bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}

static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}

static const struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.name		= "Three-wire (H5)",
	.open		= h5_open,
	.close		= h5_close,
	.setup		= h5_setup,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};

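/*
 * serdev glue: struct h5 is allocated at probe time and the vendor-specific
 * hooks (plus the optional enable and device-wake GPIOs) are selected from
 * the ACPI or devicetree match data before the HCI UART device is registered.
 */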
static int h5_serdev_probe(struct serdev_device *serdev)
{
	struct device *dev = &serdev->dev;
	struct h5 *h5;
	const struct h5_device_data *data;

	h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	h5->hu = &h5->serdev_hu;
	h5->serdev_hu.serdev = serdev;
	serdev_device_set_drvdata(serdev, h5);

	if (has_acpi_companion(dev)) {
		const struct acpi_device_id *match;

		match = acpi_match_device(dev->driver->acpi_match_table, dev);
		if (!match)
			return -ENODEV;

		data = (const struct h5_device_data *)match->driver_data;
		h5->vnd = data->vnd;
		h5->id  = (char *)match->id;

		if (h5->vnd->acpi_gpio_map)
			devm_acpi_dev_add_driver_gpios(dev,
						       h5->vnd->acpi_gpio_map);
	} else {
		data = of_device_get_match_data(dev);
		if (!data)
			return -ENODEV;

		h5->vnd = data->vnd;
	}

	if (data->driver_info & H5_INFO_WAKEUP_DISABLE)
		set_bit(H5_WAKEUP_DISABLE, &h5->flags);

	h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(h5->enable_gpio))
		return PTR_ERR(h5->enable_gpio);

	h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
						       GPIOD_OUT_LOW);
	if (IS_ERR(h5->device_wake_gpio))
		return PTR_ERR(h5->device_wake_gpio);

	return hci_uart_register_device(&h5->serdev_hu, &h5p);
}

static void h5_serdev_remove(struct serdev_device *serdev)
{
	struct h5 *h5 = serdev_device_get_drvdata(serdev);

	hci_uart_unregister_device(&h5->serdev_hu);
}

static int __maybe_unused h5_serdev_suspend(struct device *dev)
{
	struct h5 *h5 = dev_get_drvdata(dev);
	int ret = 0;

	if (h5->vnd && h5->vnd->suspend)
		ret = h5->vnd->suspend(h5);

	return ret;
}

static int __maybe_unused h5_serdev_resume(struct device *dev)
{
	struct h5 *h5 = dev_get_drvdata(dev);
	int ret = 0;

	if (h5->vnd && h5->vnd->resume)
		ret = h5->vnd->resume(h5);

	return ret;
}

#ifdef CONFIG_BT_HCIUART_RTL
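/*
 * Realtek-specific setup: identify the controller, query the UART settings
 * required by its firmware, switch the controller's baud rate with the
 * vendor-specific HCI command 0xfc17, reconfigure the host UART to match,
 * and finally download the firmware.
 */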
static int h5_btrtl_setup(struct h5 *h5)
{
	struct btrtl_device_info *btrtl_dev;
	struct sk_buff *skb;
	__le32 baudrate_data;
	u32 device_baudrate;
	unsigned int controller_baudrate;
	bool flow_control;
	int err;

	btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
	if (IS_ERR(btrtl_dev))
		return PTR_ERR(btrtl_dev);

	err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
				      &controller_baudrate, &device_baudrate,
				      &flow_control);
	if (err)
		goto out_free;

	baudrate_data = cpu_to_le32(device_baudrate);
	skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
			     &baudrate_data, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
		err = PTR_ERR(skb);
		goto out_free;
	} else {
		kfree_skb(skb);
	}
	/* Give the device some time to set up the new baudrate. */
	usleep_range(10000, 20000);

	serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
	serdev_device_set_flow_control(h5->hu->serdev, flow_control);

	if (flow_control)
		set_bit(H5_HW_FLOW_CONTROL, &h5->flags);

	err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
	/* Give the device some time before the hci-core sends it a reset */
	usleep_range(10000, 20000);

	btrtl_set_quirks(h5->hu->hdev, btrtl_dev);

out_free:
	btrtl_free(btrtl_dev);

	return err;
}

static void h5_btrtl_open(struct h5 *h5)
{
	/*
	 * Since h5_btrtl_resume() does a device_reprobe() the suspend handling
	 * done by the hci_suspend_notifier is not necessary; it actually causes
	 * delays and a bunch of errors to get logged, so disable it.
	 */
	if (test_bit(H5_WAKEUP_DISABLE, &h5->flags))
		set_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &h5->hu->flags);

	/* Devices always start with these fixed parameters */
	serdev_device_set_flow_control(h5->hu->serdev, false);
	serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
	serdev_device_set_baudrate(h5->hu->serdev, 115200);

	if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
		pm_runtime_set_active(&h5->hu->serdev->dev);
		pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
		pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
						 SUSPEND_TIMEOUT_MS);
		pm_runtime_enable(&h5->hu->serdev->dev);
	}

	/* The controller needs reset to startup */
	gpiod_set_value_cansleep(h5->enable_gpio, 0);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
	msleep(100);

	/* The controller needs up to 500ms to wakeup */
	gpiod_set_value_cansleep(h5->enable_gpio, 1);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
	msleep(500);
}

static void h5_btrtl_close(struct h5 *h5)
{
	if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags))
		pm_runtime_disable(&h5->hu->serdev->dev);

	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
	gpiod_set_value_cansleep(h5->enable_gpio, 0);
}

/* Suspend/resume support. On many devices the RTL BT device loses power during
 * suspend/resume, causing it to lose its firmware and all state. So we simply
 * turn it off on suspend and reprobe on resume. This mirrors how RTL devices
 * are handled in the USB driver, where the BTUSB_WAKEUP_DISABLE is used which
 * also causes a reprobe on resume.
 */
static int h5_btrtl_suspend(struct h5 *h5)
{
	serdev_device_set_flow_control(h5->hu->serdev, false);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);

	if (test_bit(H5_WAKEUP_DISABLE, &h5->flags))
		gpiod_set_value_cansleep(h5->enable_gpio, 0);

	return 0;
}

struct h5_btrtl_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void h5_btrtl_reprobe_worker(struct work_struct *work)
{
	struct h5_btrtl_reprobe *reprobe =
		container_of(work, struct h5_btrtl_reprobe, work);
	int ret;

	ret = device_reprobe(reprobe->dev);
	if (ret && ret != -EPROBE_DEFER)
		dev_err(reprobe->dev, "Reprobe error %d\n", ret);

	put_device(reprobe->dev);
	kfree(reprobe);
	module_put(THIS_MODULE);
}

static int h5_btrtl_resume(struct h5 *h5)
{
	if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
		struct h5_btrtl_reprobe *reprobe;

		reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
		if (!reprobe)
			return -ENOMEM;

		__module_get(THIS_MODULE);

		INIT_WORK(&reprobe->work, h5_btrtl_reprobe_worker);
		reprobe->dev = get_device(&h5->hu->serdev->dev);
		queue_work(system_long_wq, &reprobe->work);
	} else {
		gpiod_set_value_cansleep(h5->device_wake_gpio, 1);

		if (test_bit(H5_HW_FLOW_CONTROL, &h5->flags))
			serdev_device_set_flow_control(h5->hu->serdev, true);
	}

	return 0;
}

static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
	{ "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
	{ "enable-gpios", &btrtl_enable_gpios, 1 },
	{ "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
	{},
};

static struct h5_vnd rtl_vnd = {
	.setup		= h5_btrtl_setup,
	.open		= h5_btrtl_open,
	.close		= h5_btrtl_close,
	.suspend	= h5_btrtl_suspend,
	.resume		= h5_btrtl_resume,
	.acpi_gpio_map	= acpi_btrtl_gpios,
};

static const struct h5_device_data h5_data_rtl8822cs = {
	.vnd = &rtl_vnd,
};

static const struct h5_device_data h5_data_rtl8723bs = {
	.driver_info = H5_INFO_WAKEUP_DISABLE,
	.vnd = &rtl_vnd,
};
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id h5_acpi_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
	{ "OBDA0623", (kernel_ulong_t)&h5_data_rtl8723bs },
	{ "OBDA8723", (kernel_ulong_t)&h5_data_rtl8723bs },
#endif
	{ },
};
MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
#endif

static const struct dev_pm_ops h5_serdev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(h5_serdev_suspend, h5_serdev_resume)
	SET_RUNTIME_PM_OPS(h5_serdev_suspend, h5_serdev_resume, NULL)
};

static const struct of_device_id rtl_bluetooth_of_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
	{ .compatible = "realtek,rtl8822cs-bt",
	  .data = (const void *)&h5_data_rtl8822cs },
	{ .compatible = "realtek,rtl8723bs-bt",
	  .data = (const void *)&h5_data_rtl8723bs },
	{ .compatible = "realtek,rtl8723ds-bt",
	  .data = (const void *)&h5_data_rtl8723bs },
#endif
	{ },
};
MODULE_DEVICE_TABLE(of, rtl_bluetooth_of_match);

static struct serdev_device_driver h5_serdev_driver = {
	.probe = h5_serdev_probe,
	.remove = h5_serdev_remove,
	.driver = {
		.name = "hci_uart_h5",
		.acpi_match_table = ACPI_PTR(h5_acpi_match),
		.pm = &h5_serdev_pm_ops,
		.of_match_table = rtl_bluetooth_of_match,
	},
};

int __init h5_init(void)
{
	serdev_device_driver_register(&h5_serdev_driver);
	return hci_uart_register_proto(&h5p);
}

int __exit h5_deinit(void)
{
	serdev_device_driver_unregister(&h5_serdev_driver);
	return hci_uart_unregister_proto(&h5p);
}