cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ec.c (60756B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 *  ec.c - ACPI Embedded Controller Driver (v3)
      4 *
      5 *  Copyright (C) 2001-2015 Intel Corporation
      6 *    Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
      7 *            2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
      8 *            2006       Denis Sadykov <denis.m.sadykov@intel.com>
      9 *            2004       Luming Yu <luming.yu@intel.com>
     10 *            2001, 2002 Andy Grover <andrew.grover@intel.com>
     11 *            2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
     12 *  Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
     13 */
     14
     15/* Uncomment next line to get verbose printout */
     16/* #define DEBUG */
     17#define pr_fmt(fmt) "ACPI: EC: " fmt
     18
     19#include <linux/kernel.h>
     20#include <linux/module.h>
     21#include <linux/init.h>
     22#include <linux/types.h>
     23#include <linux/delay.h>
     24#include <linux/interrupt.h>
     25#include <linux/list.h>
     26#include <linux/spinlock.h>
     27#include <linux/slab.h>
     28#include <linux/suspend.h>
     29#include <linux/acpi.h>
     30#include <linux/dmi.h>
     31#include <asm/io.h>
     32
     33#include "internal.h"
     34
     35#define ACPI_EC_CLASS			"embedded_controller"
     36#define ACPI_EC_DEVICE_NAME		"Embedded Controller"
     37
     38/* EC status register */
     39#define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
     40#define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
     41#define ACPI_EC_FLAG_CMD	0x08	/* Input buffer contains a command */
     42#define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
     43#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */
     44
     45/*
     46 * The SCI_EVT clearing timing is not defined by the ACPI specification.
     47 * This leads to lots of practical timing issues for the host EC driver.
     48 * The following variations are defined (from the target EC firmware's
     49 * perspective):
     50 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
     51 *         target can clear SCI_EVT at any time so long as the host can see
     52 *         the indication by reading the status register (EC_SC). So the
     53 *         host should re-check SCI_EVT after the first time the SCI_EVT
     54 *         indication is seen, which is the same time the query request
     55 *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
     56 *         at any later time could indicate another event. Normally
     57 *         such EC firmware implements an event queue and will return
     58 *         0x00 to indicate "no outstanding event".
     59 * QUERY: After seeing the query request (QR_EC) written to the command
     60 *        register (EC_CMD) by the host and having prepared the responding
     61 *        event value in the data register (EC_DATA), the target can safely
     62 *        clear SCI_EVT because the target can confirm that the current
     63 *        event is being handled by the host. The host then should check
     64 *        SCI_EVT right after reading the event response from the data
     65 *        register (EC_DATA).
     66 * EVENT: After seeing the event response read from the data register
     67 *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
     68 *        target requires time to notice the change in the data register
     69 *        (EC_DATA), the host may be required to wait additional guarding
     70 *        time before checking the SCI_EVT again. Such guarding may not be
     71 *        necessary if the host is notified via another IRQ.
     72 */
     73#define ACPI_EC_EVT_TIMING_STATUS	0x00
     74#define ACPI_EC_EVT_TIMING_QUERY	0x01
     75#define ACPI_EC_EVT_TIMING_EVENT	0x02
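
/*
 * For illustration (editorial sketch, not part of the driver):
 * ec_transaction_transition() below maps these modes onto the point at
 * which the event is closed or completed:
 *
 *	STATUS: acpi_ec_close_event() on ACPI_EC_COMMAND_POLL, i.e. as
 *	        soon as QR_EC has been accepted by the EC;
 *	QUERY:  acpi_ec_close_event() on ACPI_EC_COMMAND_COMPLETE, i.e.
 *	        once the event value has been read from EC_DATA;
 *	EVENT:  acpi_ec_complete_event() on ACPI_EC_COMMAND_COMPLETE, with
 *	        the final close deferred to ec_guard() and
 *	        advance_transaction().
 */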
     76
     77/* EC commands */
     78enum ec_command {
     79	ACPI_EC_COMMAND_READ = 0x80,
     80	ACPI_EC_COMMAND_WRITE = 0x81,
     81	ACPI_EC_BURST_ENABLE = 0x82,
     82	ACPI_EC_BURST_DISABLE = 0x83,
     83	ACPI_EC_COMMAND_QUERY = 0x84,
     84};
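
/*
 * For illustration (a sketch following the ACPI specification; EC_CMD and
 * EC_DATA stand for ec->command_addr and ec->data_addr): a one-byte
 * register read with ACPI_EC_COMMAND_READ is three port accesses,
 *
 *	outb(0x80, EC_CMD);	// RD_EC; wait for IBF=0 first
 *	outb(addr, EC_DATA);	// register address; wait for IBF=0
 *	val = inb(EC_DATA);	// wait for OBF=1, then read
 *
 * with the IBF/OBF handshaking around each access implemented by
 * advance_transaction() below.
 */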
     85
     86#define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
     87#define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
     88#define ACPI_EC_UDELAY_POLL	550	/* Default guard time(us) between EC accesses when polling */
     89#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
     90					 * when trying to clear the EC */
     91#define ACPI_EC_MAX_QUERIES	16	/* Maximum number of parallel queries */
     92
     93enum {
     94	EC_FLAGS_QUERY_ENABLED,		/* Query is enabled */
     95	EC_FLAGS_EVENT_HANDLER_INSTALLED,	/* Event handler installed */
     96	EC_FLAGS_EC_HANDLER_INSTALLED,	/* OpReg handler installed */
     97	EC_FLAGS_QUERY_METHODS_INSTALLED, /* _Qxx handlers installed */
     98	EC_FLAGS_STARTED,		/* Driver is started */
     99	EC_FLAGS_STOPPED,		/* Driver is stopped */
    100	EC_FLAGS_EVENTS_MASKED,		/* Events masked */
    101};
    102
    103#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
    104#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */
    105
    106/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
    107static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
    108module_param(ec_delay, uint, 0644);
    109MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
    110
    111static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
    112module_param(ec_max_queries, uint, 0644);
    113MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
    114
    115static bool ec_busy_polling __read_mostly;
    116module_param(ec_busy_polling, bool, 0644);
    117MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
    118
    119static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
    120module_param(ec_polling_guard, uint, 0644);
    121MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
    122
    123static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
    124
    125/*
    126 * If the number of false interrupts per transaction exceeds this
    127 * threshold, the driver assumes that a GPE storm has occurred and
    128 * disables the GPE for the current transaction.
    129 */
    130static unsigned int ec_storm_threshold  __read_mostly = 8;
    131module_param(ec_storm_threshold, uint, 0644);
    132MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered a GPE storm");
    133
    134static bool ec_freeze_events __read_mostly;
    135module_param(ec_freeze_events, bool, 0644);
    136MODULE_PARM_DESC(ec_freeze_events, "Disable event handling during suspend/resume");
    137
    138static bool ec_no_wakeup __read_mostly;
    139module_param(ec_no_wakeup, bool, 0644);
    140MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
    141
    142struct acpi_ec_query_handler {
    143	struct list_head node;
    144	acpi_ec_query_func func;
    145	acpi_handle handle;
    146	void *data;
    147	u8 query_bit;
    148	struct kref kref;
    149};
    150
    151struct transaction {
    152	const u8 *wdata;
    153	u8 *rdata;
    154	unsigned short irq_count;
    155	u8 command;
    156	u8 wi;
    157	u8 ri;
    158	u8 wlen;
    159	u8 rlen;
    160	u8 flags;
    161};
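
/*
 * For illustration: the RD_EC handshake above maps onto this structure as
 * a one-byte write (the register address) followed by a one-byte read:
 *
 *	u8 addr = 0x50, val;	// hypothetical EC register
 *	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
 *				.wdata = &addr, .rdata = &val,
 *				.wlen = 1, .rlen = 1};
 *
 * wi/ri track how many bytes have been transferred so far, and flags
 * holds the ACPI_EC_COMMAND_POLL/ACPI_EC_COMMAND_COMPLETE state bits.
 */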
    162
    163struct acpi_ec_query {
    164	struct transaction transaction;
    165	struct work_struct work;
    166	struct acpi_ec_query_handler *handler;
    167	struct acpi_ec *ec;
    168};
    169
    170static int acpi_ec_submit_query(struct acpi_ec *ec);
    171static void advance_transaction(struct acpi_ec *ec, bool interrupt);
    172static void acpi_ec_event_handler(struct work_struct *work);
    173
    174struct acpi_ec *first_ec;
    175EXPORT_SYMBOL(first_ec);
    176
    177static struct acpi_ec *boot_ec;
    178static bool boot_ec_is_ecdt;
    179static struct workqueue_struct *ec_wq;
    180static struct workqueue_struct *ec_query_wq;
    181
    182static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
    183static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
    184static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */
    185static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
    186
    187/* --------------------------------------------------------------------------
    188 *                           Logging/Debugging
    189 * -------------------------------------------------------------------------- */
    190
    191/*
    192 * Separators used by developers to mark the boundaries of the EC
    193 * handling processes in the logs.
    194 */
    195#ifdef DEBUG
    196#define EC_DBG_SEP	" "
    197#define EC_DBG_DRV	"+++++"
    198#define EC_DBG_STM	"====="
    199#define EC_DBG_REQ	"*****"
    200#define EC_DBG_EVT	"#####"
    201#else
    202#define EC_DBG_SEP	""
    203#define EC_DBG_DRV
    204#define EC_DBG_STM
    205#define EC_DBG_REQ
    206#define EC_DBG_EVT
    207#endif
    208
    209#define ec_log_raw(fmt, ...) \
    210	pr_info(fmt "\n", ##__VA_ARGS__)
    211#define ec_dbg_raw(fmt, ...) \
    212	pr_debug(fmt "\n", ##__VA_ARGS__)
    213#define ec_log(filter, fmt, ...) \
    214	ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
    215#define ec_dbg(filter, fmt, ...) \
    216	ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
    217
    218#define ec_log_drv(fmt, ...) \
    219	ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
    220#define ec_dbg_drv(fmt, ...) \
    221	ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
    222#define ec_dbg_stm(fmt, ...) \
    223	ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
    224#define ec_dbg_req(fmt, ...) \
    225	ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
    226#define ec_dbg_evt(fmt, ...) \
    227	ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
    228#define ec_dbg_ref(ec, fmt, ...) \
    229	ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
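
/*
 * For illustration (assuming DEBUG is defined): ec_dbg_drv("Starting EC")
 * expands to pr_debug("+++++ Starting EC +++++\n"), so the "+++++"
 * separators bracket driver messages while "=====", "*****" and "#####"
 * bracket state machine, request and event messages respectively.
 */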
    230
    231/* --------------------------------------------------------------------------
    232 *                           Device Flags
    233 * -------------------------------------------------------------------------- */
    234
    235static bool acpi_ec_started(struct acpi_ec *ec)
    236{
    237	return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
    238	       !test_bit(EC_FLAGS_STOPPED, &ec->flags);
    239}
    240
    241static bool acpi_ec_event_enabled(struct acpi_ec *ec)
    242{
    243	/*
    244	 * During the early stages (boot/resume), OSPM should not enable
    245	 * the event handling; only the EC transactions are allowed to be
    246	 * performed.
    247	 */
    248	if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
    249		return false;
    250	/*
    251	 * However, disabling the event handling is experimental for the
    252	 * late stage (suspend), and is controlled by the "ec_freeze_events"
    253	 * boot parameter:
    254	 * 1. true:  The EC event handling is disabled before entering
    255	 *           the noirq stage.
    256	 * 2. false: The EC event handling is automatically disabled as
    257	 *           soon as the EC driver is stopped.
    258	 */
    259	if (ec_freeze_events)
    260		return acpi_ec_started(ec);
    261	else
    262		return test_bit(EC_FLAGS_STARTED, &ec->flags);
    263}
    264
    265static bool acpi_ec_flushed(struct acpi_ec *ec)
    266{
    267	return ec->reference_count == 1;
    268}
    269
    270/* --------------------------------------------------------------------------
    271 *                           EC Registers
    272 * -------------------------------------------------------------------------- */
    273
    274static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
    275{
    276	u8 x = inb(ec->command_addr);
    277
    278	ec_dbg_raw("EC_SC(R) = 0x%2.2x "
    279		   "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
    280		   x,
    281		   !!(x & ACPI_EC_FLAG_SCI),
    282		   !!(x & ACPI_EC_FLAG_BURST),
    283		   !!(x & ACPI_EC_FLAG_CMD),
    284		   !!(x & ACPI_EC_FLAG_IBF),
    285		   !!(x & ACPI_EC_FLAG_OBF));
    286	return x;
    287}
    288
    289static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
    290{
    291	u8 x = inb(ec->data_addr);
    292
    293	ec->timestamp = jiffies;
    294	ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
    295	return x;
    296}
    297
    298static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
    299{
    300	ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
    301	outb(command, ec->command_addr);
    302	ec->timestamp = jiffies;
    303}
    304
    305static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
    306{
    307	ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
    308	outb(data, ec->data_addr);
    309	ec->timestamp = jiffies;
    310}
    311
    312#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
    313static const char *acpi_ec_cmd_string(u8 cmd)
    314{
    315	switch (cmd) {
    316	case 0x80:
    317		return "RD_EC";
    318	case 0x81:
    319		return "WR_EC";
    320	case 0x82:
    321		return "BE_EC";
    322	case 0x83:
    323		return "BD_EC";
    324	case 0x84:
    325		return "QR_EC";
    326	}
    327	return "UNKNOWN";
    328}
    329#else
    330#define acpi_ec_cmd_string(cmd)		"UNDEF"
    331#endif
    332
    333/* --------------------------------------------------------------------------
    334 *                           GPE Registers
    335 * -------------------------------------------------------------------------- */
    336
    337static inline bool acpi_ec_gpe_status_set(struct acpi_ec *ec)
    338{
    339	acpi_event_status gpe_status = 0;
    340
    341	(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
    342	return !!(gpe_status & ACPI_EVENT_FLAG_STATUS_SET);
    343}
    344
    345static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
    346{
    347	if (open)
    348		acpi_enable_gpe(NULL, ec->gpe);
    349	else {
    350		BUG_ON(ec->reference_count < 1);
    351		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
    352	}
    353	if (acpi_ec_gpe_status_set(ec)) {
    354		/*
    355		 * On some platforms, EN=1 writes cannot trigger a GPE, so
    356		 * software needs to manually trigger a pseudo GPE event on
    357		 * EN=1 writes.
    358		 */
    359		ec_dbg_raw("Polling quirk");
    360		advance_transaction(ec, false);
    361	}
    362}
    363
    364static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
    365{
    366	if (close)
    367		acpi_disable_gpe(NULL, ec->gpe);
    368	else {
    369		BUG_ON(ec->reference_count < 1);
    370		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
    371	}
    372}
    373
    374/* --------------------------------------------------------------------------
    375 *                           Transaction Management
    376 * -------------------------------------------------------------------------- */
    377
    378static void acpi_ec_submit_request(struct acpi_ec *ec)
    379{
    380	ec->reference_count++;
    381	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
    382	    ec->gpe >= 0 && ec->reference_count == 1)
    383		acpi_ec_enable_gpe(ec, true);
    384}
    385
    386static void acpi_ec_complete_request(struct acpi_ec *ec)
    387{
    388	bool flushed = false;
    389
    390	ec->reference_count--;
    391	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
    392	    ec->gpe >= 0 && ec->reference_count == 0)
    393		acpi_ec_disable_gpe(ec, true);
    394	flushed = acpi_ec_flushed(ec);
    395	if (flushed)
    396		wake_up(&ec->wait);
    397}
    398
    399static void acpi_ec_mask_events(struct acpi_ec *ec)
    400{
    401	if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
    402		if (ec->gpe >= 0)
    403			acpi_ec_disable_gpe(ec, false);
    404		else
    405			disable_irq_nosync(ec->irq);
    406
    407		ec_dbg_drv("Polling enabled");
    408		set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
    409	}
    410}
    411
    412static void acpi_ec_unmask_events(struct acpi_ec *ec)
    413{
    414	if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
    415		clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
    416		if (ec->gpe >= 0)
    417			acpi_ec_enable_gpe(ec, false);
    418		else
    419			enable_irq(ec->irq);
    420
    421		ec_dbg_drv("Polling disabled");
    422	}
    423}
    424
    425/*
    426 * acpi_ec_submit_flushable_request() - Increase the reference count
    427 *                                      unless a flush operation is in
    428 *                                      progress
    429 * @ec: the EC device
    430 *
    431 * This function must be used before taking a new action that should hold
    432 * the reference count.  If this function returns false, then the action
    433 * must be discarded or it will prevent the flush operation from being
    434 * completed.
    435 */
    436static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
    437{
    438	if (!acpi_ec_started(ec))
    439		return false;
    440	acpi_ec_submit_request(ec);
    441	return true;
    442}
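
/*
 * Typical usage pattern (a sketch of what acpi_ec_transaction_unlocked()
 * below does under ec->lock):
 *
 *	if (!acpi_ec_submit_flushable_request(ec))
 *		return -EINVAL;		// EC stopped or being flushed
 *	// ... perform the action that holds the reference ...
 *	acpi_ec_complete_request(ec);	// drop it; may wake up the flusher
 */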
    443
    444static void acpi_ec_submit_event(struct acpi_ec *ec)
    445{
    446	/*
    447	 * It is safe to mask the events here, because acpi_ec_close_event()
    448	 * will run at least once after this.
    449	 */
    450	acpi_ec_mask_events(ec);
    451	if (!acpi_ec_event_enabled(ec))
    452		return;
    453
    454	if (ec->event_state != EC_EVENT_READY)
    455		return;
    456
    457	ec_dbg_evt("Command(%s) submitted/blocked",
    458		   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
    459
    460	ec->event_state = EC_EVENT_IN_PROGRESS;
    461	/*
    462	 * If events_to_process is greater than 0 at this point, the while ()
    463	 * loop in acpi_ec_event_handler() is still running and incrementing
    464	 * events_to_process will cause it to invoke acpi_ec_submit_query() once
    465	 * more, so it is not necessary to queue up the event work to start the
    466	 * same loop again.
    467	 */
    468	if (ec->events_to_process++ > 0)
    469		return;
    470
    471	ec->events_in_progress++;
    472	queue_work(ec_wq, &ec->work);
    473}
    474
    475static void acpi_ec_complete_event(struct acpi_ec *ec)
    476{
    477	if (ec->event_state == EC_EVENT_IN_PROGRESS)
    478		ec->event_state = EC_EVENT_COMPLETE;
    479}
    480
    481static void acpi_ec_close_event(struct acpi_ec *ec)
    482{
    483	if (ec->event_state != EC_EVENT_READY)
    484		ec_dbg_evt("Command(%s) unblocked",
    485			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
    486
    487	ec->event_state = EC_EVENT_READY;
    488	acpi_ec_unmask_events(ec);
    489}
    490
    491static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
    492{
    493	if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
    494		ec_log_drv("event unblocked");
    495	/*
    496	 * Unconditionally invoke this once after enabling the event
    497	 * handling mechanism to detect the pending events.
    498	 */
    499	advance_transaction(ec, false);
    500}
    501
    502static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
    503{
    504	if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
    505		ec_log_drv("event blocked");
    506}
    507
    508/*
    509 * Process _Q events that might have accumulated in the EC.
    510 * Run with locked ec mutex.
    511 */
    512static void acpi_ec_clear(struct acpi_ec *ec)
    513{
    514	int i;
    515
    516	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
    517		if (acpi_ec_submit_query(ec))
    518			break;
    519	}
    520	if (unlikely(i == ACPI_EC_CLEAR_MAX))
    521		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
    522	else
    523		pr_info("%d stale EC events cleared\n", i);
    524}
    525
    526static void acpi_ec_enable_event(struct acpi_ec *ec)
    527{
    528	unsigned long flags;
    529
    530	spin_lock_irqsave(&ec->lock, flags);
    531	if (acpi_ec_started(ec))
    532		__acpi_ec_enable_event(ec);
    533	spin_unlock_irqrestore(&ec->lock, flags);
    534
    535	/* Drain additional events if hardware requires that */
    536	if (EC_FLAGS_CLEAR_ON_RESUME)
    537		acpi_ec_clear(ec);
    538}
    539
    540#ifdef CONFIG_PM_SLEEP
    541static void __acpi_ec_flush_work(void)
    542{
    543	flush_workqueue(ec_wq); /* flush ec->work */
    544	flush_workqueue(ec_query_wq); /* flush queries */
    545}
    546
    547static void acpi_ec_disable_event(struct acpi_ec *ec)
    548{
    549	unsigned long flags;
    550
    551	spin_lock_irqsave(&ec->lock, flags);
    552	__acpi_ec_disable_event(ec);
    553	spin_unlock_irqrestore(&ec->lock, flags);
    554
    555	/*
    556	 * When ec_freeze_events is true, we need to flush events at the
    557	 * proper point, before entering the noirq stage.
    558	 */
    559	__acpi_ec_flush_work();
    560}
    561
    562void acpi_ec_flush_work(void)
    563{
    564	/* Without ec_wq there is nothing to flush. */
    565	if (!ec_wq)
    566		return;
    567
    568	__acpi_ec_flush_work();
    569}
    570#endif /* CONFIG_PM_SLEEP */
    571
    572static bool acpi_ec_guard_event(struct acpi_ec *ec)
    573{
    574	unsigned long flags;
    575	bool guarded;
    576
    577	spin_lock_irqsave(&ec->lock, flags);
    578	/*
    579	 * If firmware SCI_EVT clearing timing is "event", we actually
    580	 * don't know when the SCI_EVT will be cleared by firmware after
    581	 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
    582	 * acceptable period.
    583	 *
    584	 * The guarding period is applicable if the event state is not
    585	 * EC_EVENT_READY, but otherwise if the current transaction is of the
    586	 * ACPI_EC_COMMAND_QUERY type, the guarding should have elapsed already
    587	 * and it should not be applied to let the transaction transition into
    588	 * the ACPI_EC_COMMAND_POLL state immediately.
    589	 */
    590	guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
    591		ec->event_state != EC_EVENT_READY &&
    592		(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
    593	spin_unlock_irqrestore(&ec->lock, flags);
    594	return guarded;
    595}
    596
    597static int ec_transaction_polled(struct acpi_ec *ec)
    598{
    599	unsigned long flags;
    600	int ret = 0;
    601
    602	spin_lock_irqsave(&ec->lock, flags);
    603	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
    604		ret = 1;
    605	spin_unlock_irqrestore(&ec->lock, flags);
    606	return ret;
    607}
    608
    609static int ec_transaction_completed(struct acpi_ec *ec)
    610{
    611	unsigned long flags;
    612	int ret = 0;
    613
    614	spin_lock_irqsave(&ec->lock, flags);
    615	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
    616		ret = 1;
    617	spin_unlock_irqrestore(&ec->lock, flags);
    618	return ret;
    619}
    620
    621static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
    622{
    623	ec->curr->flags |= flag;
    624
    625	if (ec->curr->command != ACPI_EC_COMMAND_QUERY)
    626		return;
    627
    628	switch (ec_event_clearing) {
    629	case ACPI_EC_EVT_TIMING_STATUS:
    630		if (flag == ACPI_EC_COMMAND_POLL)
    631			acpi_ec_close_event(ec);
    632
    633		return;
    634
    635	case ACPI_EC_EVT_TIMING_QUERY:
    636		if (flag == ACPI_EC_COMMAND_COMPLETE)
    637			acpi_ec_close_event(ec);
    638
    639		return;
    640
    641	case ACPI_EC_EVT_TIMING_EVENT:
    642		if (flag == ACPI_EC_COMMAND_COMPLETE)
    643			acpi_ec_complete_event(ec);
    644	}
    645}
    646
    647static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t)
    648{
    649	if (t->irq_count < ec_storm_threshold)
    650		++t->irq_count;
    651
    652	/* Trigger if the threshold is 0 too. */
    653	if (t->irq_count == ec_storm_threshold)
    654		acpi_ec_mask_events(ec);
    655}
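
/*
 * For illustration: with the default ec_storm_threshold of 8, the 8th
 * spurious interrupt seen within one transaction masks the EC events via
 * acpi_ec_mask_events(), so the transaction falls back to polling;
 * acpi_ec_transaction_unlocked() unmasks the events again once the
 * transaction has finished.
 */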
    656
    657static void advance_transaction(struct acpi_ec *ec, bool interrupt)
    658{
    659	struct transaction *t = ec->curr;
    660	bool wakeup = false;
    661	u8 status;
    662
    663	ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());
    664
    665	/*
    666	 * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1
    667	 * changes to always trigger a GPE interrupt.
    668	 *
    669	 * GPE STS is a W1C register, which means:
    670	 *
    671	 * 1. Software can clear it without worrying about clearing the other
    672	 *    GPEs' STS bits when the hardware sets them in parallel.
    673	 *
    674	 * 2. As long as software can ensure only clearing it when it is set,
    675	 *    hardware won't set it in parallel.
    676	 */
    677	if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
    678		acpi_clear_gpe(NULL, ec->gpe);
    679
    680	status = acpi_ec_read_status(ec);
    681
    682	/*
    683	 * If another IRQ or a guarded polling mode advancement is detected,
    684	 * the next QR_EC submission is then allowed.
    685	 */
    686	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
    687		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
    688		    ec->event_state == EC_EVENT_COMPLETE)
    689			acpi_ec_close_event(ec);
    690
    691		if (!t)
    692			goto out;
    693	}
    694
    695	if (t->flags & ACPI_EC_COMMAND_POLL) {
    696		if (t->wlen > t->wi) {
    697			if (!(status & ACPI_EC_FLAG_IBF))
    698				acpi_ec_write_data(ec, t->wdata[t->wi++]);
    699			else if (interrupt && !(status & ACPI_EC_FLAG_SCI))
    700				acpi_ec_spurious_interrupt(ec, t);
    701		} else if (t->rlen > t->ri) {
    702			if (status & ACPI_EC_FLAG_OBF) {
    703				t->rdata[t->ri++] = acpi_ec_read_data(ec);
    704				if (t->rlen == t->ri) {
    705					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
    706					wakeup = true;
    707					if (t->command == ACPI_EC_COMMAND_QUERY)
    708						ec_dbg_evt("Command(%s) completed by hardware",
    709							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
    710				}
    711			} else if (interrupt && !(status & ACPI_EC_FLAG_SCI)) {
    712				acpi_ec_spurious_interrupt(ec, t);
    713			}
    714		} else if (t->wlen == t->wi && !(status & ACPI_EC_FLAG_IBF)) {
    715			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
    716			wakeup = true;
    717		}
    718	} else if (!(status & ACPI_EC_FLAG_IBF)) {
    719		acpi_ec_write_cmd(ec, t->command);
    720		ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
    721	}
    722
    723out:
    724	if (status & ACPI_EC_FLAG_SCI)
    725		acpi_ec_submit_event(ec);
    726
    727	if (wakeup && interrupt)
    728		wake_up(&ec->wait);
    729}
    730
    731static void start_transaction(struct acpi_ec *ec)
    732{
    733	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
    734	ec->curr->flags = 0;
    735}
    736
    737static int ec_guard(struct acpi_ec *ec)
    738{
    739	unsigned long guard = usecs_to_jiffies(ec->polling_guard);
    740	unsigned long timeout = ec->timestamp + guard;
    741
    742	/* Ensure guarding period before polling EC status */
    743	do {
    744		if (ec->busy_polling) {
    745			/* Perform busy polling */
    746			if (ec_transaction_completed(ec))
    747				return 0;
    748			udelay(jiffies_to_usecs(guard));
    749		} else {
    750			/*
    751			 * Perform wait polling:
    752			 * 1. Wait for the transaction to be completed by
    753			 *    the GPE handler after the transaction enters
    754			 *    the ACPI_EC_COMMAND_POLL state.
    755			 * 2. Special guarding logic is also required for
    756			 *    event clearing mode "event" before the
    757			 *    transaction enters the ACPI_EC_COMMAND_POLL
    758			 *    state.
    759			 */
    760			if (!ec_transaction_polled(ec) &&
    761			    !acpi_ec_guard_event(ec))
    762				break;
    763			if (wait_event_timeout(ec->wait,
    764					       ec_transaction_completed(ec),
    765					       guard))
    766				return 0;
    767		}
    768	} while (time_before(jiffies, timeout));
    769	return -ETIME;
    770}
    771
    772static int ec_poll(struct acpi_ec *ec)
    773{
    774	unsigned long flags;
    775	int repeat = 5; /* number of command restarts */
    776
    777	while (repeat--) {
    778		unsigned long delay = jiffies +
    779			msecs_to_jiffies(ec_delay);
    780		do {
    781			if (!ec_guard(ec))
    782				return 0;
    783			spin_lock_irqsave(&ec->lock, flags);
    784			advance_transaction(ec, false);
    785			spin_unlock_irqrestore(&ec->lock, flags);
    786		} while (time_before(jiffies, delay));
    787		pr_debug("controller reset, restart transaction\n");
    788		spin_lock_irqsave(&ec->lock, flags);
    789		start_transaction(ec);
    790		spin_unlock_irqrestore(&ec->lock, flags);
    791	}
    792	return -ETIME;
    793}
    794
    795static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
    796					struct transaction *t)
    797{
    798	unsigned long tmp;
    799	int ret = 0;
    800
    801	/* start transaction */
    802	spin_lock_irqsave(&ec->lock, tmp);
    803	/* Enable GPE for command processing (IBF=0/OBF=1) */
    804	if (!acpi_ec_submit_flushable_request(ec)) {
    805		ret = -EINVAL;
    806		goto unlock;
    807	}
    808	ec_dbg_ref(ec, "Increase command");
    809	/* following two actions should be kept atomic */
    810	ec->curr = t;
    811	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
    812	start_transaction(ec);
    813	spin_unlock_irqrestore(&ec->lock, tmp);
    814
    815	ret = ec_poll(ec);
    816
    817	spin_lock_irqsave(&ec->lock, tmp);
    818	if (t->irq_count == ec_storm_threshold)
    819		acpi_ec_unmask_events(ec);
    820	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
    821	ec->curr = NULL;
    822	/* Disable GPE for command processing (IBF=0/OBF=1) */
    823	acpi_ec_complete_request(ec);
    824	ec_dbg_ref(ec, "Decrease command");
    825unlock:
    826	spin_unlock_irqrestore(&ec->lock, tmp);
    827	return ret;
    828}
    829
    830static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
    831{
    832	int status;
    833	u32 glk;
    834
    835	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
    836		return -EINVAL;
    837	if (t->rdata)
    838		memset(t->rdata, 0, t->rlen);
    839
    840	mutex_lock(&ec->mutex);
    841	if (ec->global_lock) {
    842		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
    843		if (ACPI_FAILURE(status)) {
    844			status = -ENODEV;
    845			goto unlock;
    846		}
    847	}
    848
    849	status = acpi_ec_transaction_unlocked(ec, t);
    850
    851	if (ec->global_lock)
    852		acpi_release_global_lock(glk);
    853unlock:
    854	mutex_unlock(&ec->mutex);
    855	return status;
    856}
    857
    858static int acpi_ec_burst_enable(struct acpi_ec *ec)
    859{
    860	u8 d;
    861	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
    862				.wdata = NULL, .rdata = &d,
    863				.wlen = 0, .rlen = 1};
    864
    865	return acpi_ec_transaction(ec, &t);
    866}
    867
    868static int acpi_ec_burst_disable(struct acpi_ec *ec)
    869{
    870	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
    871				.wdata = NULL, .rdata = NULL,
    872				.wlen = 0, .rlen = 0};
    873
    874	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
    875				acpi_ec_transaction(ec, &t) : 0;
    876}
    877
    878static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
    879{
    880	int result;
    881	u8 d;
    882	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
    883				.wdata = &address, .rdata = &d,
    884				.wlen = 1, .rlen = 1};
    885
    886	result = acpi_ec_transaction(ec, &t);
    887	*data = d;
    888	return result;
    889}
    890
    891static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
    892{
    893	u8 wdata[2] = { address, data };
    894	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
    895				.wdata = wdata, .rdata = NULL,
    896				.wlen = 2, .rlen = 0};
    897
    898	return acpi_ec_transaction(ec, &t);
    899}
    900
    901int ec_read(u8 addr, u8 *val)
    902{
    903	int err;
    904	u8 temp_data;
    905
    906	if (!first_ec)
    907		return -ENODEV;
    908
    909	err = acpi_ec_read(first_ec, addr, &temp_data);
    910
    911	if (!err) {
    912		*val = temp_data;
    913		return 0;
    914	}
    915	return err;
    916}
    917EXPORT_SYMBOL(ec_read);
    918
    919int ec_write(u8 addr, u8 val)
    920{
    921	int err;
    922
    923	if (!first_ec)
    924		return -ENODEV;
    925
    926	err = acpi_ec_write(first_ec, addr, val);
    927
    928	return err;
    929}
    930EXPORT_SYMBOL(ec_write);
    931
    932int ec_transaction(u8 command,
    933		   const u8 *wdata, unsigned wdata_len,
    934		   u8 *rdata, unsigned rdata_len)
    935{
    936	struct transaction t = {.command = command,
    937				.wdata = wdata, .rdata = rdata,
    938				.wlen = wdata_len, .rlen = rdata_len};
    939
    940	if (!first_ec)
    941		return -ENODEV;
    942
    943	return acpi_ec_transaction(first_ec, &t);
    944}
    945EXPORT_SYMBOL(ec_transaction);
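
/*
 * Illustrative consumer sketch (the 0x5c offset is hypothetical): platform
 * drivers use these exports to access vendor-specific EC registers, e.g.
 *
 *	u8 val;
 *
 *	if (!ec_read(0x5c, &val))
 *		ec_write(0x5c, val | 0x01);
 */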
    946
    947/* Get the handle to the EC device */
    948acpi_handle ec_get_handle(void)
    949{
    950	if (!first_ec)
    951		return NULL;
    952	return first_ec->handle;
    953}
    954EXPORT_SYMBOL(ec_get_handle);
    955
    956static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
    957{
    958	unsigned long flags;
    959
    960	spin_lock_irqsave(&ec->lock, flags);
    961	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
    962		ec_dbg_drv("Starting EC");
    963		/* Enable GPE for event processing (SCI_EVT=1) */
    964		if (!resuming) {
    965			acpi_ec_submit_request(ec);
    966			ec_dbg_ref(ec, "Increase driver");
    967		}
    968		ec_log_drv("EC started");
    969	}
    970	spin_unlock_irqrestore(&ec->lock, flags);
    971}
    972
    973static bool acpi_ec_stopped(struct acpi_ec *ec)
    974{
    975	unsigned long flags;
    976	bool flushed;
    977
    978	spin_lock_irqsave(&ec->lock, flags);
    979	flushed = acpi_ec_flushed(ec);
    980	spin_unlock_irqrestore(&ec->lock, flags);
    981	return flushed;
    982}
    983
    984static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
    985{
    986	unsigned long flags;
    987
    988	spin_lock_irqsave(&ec->lock, flags);
    989	if (acpi_ec_started(ec)) {
    990		ec_dbg_drv("Stopping EC");
    991		set_bit(EC_FLAGS_STOPPED, &ec->flags);
    992		spin_unlock_irqrestore(&ec->lock, flags);
    993		wait_event(ec->wait, acpi_ec_stopped(ec));
    994		spin_lock_irqsave(&ec->lock, flags);
    995		/* Disable GPE for event processing (SCI_EVT=1) */
    996		if (!suspending) {
    997			acpi_ec_complete_request(ec);
    998			ec_dbg_ref(ec, "Decrease driver");
    999		} else if (!ec_freeze_events)
   1000			__acpi_ec_disable_event(ec);
   1001		clear_bit(EC_FLAGS_STARTED, &ec->flags);
   1002		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
   1003		ec_log_drv("EC stopped");
   1004	}
   1005	spin_unlock_irqrestore(&ec->lock, flags);
   1006}
   1007
   1008static void acpi_ec_enter_noirq(struct acpi_ec *ec)
   1009{
   1010	unsigned long flags;
   1011
   1012	spin_lock_irqsave(&ec->lock, flags);
   1013	ec->busy_polling = true;
   1014	ec->polling_guard = 0;
   1015	ec_log_drv("interrupt blocked");
   1016	spin_unlock_irqrestore(&ec->lock, flags);
   1017}
   1018
   1019static void acpi_ec_leave_noirq(struct acpi_ec *ec)
   1020{
   1021	unsigned long flags;
   1022
   1023	spin_lock_irqsave(&ec->lock, flags);
   1024	ec->busy_polling = ec_busy_polling;
   1025	ec->polling_guard = ec_polling_guard;
   1026	ec_log_drv("interrupt unblocked");
   1027	spin_unlock_irqrestore(&ec->lock, flags);
   1028}
   1029
   1030void acpi_ec_block_transactions(void)
   1031{
   1032	struct acpi_ec *ec = first_ec;
   1033
   1034	if (!ec)
   1035		return;
   1036
   1037	mutex_lock(&ec->mutex);
   1038	/* Prevent transactions from being carried out */
   1039	acpi_ec_stop(ec, true);
   1040	mutex_unlock(&ec->mutex);
   1041}
   1042
   1043void acpi_ec_unblock_transactions(void)
   1044{
   1045	/*
   1046	 * Allow transactions to happen again (this function is called from
   1047	 * atomic context during wakeup, so we don't need to acquire the mutex).
   1048	 */
   1049	if (first_ec)
   1050		acpi_ec_start(first_ec, true);
   1051}
   1052
   1053/* --------------------------------------------------------------------------
   1054                                Event Management
   1055   -------------------------------------------------------------------------- */
   1056static struct acpi_ec_query_handler *
   1057acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
   1058{
   1059	struct acpi_ec_query_handler *handler;
   1060
   1061	mutex_lock(&ec->mutex);
   1062	list_for_each_entry(handler, &ec->list, node) {
   1063		if (value == handler->query_bit) {
   1064			kref_get(&handler->kref);
   1065			mutex_unlock(&ec->mutex);
   1066			return handler;
   1067		}
   1068	}
   1069	mutex_unlock(&ec->mutex);
   1070	return NULL;
   1071}
   1072
   1073static void acpi_ec_query_handler_release(struct kref *kref)
   1074{
   1075	struct acpi_ec_query_handler *handler =
   1076		container_of(kref, struct acpi_ec_query_handler, kref);
   1077
   1078	kfree(handler);
   1079}
   1080
   1081static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
   1082{
   1083	kref_put(&handler->kref, acpi_ec_query_handler_release);
   1084}
   1085
   1086int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
   1087			      acpi_handle handle, acpi_ec_query_func func,
   1088			      void *data)
   1089{
   1090	struct acpi_ec_query_handler *handler =
   1091	    kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
   1092
   1093	if (!handler)
   1094		return -ENOMEM;
   1095
   1096	handler->query_bit = query_bit;
   1097	handler->handle = handle;
   1098	handler->func = func;
   1099	handler->data = data;
   1100	mutex_lock(&ec->mutex);
   1101	kref_init(&handler->kref);
   1102	list_add(&handler->node, &ec->list);
   1103	mutex_unlock(&ec->mutex);
   1104	return 0;
   1105}
   1106EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
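
/*
 * Illustrative registration sketch (my_notify and data are hypothetical):
 * a driver can register a callback for a _Qxx event number, e.g.
 *
 *	static int my_notify(void *data)	// acpi_ec_query_func
 *	{
 *		// handle the 0x42 event
 *		return 0;
 *	}
 *
 *	acpi_ec_add_query_handler(first_ec, 0x42, NULL, my_notify, data);
 */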
   1107
   1108static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
   1109					  bool remove_all, u8 query_bit)
   1110{
   1111	struct acpi_ec_query_handler *handler, *tmp;
   1112	LIST_HEAD(free_list);
   1113
   1114	mutex_lock(&ec->mutex);
   1115	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
   1116		if (remove_all || query_bit == handler->query_bit) {
   1117			list_del_init(&handler->node);
   1118			list_add(&handler->node, &free_list);
   1119		}
   1120	}
   1121	mutex_unlock(&ec->mutex);
   1122	list_for_each_entry_safe(handler, tmp, &free_list, node)
   1123		acpi_ec_put_query_handler(handler);
   1124}
   1125
   1126void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
   1127{
   1128	acpi_ec_remove_query_handlers(ec, false, query_bit);
   1129}
   1130EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
   1131
   1132static void acpi_ec_event_processor(struct work_struct *work)
   1133{
   1134	struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
   1135	struct acpi_ec_query_handler *handler = q->handler;
   1136	struct acpi_ec *ec = q->ec;
   1137
   1138	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
   1139
   1140	if (handler->func)
   1141		handler->func(handler->data);
   1142	else if (handler->handle)
   1143		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
   1144
   1145	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
   1146
   1147	spin_lock_irq(&ec->lock);
   1148	ec->queries_in_progress--;
   1149	spin_unlock_irq(&ec->lock);
   1150
   1151	acpi_ec_put_query_handler(handler);
   1152	kfree(q);
   1153}
   1154
   1155static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval)
   1156{
   1157	struct acpi_ec_query *q;
   1158	struct transaction *t;
   1159
   1160	q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
   1161	if (!q)
   1162		return NULL;
   1163
   1164	INIT_WORK(&q->work, acpi_ec_event_processor);
   1165	t = &q->transaction;
   1166	t->command = ACPI_EC_COMMAND_QUERY;
   1167	t->rdata = pval;
   1168	t->rlen = 1;
   1169	q->ec = ec;
   1170	return q;
   1171}
   1172
   1173static int acpi_ec_submit_query(struct acpi_ec *ec)
   1174{
   1175	struct acpi_ec_query *q;
   1176	u8 value = 0;
   1177	int result;
   1178
   1179	q = acpi_ec_create_query(ec, &value);
   1180	if (!q)
   1181		return -ENOMEM;
   1182
   1183	/*
   1184	 * Query the EC to find out which _Qxx method we need to evaluate.
   1185	 * Note that successful completion of the query causes the ACPI_EC_SCI
   1186	 * bit to be cleared (and thus clearing the interrupt source).
   1187	 */
   1188	result = acpi_ec_transaction(ec, &q->transaction);
   1189	if (result)
   1190		goto err_exit;
   1191
   1192	if (!value) {
   1193		result = -ENODATA;
   1194		goto err_exit;
   1195	}
   1196
   1197	q->handler = acpi_ec_get_query_handler_by_value(ec, value);
   1198	if (!q->handler) {
   1199		result = -ENODATA;
   1200		goto err_exit;
   1201	}
   1202
   1203	/*
   1204	 * It is reported that _Qxx are evaluated in a parallel way on Windows:
   1205	 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
   1206	 *
   1207	 * Put this log entry before queue_work() to make it appear in the log
   1208	 * before any other messages emitted during workqueue handling.
   1209	 */
   1210	ec_dbg_evt("Query(0x%02x) scheduled", value);
   1211
   1212	spin_lock_irq(&ec->lock);
   1213
   1214	ec->queries_in_progress++;
   1215	queue_work(ec_query_wq, &q->work);
   1216
   1217	spin_unlock_irq(&ec->lock);
   1218
   1219	return 0;
   1220
   1221err_exit:
   1222	kfree(q);
   1223
   1224	return result;
   1225}
   1226
   1227static void acpi_ec_event_handler(struct work_struct *work)
   1228{
   1229	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
   1230
   1231	ec_dbg_evt("Event started");
   1232
   1233	spin_lock_irq(&ec->lock);
   1234
   1235	while (ec->events_to_process) {
   1236		spin_unlock_irq(&ec->lock);
   1237
   1238		acpi_ec_submit_query(ec);
   1239
   1240		spin_lock_irq(&ec->lock);
   1241
   1242		ec->events_to_process--;
   1243	}
   1244
   1245	/*
   1246	 * Before exiting, make sure that it will be possible to queue up the
   1247	 * event handling work again regardless of whether or not the query
   1248	 * queued up above is processed successfully.
   1249	 */
   1250	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
   1251		bool guard_timeout;
   1252
   1253		acpi_ec_complete_event(ec);
   1254
   1255		ec_dbg_evt("Event stopped");
   1256
   1257		spin_unlock_irq(&ec->lock);
   1258
   1259		guard_timeout = !!ec_guard(ec);
   1260
   1261		spin_lock_irq(&ec->lock);
   1262
   1263		/* Take care of SCI_EVT unless someone else is doing that. */
   1264		if (guard_timeout && !ec->curr)
   1265			advance_transaction(ec, false);
   1266	} else {
   1267		acpi_ec_close_event(ec);
   1268
   1269		ec_dbg_evt("Event stopped");
   1270	}
   1271
   1272	ec->events_in_progress--;
   1273
   1274	spin_unlock_irq(&ec->lock);
   1275}
   1276
   1277static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
   1278{
   1279	unsigned long flags;
   1280
   1281	spin_lock_irqsave(&ec->lock, flags);
   1282	advance_transaction(ec, true);
   1283	spin_unlock_irqrestore(&ec->lock, flags);
   1284}
   1285
   1286static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
   1287			       u32 gpe_number, void *data)
   1288{
   1289	acpi_ec_handle_interrupt(data);
   1290	return ACPI_INTERRUPT_HANDLED;
   1291}
   1292
   1293static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
   1294{
   1295	acpi_ec_handle_interrupt(data);
   1296	return IRQ_HANDLED;
   1297}
   1298
   1299/* --------------------------------------------------------------------------
   1300 *                           Address Space Management
   1301 * -------------------------------------------------------------------------- */
   1302
   1303static acpi_status
   1304acpi_ec_space_handler(u32 function, acpi_physical_address address,
   1305		      u32 bits, u64 *value64,
   1306		      void *handler_context, void *region_context)
   1307{
   1308	struct acpi_ec *ec = handler_context;
   1309	int result = 0, i, bytes = bits / 8;
   1310	u8 *value = (u8 *)value64;
   1311
   1312	if ((address > 0xFF) || !value || !handler_context)
   1313		return AE_BAD_PARAMETER;
   1314
   1315	if (function != ACPI_READ && function != ACPI_WRITE)
   1316		return AE_BAD_PARAMETER;
   1317
   1318	if (ec->busy_polling || bits > 8)
   1319		acpi_ec_burst_enable(ec);
   1320
   1321	for (i = 0; i < bytes; ++i, ++address, ++value)
   1322		result = (function == ACPI_READ) ?
   1323			acpi_ec_read(ec, address, value) :
   1324			acpi_ec_write(ec, address, *value);
   1325
   1326	if (ec->busy_polling || bits > 8)
   1327		acpi_ec_burst_disable(ec);
   1328
   1329	switch (result) {
   1330	case -EINVAL:
   1331		return AE_BAD_PARAMETER;
   1332	case -ENODEV:
   1333		return AE_NOT_FOUND;
   1334	case -ETIME:
   1335		return AE_TIME;
   1336	default:
   1337		return AE_OK;
   1338	}
   1339}
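
/*
 * For illustration: a 16-bit EC field access (bits = 16) decomposes into
 * two one-byte acpi_ec_read()/acpi_ec_write() transactions on consecutive
 * addresses, with burst mode enabled around them.
 */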
   1340
   1341/* --------------------------------------------------------------------------
   1342 *                             Driver Interface
   1343 * -------------------------------------------------------------------------- */
   1344
   1345static acpi_status
   1346ec_parse_io_ports(struct acpi_resource *resource, void *context);
   1347
   1348static void acpi_ec_free(struct acpi_ec *ec)
   1349{
   1350	if (first_ec == ec)
   1351		first_ec = NULL;
   1352	if (boot_ec == ec)
   1353		boot_ec = NULL;
   1354	kfree(ec);
   1355}
   1356
   1357static struct acpi_ec *acpi_ec_alloc(void)
   1358{
   1359	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
   1360
   1361	if (!ec)
   1362		return NULL;
   1363	mutex_init(&ec->mutex);
   1364	init_waitqueue_head(&ec->wait);
   1365	INIT_LIST_HEAD(&ec->list);
   1366	spin_lock_init(&ec->lock);
   1367	INIT_WORK(&ec->work, acpi_ec_event_handler);
   1368	ec->timestamp = jiffies;
   1369	ec->busy_polling = true;
   1370	ec->polling_guard = 0;
   1371	ec->gpe = -1;
   1372	ec->irq = -1;
   1373	return ec;
   1374}
   1375
   1376static acpi_status
   1377acpi_ec_register_query_methods(acpi_handle handle, u32 level,
   1378			       void *context, void **return_value)
   1379{
   1380	char node_name[5];
   1381	struct acpi_buffer buffer = { sizeof(node_name), node_name };
   1382	struct acpi_ec *ec = context;
   1383	int value = 0;
   1384	acpi_status status;
   1385
   1386	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
   1387
   1388	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
   1389		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
   1390	return AE_OK;
   1391}
   1392
   1393static acpi_status
   1394ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
   1395{
   1396	acpi_status status;
   1397	unsigned long long tmp = 0;
   1398	struct acpi_ec *ec = context;
   1399
   1400	/* Clear the addr values; ec_parse_io_ports() depends on them. */
   1401	ec->command_addr = ec->data_addr = 0;
   1402
   1403	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
   1404				     ec_parse_io_ports, ec);
   1405	if (ACPI_FAILURE(status))
   1406		return status;
   1407	if (ec->data_addr == 0 || ec->command_addr == 0)
   1408		return AE_OK;
   1409
   1410	if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) {
   1411		/*
   1412		 * Always inherit the GPE number setting from the ECDT
   1413		 * EC.
   1414		 */
   1415		ec->gpe = boot_ec->gpe;
   1416	} else {
   1417		/* Get GPE bit assignment (EC events). */
   1418		/* TODO: Add support for _GPE returning a package */
   1419		status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
   1420		if (ACPI_SUCCESS(status))
   1421			ec->gpe = tmp;
   1422
   1423		/*
   1424		 * Errors are non-fatal, allowing for ACPI Reduced Hardware
   1425		 * platforms which use GpioInt instead of GPE.
   1426		 */
   1427	}
   1428	/* Use the global lock for all EC transactions? */
   1429	tmp = 0;
   1430	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
   1431	ec->global_lock = tmp;
   1432	ec->handle = handle;
   1433	return AE_CTRL_TERMINATE;
   1434}
   1435
   1436static bool install_gpe_event_handler(struct acpi_ec *ec)
   1437{
   1438	acpi_status status;
   1439
   1440	status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
   1441					      ACPI_GPE_EDGE_TRIGGERED,
   1442					      &acpi_ec_gpe_handler, ec);
   1443	if (ACPI_FAILURE(status))
   1444		return false;
   1445
   1446	if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1)
   1447		acpi_ec_enable_gpe(ec, true);
   1448
   1449	return true;
   1450}
   1451
   1452static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
   1453{
   1454	return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED,
   1455			   "ACPI EC", ec) >= 0;
   1456}
   1457
   1458/**
   1459 * ec_install_handlers - Install service callbacks and register query methods.
   1460 * @ec: Target EC.
   1461 * @device: ACPI device object corresponding to @ec.
   1462 *
   1463 * Install a handler for the EC address space type unless it has been installed
   1464 * already.  If @device is not NULL, also look for EC query methods in the
   1465 * namespace and register them, and install an event (either GPE or GPIO IRQ)
   1466 * handler for the EC, if possible.
   1467 *
   1468 * Return:
   1469 * -ENODEV if the address space handler cannot be installed, which means
   1470 *  "unable to handle transactions",
   1471 * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred,
   1472 * or 0 (success) otherwise.
   1473 */
   1474static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device)
   1475{
   1476	acpi_status status;
   1477
   1478	acpi_ec_start(ec, false);
   1479
   1480	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
   1481		acpi_ec_enter_noirq(ec);
   1482		status = acpi_install_address_space_handler(ec->handle,
   1483							    ACPI_ADR_SPACE_EC,
   1484							    &acpi_ec_space_handler,
   1485							    NULL, ec);
   1486		if (ACPI_FAILURE(status)) {
   1487			acpi_ec_stop(ec, false);
   1488			return -ENODEV;
   1489		}
   1490		set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
   1491	}
   1492
   1493	if (!device)
   1494		return 0;
   1495
   1496	if (ec->gpe < 0) {
   1497		/* ACPI reduced hardware platforms use a GpioInt from _CRS. */
   1498		int irq = acpi_dev_gpio_irq_get(device, 0);
   1499		/*
   1500		 * Bail out right away for deferred probing or complete the
   1501		 * initialization regardless of any other errors.
   1502		 */
   1503		if (irq == -EPROBE_DEFER)
   1504			return -EPROBE_DEFER;
   1505		else if (irq >= 0)
   1506			ec->irq = irq;
   1507	}
   1508
   1509	if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
   1510		/* Find and register all query methods */
   1511		acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
   1512				    acpi_ec_register_query_methods,
   1513				    NULL, ec, NULL);
   1514		set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
   1515	}
   1516	if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
   1517		bool ready = false;
   1518
   1519		if (ec->gpe >= 0)
   1520			ready = install_gpe_event_handler(ec);
   1521		else if (ec->irq >= 0)
   1522			ready = install_gpio_irq_event_handler(ec);
   1523
   1524		if (ready) {
   1525			set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
   1526			acpi_ec_leave_noirq(ec);
   1527		}
   1528		/*
   1529		 * Failures to install an event handler are not fatal, because
   1530		 * the EC can be polled for events.
   1531		 */
   1532	}
   1533	/* EC is fully operational, allow queries */
   1534	acpi_ec_enable_event(ec);
   1535
   1536	return 0;
   1537}
   1538
   1539static void ec_remove_handlers(struct acpi_ec *ec)
   1540{
   1541	if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
   1542		if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
   1543					ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
   1544			pr_err("failed to remove space handler\n");
   1545		clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
   1546	}
   1547
   1548	/*
   1549	 * Stop handling the EC transactions after removing the operation
   1550	 * region handler. This is required because _REG(DISCONNECT)
   1551	 * invoked during the removal can result in new EC transactions.
   1552	 *
   1553	 * Flush the EC requests and thus disable the GPE before
   1554	 * removing the GPE handler. This is required by the current ACPICA
   1555	 * GPE core: it will automatically disable a GPE when the GPE fires
   1556	 * but cannot be handled, so the drivers must disable the GPEs
   1557	 * prior to removing the GPE handlers.
   1558	 */
   1559	acpi_ec_stop(ec, false);
   1560
   1561	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
   1562		if (ec->gpe >= 0 &&
   1563		    ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
   1564				 &acpi_ec_gpe_handler)))
   1565			pr_err("failed to remove gpe handler\n");
   1566
   1567		if (ec->irq >= 0)
   1568			free_irq(ec->irq, ec);
   1569
   1570		clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
   1571	}
   1572	if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
   1573		acpi_ec_remove_query_handlers(ec, true, 0);
   1574		clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
   1575	}
   1576}
   1577
   1578static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device)
   1579{
   1580	int ret;
   1581
   1582	ret = ec_install_handlers(ec, device);
   1583	if (ret)
   1584		return ret;
   1585
   1586	/* First EC capable of handling transactions */
   1587	if (!first_ec)
   1588		first_ec = ec;
   1589
   1590	pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr,
   1591		ec->data_addr);
   1592
   1593	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
   1594		if (ec->gpe >= 0)
   1595			pr_info("GPE=0x%x\n", ec->gpe);
   1596		else
   1597			pr_info("IRQ=%d\n", ec->irq);
   1598	}
   1599
   1600	return ret;
   1601}
   1602
   1603static int acpi_ec_add(struct acpi_device *device)
   1604{
   1605	struct acpi_ec *ec;
   1606	int ret;
   1607
   1608	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
   1609	strcpy(acpi_device_class(device), ACPI_EC_CLASS);
   1610
   1611	if (boot_ec && (boot_ec->handle == device->handle ||
   1612	    !strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) {
   1613		/* Fast path: this device corresponds to the boot EC. */
   1614		ec = boot_ec;
   1615	} else {
   1616		acpi_status status;
   1617
   1618		ec = acpi_ec_alloc();
   1619		if (!ec)
   1620			return -ENOMEM;
   1621
   1622		status = ec_parse_device(device->handle, 0, ec, NULL);
   1623		if (status != AE_CTRL_TERMINATE) {
   1624			ret = -EINVAL;
   1625			goto err;
   1626		}
   1627
   1628		if (boot_ec && ec->command_addr == boot_ec->command_addr &&
   1629		    ec->data_addr == boot_ec->data_addr &&
   1630		    !EC_FLAGS_TRUST_DSDT_GPE) {
   1631			/*
   1632			 * Trust PNP0C09 namespace location rather than
   1633			 * ECDT ID. But trust ECDT GPE rather than _GPE
   1634			 * because of ASUS quirks, so do not change
   1635			 * boot_ec->gpe to ec->gpe.
   1636			 */
   1637			boot_ec->handle = ec->handle;
   1638			acpi_handle_debug(ec->handle, "duplicated.\n");
   1639			acpi_ec_free(ec);
   1640			ec = boot_ec;
   1641		}
   1642	}
   1643
   1644	ret = acpi_ec_setup(ec, device);
   1645	if (ret)
   1646		goto err;
   1647
   1648	if (ec == boot_ec)
   1649		acpi_handle_info(boot_ec->handle,
   1650				 "Boot %s EC initialization complete\n",
   1651				 boot_ec_is_ecdt ? "ECDT" : "DSDT");
   1652
   1653	acpi_handle_info(ec->handle,
   1654			 "EC: Used to handle transactions and events\n");
   1655
   1656	device->driver_data = ec;
   1657
   1658	ret = !!request_region(ec->data_addr, 1, "EC data");
   1659	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
   1660	ret = !!request_region(ec->command_addr, 1, "EC cmd");
   1661	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
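	/*
	 * Note: failing to reserve these one-byte I/O regions is reported
	 * but not fatal; the EC is still driven through these ports, and
	 * the reservation merely records ownership (visible in
	 * /proc/ioports).
	 */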
   1662
   1663	/* Reprobe devices depending on the EC */
   1664	acpi_dev_clear_dependencies(device);
   1665
   1666	acpi_handle_debug(ec->handle, "enumerated.\n");
   1667	return 0;
   1668
   1669err:
   1670	if (ec != boot_ec)
   1671		acpi_ec_free(ec);
   1672
   1673	return ret;
   1674}
   1675
   1676static int acpi_ec_remove(struct acpi_device *device)
   1677{
   1678	struct acpi_ec *ec;
   1679
   1680	if (!device)
   1681		return -EINVAL;
   1682
   1683	ec = acpi_driver_data(device);
   1684	release_region(ec->data_addr, 1);
   1685	release_region(ec->command_addr, 1);
   1686	device->driver_data = NULL;
   1687	if (ec != boot_ec) {
   1688		ec_remove_handlers(ec);
   1689		acpi_ec_free(ec);
   1690	}
   1691	return 0;
   1692}
   1693
   1694static acpi_status
   1695ec_parse_io_ports(struct acpi_resource *resource, void *context)
   1696{
   1697	struct acpi_ec *ec = context;
   1698
   1699	if (resource->type != ACPI_RESOURCE_TYPE_IO)
   1700		return AE_OK;
   1701
   1702	/*
   1703	 * The first address region returned is the data port, and
   1704	 * the second address region returned is the status/command
   1705	 * port.
   1706	 */
   1707	if (ec->data_addr == 0)
   1708		ec->data_addr = resource->data.io.minimum;
   1709	else if (ec->command_addr == 0)
   1710		ec->command_addr = resource->data.io.minimum;
   1711	else
   1712		return AE_CTRL_TERMINATE;
   1713
   1714	return AE_OK;
   1715}
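
/*
 * For illustration only: a typical PNP0C09 _CRS that this walk expects,
 * with the data port listed before the status/command port. The
 * addresses below are the conventional EC ports (0x62/0x66) and may
 * differ on any given platform:
 *
 *	Name (_CRS, ResourceTemplate () {
 *		IO (Decode16, 0x62, 0x62, 0x00, 0x01)	// EC_DATA
 *		IO (Decode16, 0x66, 0x66, 0x00, 0x01)	// EC_SC/EC_CMD
 *	})
 */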
   1716
   1717static const struct acpi_device_id ec_device_ids[] = {
   1718	{"PNP0C09", 0},
   1719	{ACPI_ECDT_HID, 0},
   1720	{"", 0},
   1721};
   1722
   1723/*
   1724 * This function is not Windows-compatible as Windows never enumerates the
   1725 * namespace EC before the main ACPI device enumeration process. It is
    1726 * retained for historical reasons and will be deprecated in the future.
   1727 */
   1728void __init acpi_ec_dsdt_probe(void)
   1729{
   1730	struct acpi_ec *ec;
   1731	acpi_status status;
   1732	int ret;
   1733
   1734	/*
    1735	 * If the platform has an ECDT, there is no need to proceed: the
    1736	 * following probe is not a part of the ACPI device enumeration,
    1737	 * executing _STA is not safe, and thus this probe risks
    1738	 * picking up an invalid EC device.
   1739	 */
   1740	if (boot_ec)
   1741		return;
   1742
   1743	ec = acpi_ec_alloc();
   1744	if (!ec)
   1745		return;
   1746
   1747	/*
    1748	 * At this point, the namespace is initialized, so start looking
    1749	 * for the namespace objects.
   1750	 */
   1751	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL);
   1752	if (ACPI_FAILURE(status) || !ec->handle) {
   1753		acpi_ec_free(ec);
   1754		return;
   1755	}
   1756
   1757	/*
    1758	 * When the DSDT EC is available, always re-configure the boot EC to
    1759	 * have _REG evaluated. _REG can only be evaluated after the
    1760	 * namespace initialization.
    1761	 * At this point, the GPE is not fully initialized, so do not
    1762	 * handle events yet.
   1763	 */
   1764	ret = acpi_ec_setup(ec, NULL);
   1765	if (ret) {
   1766		acpi_ec_free(ec);
   1767		return;
   1768	}
   1769
   1770	boot_ec = ec;
   1771
   1772	acpi_handle_info(ec->handle,
   1773			 "Boot DSDT EC used to handle transactions\n");
   1774}
   1775
   1776/*
   1777 * acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization.
   1778 *
   1779 * First, look for an ACPI handle for the boot ECDT EC if acpi_ec_add() has not
   1780 * found a matching object in the namespace.
   1781 *
   1782 * Next, in case the DSDT EC is not functioning, it is still necessary to
   1783 * provide a functional ECDT EC to handle events, so add an extra device object
   1784 * to represent it (see https://bugzilla.kernel.org/show_bug.cgi?id=115021).
   1785 *
   1786 * This is useful on platforms with valid ECDT and invalid DSDT EC settings,
   1787 * like ASUS X550ZE (see https://bugzilla.kernel.org/show_bug.cgi?id=196847).
   1788 */
   1789static void __init acpi_ec_ecdt_start(void)
   1790{
   1791	struct acpi_table_ecdt *ecdt_ptr;
   1792	acpi_handle handle;
   1793	acpi_status status;
   1794
   1795	/* Bail out if a matching EC has been found in the namespace. */
   1796	if (!boot_ec || boot_ec->handle != ACPI_ROOT_OBJECT)
   1797		return;
   1798
   1799	/* Look up the object pointed to from the ECDT in the namespace. */
   1800	status = acpi_get_table(ACPI_SIG_ECDT, 1,
   1801				(struct acpi_table_header **)&ecdt_ptr);
   1802	if (ACPI_FAILURE(status))
   1803		return;
   1804
   1805	status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
   1806	if (ACPI_SUCCESS(status)) {
   1807		boot_ec->handle = handle;
   1808
   1809		/* Add a special ACPI device object to represent the boot EC. */
   1810		acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
   1811	}
   1812
   1813	acpi_put_table((struct acpi_table_header *)ecdt_ptr);
   1814}
   1815
   1816/*
   1817 * On some hardware it is necessary to clear events accumulated by the EC during
    1818 * sleep. If too many events accumulate, these ECs stop reporting GPEs
    1819 * until they are manually polled (e.g. Samsung Series 5/9 notebooks).
   1820 *
   1821 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
   1822 *
   1823 * Ideally, the EC should also be instructed NOT to accumulate events during
   1824 * sleep (which Windows seems to do somehow), but the interface to control this
   1825 * behaviour is not known at this time.
   1826 *
    1827 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx;
    1828 * however, it is very likely that other Samsung models are affected as well.
   1829 *
   1830 * On systems which don't accumulate _Q events during sleep, this extra check
   1831 * should be harmless.
   1832 */
   1833static int ec_clear_on_resume(const struct dmi_system_id *id)
   1834{
   1835	pr_debug("Detected system needing EC poll on resume.\n");
   1836	EC_FLAGS_CLEAR_ON_RESUME = 1;
   1837	ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
   1838	return 0;
   1839}
   1840
   1841/*
   1842 * Some ECDTs contain wrong register addresses.
   1843 * MSI MS-171F
   1844 * https://bugzilla.kernel.org/show_bug.cgi?id=12461
   1845 */
   1846static int ec_correct_ecdt(const struct dmi_system_id *id)
   1847{
   1848	pr_debug("Detected system needing ECDT address correction.\n");
   1849	EC_FLAGS_CORRECT_ECDT = 1;
   1850	return 0;
   1851}
   1852
   1853/*
    1854 * Some ECDTs contain a wrong GPE setting but share the same port addresses
    1855 * with the DSDT EC; do not duplicate the DSDT EC with the ECDT EC in this case.
   1856 * https://bugzilla.kernel.org/show_bug.cgi?id=209989
   1857 */
   1858static int ec_honor_dsdt_gpe(const struct dmi_system_id *id)
   1859{
    1860	pr_debug("Detected system needing to honor the DSDT GPE setting.\n");
   1861	EC_FLAGS_TRUST_DSDT_GPE = 1;
   1862	return 0;
   1863}
   1864
   1865/*
    1866 * Some DSDTs contain a wrong GPE setting.
   1867 * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
   1868 * https://bugzilla.kernel.org/show_bug.cgi?id=195651
   1869 */
   1870static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
   1871{
    1872	pr_debug("Detected system needing to ignore the DSDT GPE setting.\n");
   1873	EC_FLAGS_IGNORE_DSDT_GPE = 1;
   1874	return 0;
   1875}
   1876
   1877static const struct dmi_system_id ec_dmi_table[] __initconst = {
   1878	{
   1879	ec_correct_ecdt, "MSI MS-171F", {
   1880	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
   1881	DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
   1882	{
   1883	ec_honor_ecdt_gpe, "ASUS FX502VD", {
   1884	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
   1885	DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL},
   1886	{
   1887	ec_honor_ecdt_gpe, "ASUS FX502VE", {
   1888	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
   1889	DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL},
   1890	{
   1891	ec_honor_ecdt_gpe, "ASUS GL702VMK", {
   1892	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
   1893	DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
   1894	{
   1895	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", {
   1896	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
   1897	DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL},
   1898	{
   1899	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", {
   1900	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
   1901	DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL},
   1902	{
   1903	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", {
   1904	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
   1905	DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL},
   1906	{
   1907	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", {
   1908	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
   1909	DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL},
   1910	{
   1911	ec_honor_ecdt_gpe, "ASUS X550VXK", {
   1912	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
   1913	DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
   1914	{
   1915	ec_honor_ecdt_gpe, "ASUS X580VD", {
   1916	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
   1917	DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
   1918	{
   1919	/* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */
   1920	ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", {
   1921	DMI_MATCH(DMI_SYS_VENDOR, "HP"),
   1922	DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),}, NULL},
   1923	{
   1924	ec_clear_on_resume, "Samsung hardware", {
   1925	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
   1926	{},
   1927};
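
/*
 * Sketch only: a new quirk would be wired up by appending an entry before
 * the terminator above, pointing at one of the callbacks, e.g. (with a
 * hypothetical vendor/model):
 *
 *	{
 *	ec_honor_ecdt_gpe, "VENDOR MODEL", {
 *	DMI_MATCH(DMI_SYS_VENDOR, "VENDOR"),
 *	DMI_MATCH(DMI_PRODUCT_NAME, "MODEL"),}, NULL},
 */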
   1928
   1929void __init acpi_ec_ecdt_probe(void)
   1930{
   1931	struct acpi_table_ecdt *ecdt_ptr;
   1932	struct acpi_ec *ec;
   1933	acpi_status status;
   1934	int ret;
   1935
   1936	/* Generate a boot ec context. */
   1937	dmi_check_system(ec_dmi_table);
   1938	status = acpi_get_table(ACPI_SIG_ECDT, 1,
   1939				(struct acpi_table_header **)&ecdt_ptr);
   1940	if (ACPI_FAILURE(status))
   1941		return;
   1942
   1943	if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
   1944		/*
   1945		 * Asus X50GL:
   1946		 * https://bugzilla.kernel.org/show_bug.cgi?id=11880
   1947		 */
   1948		goto out;
   1949	}
   1950
   1951	ec = acpi_ec_alloc();
   1952	if (!ec)
   1953		goto out;
   1954
   1955	if (EC_FLAGS_CORRECT_ECDT) {
   1956		ec->command_addr = ecdt_ptr->data.address;
   1957		ec->data_addr = ecdt_ptr->control.address;
   1958	} else {
   1959		ec->command_addr = ecdt_ptr->control.address;
   1960		ec->data_addr = ecdt_ptr->data.address;
   1961	}
   1962
   1963	/*
   1964	 * Ignore the GPE value on Reduced Hardware platforms.
   1965	 * Some products have this set to an erroneous value.
   1966	 */
   1967	if (!acpi_gbl_reduced_hardware)
   1968		ec->gpe = ecdt_ptr->gpe;
   1969
   1970	ec->handle = ACPI_ROOT_OBJECT;
   1971
   1972	/*
    1973	 * At this point, the namespace is not initialized, so do not look up
    1974	 * the namespace objects or handle events.
   1975	 */
   1976	ret = acpi_ec_setup(ec, NULL);
   1977	if (ret) {
   1978		acpi_ec_free(ec);
   1979		goto out;
   1980	}
   1981
   1982	boot_ec = ec;
   1983	boot_ec_is_ecdt = true;
   1984
   1985	pr_info("Boot ECDT EC used to handle transactions\n");
   1986
   1987out:
   1988	acpi_put_table((struct acpi_table_header *)ecdt_ptr);
   1989}
   1990
   1991#ifdef CONFIG_PM_SLEEP
   1992static int acpi_ec_suspend(struct device *dev)
   1993{
   1994	struct acpi_ec *ec =
   1995		acpi_driver_data(to_acpi_device(dev));
   1996
   1997	if (!pm_suspend_no_platform() && ec_freeze_events)
   1998		acpi_ec_disable_event(ec);
   1999	return 0;
   2000}
   2001
   2002static int acpi_ec_suspend_noirq(struct device *dev)
   2003{
   2004	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
   2005
   2006	/*
   2007	 * The SCI handler doesn't run at this point, so the GPE can be
   2008	 * masked at the low level without side effects.
   2009	 */
   2010	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
   2011	    ec->gpe >= 0 && ec->reference_count >= 1)
   2012		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
   2013
   2014	acpi_ec_enter_noirq(ec);
   2015
   2016	return 0;
   2017}
   2018
   2019static int acpi_ec_resume_noirq(struct device *dev)
   2020{
   2021	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
   2022
   2023	acpi_ec_leave_noirq(ec);
   2024
   2025	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
   2026	    ec->gpe >= 0 && ec->reference_count >= 1)
   2027		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
   2028
   2029	return 0;
   2030}
   2031
   2032static int acpi_ec_resume(struct device *dev)
   2033{
   2034	struct acpi_ec *ec =
   2035		acpi_driver_data(to_acpi_device(dev));
   2036
   2037	acpi_ec_enable_event(ec);
   2038	return 0;
   2039}
   2040
   2041void acpi_ec_mark_gpe_for_wake(void)
   2042{
   2043	if (first_ec && !ec_no_wakeup)
   2044		acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
   2045}
   2046EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake);
   2047
   2048void acpi_ec_set_gpe_wake_mask(u8 action)
   2049{
   2050	if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup)
   2051		acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
   2052}
   2053
   2054static bool acpi_ec_work_in_progress(struct acpi_ec *ec)
   2055{
   2056	return ec->events_in_progress + ec->queries_in_progress > 0;
   2057}
   2058
   2059bool acpi_ec_dispatch_gpe(void)
   2060{
   2061	bool work_in_progress = false;
   2062
   2063	if (!first_ec)
   2064		return acpi_any_gpe_status_set(U32_MAX);
   2065
   2066	/*
   2067	 * Report wakeup if the status bit is set for any enabled GPE other
   2068	 * than the EC one.
   2069	 */
   2070	if (acpi_any_gpe_status_set(first_ec->gpe))
   2071		return true;
   2072
   2073	/*
   2074	 * Cancel the SCI wakeup and process all pending events in case there
   2075	 * are any wakeup ones in there.
   2076	 *
   2077	 * Note that if any non-EC GPEs are active at this point, the SCI will
   2078	 * retrigger after the rearming in acpi_s2idle_wake(), so no events
   2079	 * should be missed by canceling the wakeup here.
   2080	 */
   2081	pm_system_cancel_wakeup();
   2082
   2083	/*
    2084	 * Dispatch the EC GPE in-band, but do not report a wakeup in any
    2085	 * case, so that the caller can process events properly afterwards.
   2086	 */
   2087	spin_lock_irq(&first_ec->lock);
   2088
   2089	if (acpi_ec_gpe_status_set(first_ec)) {
   2090		pm_pr_dbg("ACPI EC GPE status set\n");
   2091
   2092		advance_transaction(first_ec, false);
   2093		work_in_progress = acpi_ec_work_in_progress(first_ec);
   2094	}
   2095
   2096	spin_unlock_irq(&first_ec->lock);
   2097
   2098	if (!work_in_progress)
   2099		return false;
   2100
   2101	pm_pr_dbg("ACPI EC GPE dispatched\n");
   2102
   2103	/* Drain EC work. */
   2104	do {
   2105		acpi_ec_flush_work();
   2106
   2107		pm_pr_dbg("ACPI EC work flushed\n");
   2108
   2109		spin_lock_irq(&first_ec->lock);
   2110
   2111		work_in_progress = acpi_ec_work_in_progress(first_ec);
   2112
   2113		spin_unlock_irq(&first_ec->lock);
   2114	} while (work_in_progress && !pm_wakeup_pending());
   2115
   2116	return false;
   2117}
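
/*
 * Note: acpi_ec_dispatch_gpe() is called from the suspend-to-idle wakeup
 * path (acpi_s2idle_wake(), mentioned above); returning false tells that
 * path the event was EC-only, so the system may go back to sleep once
 * the EC work has been drained.
 */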
   2118#endif /* CONFIG_PM_SLEEP */
   2119
   2120static const struct dev_pm_ops acpi_ec_pm = {
   2121	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
   2122	SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
   2123};
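
/*
 * Ordering reminder: on suspend the PM core invokes acpi_ec_suspend()
 * before acpi_ec_suspend_noirq(); on resume, acpi_ec_resume_noirq() runs
 * before acpi_ec_resume(), so EC events are re-enabled only after the
 * GPE has been unmasked.
 */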
   2124
   2125static int param_set_event_clearing(const char *val,
   2126				    const struct kernel_param *kp)
   2127{
   2128	int result = 0;
   2129
   2130	if (!strncmp(val, "status", sizeof("status") - 1)) {
   2131		ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
   2132		pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
   2133	} else if (!strncmp(val, "query", sizeof("query") - 1)) {
   2134		ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
   2135		pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
   2136	} else if (!strncmp(val, "event", sizeof("event") - 1)) {
   2137		ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
   2138		pr_info("Assuming SCI_EVT clearing on event reads\n");
   2139	} else
   2140		result = -EINVAL;
   2141	return result;
   2142}
   2143
   2144static int param_get_event_clearing(char *buffer,
   2145				    const struct kernel_param *kp)
   2146{
   2147	switch (ec_event_clearing) {
   2148	case ACPI_EC_EVT_TIMING_STATUS:
   2149		return sprintf(buffer, "status\n");
   2150	case ACPI_EC_EVT_TIMING_QUERY:
   2151		return sprintf(buffer, "query\n");
   2152	case ACPI_EC_EVT_TIMING_EVENT:
   2153		return sprintf(buffer, "event\n");
   2154	default:
   2155		return sprintf(buffer, "invalid\n");
   2156	}
   2158}
   2159
   2160module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
   2161		  NULL, 0644);
   2162MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
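
/*
 * Example usage (assuming the parameter is exposed under the "acpi"
 * module, which is the case when this driver is built into the ACPI
 * core):
 *
 *	# cat /sys/module/acpi/parameters/ec_event_clearing
 *	query
 *	# echo status > /sys/module/acpi/parameters/ec_event_clearing
 *
 * or, equivalently, on the kernel command line:
 *
 *	acpi.ec_event_clearing=event
 *
 * "query" is the build default unless a DMI quirk (see
 * ec_clear_on_resume()) has switched it to "status".
 */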
   2163
   2164static struct acpi_driver acpi_ec_driver = {
   2165	.name = "ec",
   2166	.class = ACPI_EC_CLASS,
   2167	.ids = ec_device_ids,
   2168	.ops = {
   2169		.add = acpi_ec_add,
   2170		.remove = acpi_ec_remove,
   2171		},
   2172	.drv.pm = &acpi_ec_pm,
   2173};
   2174
   2175static void acpi_ec_destroy_workqueues(void)
   2176{
   2177	if (ec_wq) {
   2178		destroy_workqueue(ec_wq);
   2179		ec_wq = NULL;
   2180	}
   2181	if (ec_query_wq) {
   2182		destroy_workqueue(ec_query_wq);
   2183		ec_query_wq = NULL;
   2184	}
   2185}
   2186
   2187static int acpi_ec_init_workqueues(void)
   2188{
   2189	if (!ec_wq)
   2190		ec_wq = alloc_ordered_workqueue("kec", 0);
   2191
   2192	if (!ec_query_wq)
   2193		ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
   2194
   2195	if (!ec_wq || !ec_query_wq) {
   2196		acpi_ec_destroy_workqueues();
   2197		return -ENODEV;
   2198	}
   2199	return 0;
   2200}
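
/*
 * Note on the two queues: alloc_ordered_workqueue() makes ec_wq execute
 * at most one work item at a time, in queueing order, which serializes
 * EC event processing; ec_query_wq may run up to ec_max_queries query
 * handlers concurrently.
 */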
   2201
   2202static const struct dmi_system_id acpi_ec_no_wakeup[] = {
   2203	{
   2204		.ident = "Thinkpad X1 Carbon 6th",
   2205		.matches = {
   2206			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
   2207			DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
   2208		},
   2209	},
   2210	{
   2211		.ident = "ThinkPad X1 Carbon 6th",
   2212		.matches = {
   2213			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
   2214			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
   2215		},
   2216	},
   2217	{
   2218		.ident = "ThinkPad X1 Yoga 3rd",
   2219		.matches = {
   2220			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
   2221			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
   2222		},
   2223	},
   2224	{
   2225		.ident = "HP ZHAN 66 Pro",
   2226		.matches = {
   2227			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
   2228			DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
   2229		},
   2230	},
   2231	{ },
   2232};
   2233
   2234void __init acpi_ec_init(void)
   2235{
   2236	int result;
   2237
   2238	result = acpi_ec_init_workqueues();
   2239	if (result)
   2240		return;
   2241
   2242	/*
    2243	 * Disable EC wakeup on the following systems to prevent periodic
    2244	 * wakeups caused by the EC GPE.
   2245	 */
   2246	if (dmi_check_system(acpi_ec_no_wakeup)) {
   2247		ec_no_wakeup = true;
   2248		pr_debug("Disabling EC wakeup on suspend-to-idle\n");
   2249	}
   2250
   2251	/* Driver must be registered after acpi_ec_init_workqueues(). */
   2252	acpi_bus_register_driver(&acpi_ec_driver);
   2253
   2254	acpi_ec_ecdt_start();
   2255}
   2256
   2257/* EC driver currently not unloadable */
   2258#if 0
   2259static void __exit acpi_ec_exit(void)
   2260{
   2262	acpi_bus_unregister_driver(&acpi_ec_driver);
   2263	acpi_ec_destroy_workqueues();
   2264}
   2265#endif	/* 0 */