cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

smu.c (31242B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * PowerMac G5 SMU driver
      4 *
      5 * Copyright 2004 J. Mayer <l_indien@magic.fr>
      6 * Copyright 2005 Benjamin Herrenschmidt, IBM Corp.
      7 */
      8
      9/*
     10 * TODO:
     11 *  - maybe add timeout to commands ?
     12 *  - blocking version of time functions
     13 *  - polling version of i2c commands (including timer that works with
     14 *    interrupts off)
     15 *  - maybe avoid some data copies with i2c by directly using the smu cmd
     16 *    buffer and a lower level internal interface
     17 *  - understand SMU -> CPU events and implement reception of them via
     18 *    the userland interface
     19 */
     20
     21#include <linux/types.h>
     22#include <linux/kernel.h>
     23#include <linux/device.h>
     24#include <linux/dmapool.h>
     25#include <linux/memblock.h>
     26#include <linux/vmalloc.h>
     27#include <linux/highmem.h>
     28#include <linux/jiffies.h>
     29#include <linux/interrupt.h>
     30#include <linux/rtc.h>
     31#include <linux/completion.h>
     32#include <linux/miscdevice.h>
     33#include <linux/delay.h>
     34#include <linux/poll.h>
     35#include <linux/mutex.h>
     36#include <linux/of_device.h>
     37#include <linux/of_irq.h>
     38#include <linux/of_platform.h>
     39#include <linux/slab.h>
     40#include <linux/sched/signal.h>
     41
     42#include <asm/byteorder.h>
     43#include <asm/io.h>
     44#include <asm/machdep.h>
     45#include <asm/pmac_feature.h>
     46#include <asm/smu.h>
     47#include <asm/sections.h>
     48#include <linux/uaccess.h>
     49
     50#define VERSION "0.7"
     51#define AUTHOR  "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
     52
     53#undef DEBUG_SMU
     54
     55#ifdef DEBUG_SMU
     56#define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0)
     57#else
     58#define DPRINTK(fmt, args...) do { } while (0)
     59#endif
     60
     61/*
     62 * This is the command buffer passed to the SMU hardware
     63 */
     64#define SMU_MAX_DATA	254
     65
     66struct smu_cmd_buf {
     67	u8 cmd;
     68	u8 length;
     69	u8 data[SMU_MAX_DATA];
     70};
     71
     72struct smu_device {
     73	spinlock_t		lock;
     74	struct device_node	*of_node;
     75	struct platform_device	*of_dev;
     76	int			doorbell;	/* doorbell gpio */
     77	u32 __iomem		*db_buf;	/* doorbell buffer */
     78	struct device_node	*db_node;
     79	unsigned int		db_irq;
     80	int			msg;
     81	struct device_node	*msg_node;
     82	unsigned int		msg_irq;
     83	struct smu_cmd_buf	*cmd_buf;	/* command buffer virtual */
     84	u32			cmd_buf_abs;	/* command buffer absolute */
     85	struct list_head	cmd_list;
     86	struct smu_cmd		*cmd_cur;	/* pending command */
     87	int			broken_nap;
     88	struct list_head	cmd_i2c_list;
     89	struct smu_i2c_cmd	*cmd_i2c_cur;	/* pending i2c command */
     90	struct timer_list	i2c_timer;
     91};
     92
     93/*
     94 * I don't think there will ever be more than one SMU, so
     95 * for now, just hard code that
     96 */
     97static DEFINE_MUTEX(smu_mutex);
     98static struct smu_device	*smu;
     99static DEFINE_MUTEX(smu_part_access);
    100static int smu_irq_inited;
    101static unsigned long smu_cmdbuf_abs;
    102
    103static void smu_i2c_retry(struct timer_list *t);
    104
    105/*
    106 * SMU driver low level stuff
    107 */
    108
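/*
 * Low level command submission: the next queued smu_cmd is copied into
 * the shared command buffer, flushed out of the data cache, and its
 * physical address is written to the doorbell buffer before the
 * doorbell GPIO is rung. The SMU then overwrites the same buffer with
 * its acknowledgement and reply.
 */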
    109static void smu_start_cmd(void)
    110{
    111	unsigned long faddr, fend;
    112	struct smu_cmd *cmd;
    113
    114	if (list_empty(&smu->cmd_list))
    115		return;
    116
    117	/* Fetch first command in queue */
    118	cmd = list_entry(smu->cmd_list.next, struct smu_cmd, link);
    119	smu->cmd_cur = cmd;
    120	list_del(&cmd->link);
    121
    122	DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd,
    123		cmd->data_len);
    124	DPRINTK("SMU: data buffer: %8ph\n", cmd->data_buf);
    125
    126	/* Fill the SMU command buffer */
    127	smu->cmd_buf->cmd = cmd->cmd;
    128	smu->cmd_buf->length = cmd->data_len;
    129	memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len);
    130
    131	/* Flush command and data to RAM */
    132	faddr = (unsigned long)smu->cmd_buf;
    133	fend = faddr + smu->cmd_buf->length + 2;
    134	flush_dcache_range(faddr, fend);
    135
    136
    137	/* We also disable NAP mode for the duration of the command
    138	 * on U3 based machines.
    139	 * This is slightly racy as it can be written back to 1 by a sysctl
     140	 * but that never happens in practice. There seems to be an issue with
    141	 * U3 based machines such as the iMac G5 where napping for the
    142	 * whole duration of the command prevents the SMU from fetching it
    143	 * from memory. This might be related to the strange i2c based
    144	 * mechanism the SMU uses to access memory.
    145	 */
    146	if (smu->broken_nap)
    147		powersave_nap = 0;
    148
    149	/* This isn't exactly a DMA mapping here, I suspect
    150	 * the SMU is actually communicating with us via i2c to the
    151	 * northbridge or the CPU to access RAM.
    152	 */
    153	writel(smu->cmd_buf_abs, smu->db_buf);
    154
    155	/* Ring the SMU doorbell */
    156	pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, smu->doorbell, 4);
    157}
    158
    159
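/*
 * Doorbell interrupt handler. The doorbell GPIO reads back with its
 * low three bits set once the SMU is done with the command; the ack is
 * then verified by comparing the command byte the SMU wrote back with
 * the one's complement of the command we sent, and the reply data (if
 * any) is copied out of the shared buffer.
 */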
    160static irqreturn_t smu_db_intr(int irq, void *arg)
    161{
    162	unsigned long flags;
    163	struct smu_cmd *cmd;
    164	void (*done)(struct smu_cmd *cmd, void *misc) = NULL;
    165	void *misc = NULL;
    166	u8 gpio;
    167	int rc = 0;
    168
    169	/* SMU completed the command, well, we hope, let's make sure
    170	 * of it
    171	 */
    172	spin_lock_irqsave(&smu->lock, flags);
    173
    174	gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
    175	if ((gpio & 7) != 7) {
    176		spin_unlock_irqrestore(&smu->lock, flags);
    177		return IRQ_HANDLED;
    178	}
    179
    180	cmd = smu->cmd_cur;
    181	smu->cmd_cur = NULL;
    182	if (cmd == NULL)
    183		goto bail;
    184
    185	if (rc == 0) {
    186		unsigned long faddr;
    187		int reply_len;
    188		u8 ack;
    189
    190		/* CPU might have brought back the cache line, so we need
    191		 * to flush again before peeking at the SMU response. We
    192		 * flush the entire buffer for now as we haven't read the
    193		 * reply length (it's only 2 cache lines anyway)
    194		 */
    195		faddr = (unsigned long)smu->cmd_buf;
    196		flush_dcache_range(faddr, faddr + 256);
    197
    198		/* Now check ack */
    199		ack = (~cmd->cmd) & 0xff;
    200		if (ack != smu->cmd_buf->cmd) {
    201			DPRINTK("SMU: incorrect ack, want %x got %x\n",
    202				ack, smu->cmd_buf->cmd);
    203			rc = -EIO;
    204		}
    205		reply_len = rc == 0 ? smu->cmd_buf->length : 0;
    206		DPRINTK("SMU: reply len: %d\n", reply_len);
    207		if (reply_len > cmd->reply_len) {
     208			printk(KERN_WARNING "SMU: reply buffer too small, "
     209			       "got %d bytes for a %d byte buffer\n",
     210			       reply_len, cmd->reply_len);
    211			reply_len = cmd->reply_len;
    212		}
    213		cmd->reply_len = reply_len;
    214		if (cmd->reply_buf && reply_len)
    215			memcpy(cmd->reply_buf, smu->cmd_buf->data, reply_len);
    216	}
    217
     218	/* Now complete the command. Write status last, as we lose
     219	 * ownership of the command structure as soon as it's no longer 1
     220	 */
    221	done = cmd->done;
    222	misc = cmd->misc;
    223	mb();
    224	cmd->status = rc;
    225
    226	/* Re-enable NAP mode */
    227	if (smu->broken_nap)
    228		powersave_nap = 1;
    229 bail:
    230	/* Start next command if any */
    231	smu_start_cmd();
    232	spin_unlock_irqrestore(&smu->lock, flags);
    233
    234	/* Call command completion handler if any */
    235	if (done)
    236		done(cmd, misc);
    237
    238	/* It's an edge interrupt, nothing to do */
    239	return IRQ_HANDLED;
    240}
    241
    242
    243static irqreturn_t smu_msg_intr(int irq, void *arg)
    244{
    245	/* I don't quite know what to do with this one, we seem to never
     246	 * receive it, so I suspect we have to arm it somehow in the SMU
    247	 * to start getting events that way.
    248	 */
    249
    250	printk(KERN_INFO "SMU: message interrupt !\n");
    251
    252	/* It's an edge interrupt, nothing to do */
    253	return IRQ_HANDLED;
    254}
    255
    256
    257/*
    258 * Queued command management.
    259 *
    260 */
    261
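/*
 * Commands are queued on smu->cmd_list and issued one at a time; the
 * next one is started from the doorbell interrupt when the previous
 * one completes. cmd->status is 1 while a command is pending and
 * becomes 0 or a negative errno on completion, so callers either pass
 * a completion callback or spin with smu_spinwait_cmd().
 */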
    262int smu_queue_cmd(struct smu_cmd *cmd)
    263{
    264	unsigned long flags;
    265
    266	if (smu == NULL)
    267		return -ENODEV;
    268	if (cmd->data_len > SMU_MAX_DATA ||
    269	    cmd->reply_len > SMU_MAX_DATA)
    270		return -EINVAL;
    271
    272	cmd->status = 1;
    273	spin_lock_irqsave(&smu->lock, flags);
    274	list_add_tail(&cmd->link, &smu->cmd_list);
    275	if (smu->cmd_cur == NULL)
    276		smu_start_cmd();
    277	spin_unlock_irqrestore(&smu->lock, flags);
    278
    279	/* Workaround for early calls when irq isn't available */
    280	if (!smu_irq_inited || !smu->db_irq)
    281		smu_spinwait_cmd(cmd);
    282
    283	return 0;
    284}
    285EXPORT_SYMBOL(smu_queue_cmd);
    286
    287
    288int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
    289		     unsigned int data_len,
    290		     void (*done)(struct smu_cmd *cmd, void *misc),
    291		     void *misc, ...)
    292{
    293	struct smu_cmd *cmd = &scmd->cmd;
    294	va_list list;
    295	int i;
    296
    297	if (data_len > sizeof(scmd->buffer))
    298		return -EINVAL;
    299
    300	memset(scmd, 0, sizeof(*scmd));
    301	cmd->cmd = command;
    302	cmd->data_len = data_len;
    303	cmd->data_buf = scmd->buffer;
    304	cmd->reply_len = sizeof(scmd->buffer);
    305	cmd->reply_buf = scmd->buffer;
    306	cmd->done = done;
    307	cmd->misc = misc;
    308
    309	va_start(list, misc);
    310	for (i = 0; i < data_len; ++i)
    311		scmd->buffer[i] = (u8)va_arg(list, int);
    312	va_end(list);
    313
    314	return smu_queue_cmd(cmd);
    315}
    316EXPORT_SYMBOL(smu_queue_simple);
    317
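/*
 * Hypothetical usage sketch, mirroring what smu_get_rtc_time() does
 * below: queue a one byte RTC sub-command and spin for the reply.
 *
 *	struct smu_simple_cmd cmd;
 *	int rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
 *				  SMU_CMD_RTC_GET_DATETIME);
 *	if (!rc)
 *		smu_spinwait_simple(&cmd);
 */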
    318
    319void smu_poll(void)
    320{
    321	u8 gpio;
    322
    323	if (smu == NULL)
    324		return;
    325
    326	gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
    327	if ((gpio & 7) == 7)
    328		smu_db_intr(smu->db_irq, smu);
    329}
    330EXPORT_SYMBOL(smu_poll);
    331
    332
    333void smu_done_complete(struct smu_cmd *cmd, void *misc)
    334{
    335	struct completion *comp = misc;
    336
    337	complete(comp);
    338}
    339EXPORT_SYMBOL(smu_done_complete);
    340
    341
    342void smu_spinwait_cmd(struct smu_cmd *cmd)
    343{
    344	while(cmd->status == 1)
    345		smu_poll();
    346}
    347EXPORT_SYMBOL(smu_spinwait_cmd);
    348
    349
    350/* RTC low level commands */
    351static inline int bcd2hex (int n)
    352{
    353	return (((n & 0xf0) >> 4) * 10) + (n & 0xf);
    354}
    355
    356
    357static inline int hex2bcd (int n)
    358{
    359	return ((n / 10) << 4) + (n % 10);
    360}
    361
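/*
 * The SMU RTC exchanges its fields in BCD: each decimal digit takes
 * one nibble, so 59 seconds travels as 0x59 and bcd2hex(0x59) gives
 * back 59.
 */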
    362
    363static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
    364					struct rtc_time *time)
    365{
    366	cmd_buf->cmd = 0x8e;
    367	cmd_buf->length = 8;
    368	cmd_buf->data[0] = 0x80;
    369	cmd_buf->data[1] = hex2bcd(time->tm_sec);
    370	cmd_buf->data[2] = hex2bcd(time->tm_min);
    371	cmd_buf->data[3] = hex2bcd(time->tm_hour);
    372	cmd_buf->data[4] = time->tm_wday;
    373	cmd_buf->data[5] = hex2bcd(time->tm_mday);
    374	cmd_buf->data[6] = hex2bcd(time->tm_mon) + 1;
    375	cmd_buf->data[7] = hex2bcd(time->tm_year - 100);
    376}
    377
    378
    379int smu_get_rtc_time(struct rtc_time *time, int spinwait)
    380{
    381	struct smu_simple_cmd cmd;
    382	int rc;
    383
    384	if (smu == NULL)
    385		return -ENODEV;
    386
    387	memset(time, 0, sizeof(struct rtc_time));
    388	rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
    389			      SMU_CMD_RTC_GET_DATETIME);
    390	if (rc)
    391		return rc;
    392	smu_spinwait_simple(&cmd);
    393
    394	time->tm_sec = bcd2hex(cmd.buffer[0]);
    395	time->tm_min = bcd2hex(cmd.buffer[1]);
    396	time->tm_hour = bcd2hex(cmd.buffer[2]);
    397	time->tm_wday = bcd2hex(cmd.buffer[3]);
    398	time->tm_mday = bcd2hex(cmd.buffer[4]);
    399	time->tm_mon = bcd2hex(cmd.buffer[5]) - 1;
    400	time->tm_year = bcd2hex(cmd.buffer[6]) + 100;
    401
    402	return 0;
    403}
    404
    405
    406int smu_set_rtc_time(struct rtc_time *time, int spinwait)
    407{
    408	struct smu_simple_cmd cmd;
    409	int rc;
    410
    411	if (smu == NULL)
    412		return -ENODEV;
    413
    414	rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 8, NULL, NULL,
    415			      SMU_CMD_RTC_SET_DATETIME,
    416			      hex2bcd(time->tm_sec),
    417			      hex2bcd(time->tm_min),
    418			      hex2bcd(time->tm_hour),
    419			      time->tm_wday,
    420			      hex2bcd(time->tm_mday),
    421			      hex2bcd(time->tm_mon) + 1,
    422			      hex2bcd(time->tm_year - 100));
    423	if (rc)
    424		return rc;
    425	smu_spinwait_simple(&cmd);
    426
    427	return 0;
    428}
    429
    430
    431void smu_shutdown(void)
    432{
    433	struct smu_simple_cmd cmd;
    434
    435	if (smu == NULL)
    436		return;
    437
    438	if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 9, NULL, NULL,
    439			     'S', 'H', 'U', 'T', 'D', 'O', 'W', 'N', 0))
    440		return;
    441	smu_spinwait_simple(&cmd);
    442	for (;;)
    443		;
    444}
    445
    446
    447void smu_restart(void)
    448{
    449	struct smu_simple_cmd cmd;
    450
    451	if (smu == NULL)
    452		return;
    453
    454	if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, NULL, NULL,
    455			     'R', 'E', 'S', 'T', 'A', 'R', 'T', 0))
    456		return;
    457	smu_spinwait_simple(&cmd);
    458	for (;;)
    459		;
    460}
    461
    462
    463int smu_present(void)
    464{
    465	return smu != NULL;
    466}
    467EXPORT_SYMBOL(smu_present);
    468
    469
    470int __init smu_init (void)
    471{
    472	struct device_node *np;
    473	const u32 *data;
    474	int ret = 0;
    475
    476        np = of_find_node_by_type(NULL, "smu");
    477        if (np == NULL)
    478		return -ENODEV;
    479
    480	printk(KERN_INFO "SMU: Driver %s %s\n", VERSION, AUTHOR);
    481
    482	/*
     483	 * SMU based G5s need some memory below 2GB. Thankfully this is
    484	 * called at a time where memblock is still available.
    485	 */
    486	smu_cmdbuf_abs = memblock_phys_alloc_range(4096, 4096, 0, 0x80000000UL);
    487	if (smu_cmdbuf_abs == 0) {
    488		printk(KERN_ERR "SMU: Command buffer allocation failed !\n");
    489		ret = -EINVAL;
    490		goto fail_np;
    491	}
    492
    493	smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES);
    494	if (!smu)
    495		panic("%s: Failed to allocate %zu bytes\n", __func__,
    496		      sizeof(struct smu_device));
    497
    498	spin_lock_init(&smu->lock);
    499	INIT_LIST_HEAD(&smu->cmd_list);
    500	INIT_LIST_HEAD(&smu->cmd_i2c_list);
    501	smu->of_node = np;
    502	smu->db_irq = 0;
    503	smu->msg_irq = 0;
    504
    505	/* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a
     506	 * 32-bit value safely
    507	 */
    508	smu->cmd_buf_abs = (u32)smu_cmdbuf_abs;
    509	smu->cmd_buf = __va(smu_cmdbuf_abs);
    510
    511	smu->db_node = of_find_node_by_name(NULL, "smu-doorbell");
    512	if (smu->db_node == NULL) {
    513		printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n");
    514		ret = -ENXIO;
    515		goto fail_bootmem;
    516	}
    517	data = of_get_property(smu->db_node, "reg", NULL);
    518	if (data == NULL) {
    519		printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n");
    520		ret = -ENXIO;
    521		goto fail_db_node;
    522	}
    523
    524	/* Current setup has one doorbell GPIO that does both doorbell
    525	 * and ack. GPIOs are at 0x50, best would be to find that out
    526	 * in the device-tree though.
    527	 */
    528	smu->doorbell = *data;
    529	if (smu->doorbell < 0x50)
    530		smu->doorbell += 0x50;
    531
    532	/* Now look for the smu-interrupt GPIO */
    533	do {
    534		smu->msg_node = of_find_node_by_name(NULL, "smu-interrupt");
    535		if (smu->msg_node == NULL)
    536			break;
    537		data = of_get_property(smu->msg_node, "reg", NULL);
    538		if (data == NULL) {
    539			of_node_put(smu->msg_node);
    540			smu->msg_node = NULL;
    541			break;
    542		}
    543		smu->msg = *data;
    544		if (smu->msg < 0x50)
    545			smu->msg += 0x50;
    546	} while(0);
    547
    548	/* Doorbell buffer is currently hard-coded, I didn't find a proper
     549	 * device-tree entry giving the address. Best would probably be to use
     550	 * an offset from the K2 base, but let's do it that way for now.
    551	 */
    552	smu->db_buf = ioremap(0x8000860c, 0x1000);
    553	if (smu->db_buf == NULL) {
    554		printk(KERN_ERR "SMU: Can't map doorbell buffer pointer !\n");
    555		ret = -ENXIO;
    556		goto fail_msg_node;
    557	}
    558
    559	/* U3 has an issue with NAP mode when issuing SMU commands */
    560	smu->broken_nap = pmac_get_uninorth_variant() < 4;
    561	if (smu->broken_nap)
    562		printk(KERN_INFO "SMU: using NAP mode workaround\n");
    563
    564	sys_ctrler = SYS_CTRLER_SMU;
    565	return 0;
    566
    567fail_msg_node:
    568	of_node_put(smu->msg_node);
    569fail_db_node:
    570	of_node_put(smu->db_node);
    571fail_bootmem:
    572	memblock_free(smu, sizeof(struct smu_device));
    573	smu = NULL;
    574fail_np:
    575	of_node_put(np);
    576	return ret;
    577}
    578
    579
    580static int smu_late_init(void)
    581{
    582	if (!smu)
    583		return 0;
    584
    585	timer_setup(&smu->i2c_timer, smu_i2c_retry, 0);
    586
    587	if (smu->db_node) {
    588		smu->db_irq = irq_of_parse_and_map(smu->db_node, 0);
    589		if (!smu->db_irq)
    590			printk(KERN_ERR "smu: failed to map irq for node %pOF\n",
    591			       smu->db_node);
    592	}
    593	if (smu->msg_node) {
    594		smu->msg_irq = irq_of_parse_and_map(smu->msg_node, 0);
    595		if (!smu->msg_irq)
    596			printk(KERN_ERR "smu: failed to map irq for node %pOF\n",
    597			       smu->msg_node);
    598	}
    599
    600	/*
    601	 * Try to request the interrupts
    602	 */
    603
    604	if (smu->db_irq) {
    605		if (request_irq(smu->db_irq, smu_db_intr,
    606				IRQF_SHARED, "SMU doorbell", smu) < 0) {
    607			printk(KERN_WARNING "SMU: can't "
    608			       "request interrupt %d\n",
    609			       smu->db_irq);
    610			smu->db_irq = 0;
    611		}
    612	}
    613
    614	if (smu->msg_irq) {
    615		if (request_irq(smu->msg_irq, smu_msg_intr,
    616				IRQF_SHARED, "SMU message", smu) < 0) {
    617			printk(KERN_WARNING "SMU: can't "
    618			       "request interrupt %d\n",
    619			       smu->msg_irq);
    620			smu->msg_irq = 0;
    621		}
    622	}
    623
    624	smu_irq_inited = 1;
    625	return 0;
    626}
    627/* This has to be before arch_initcall as the low i2c stuff relies on the
    628 * above having been done before we reach arch_initcalls
    629 */
    630core_initcall(smu_late_init);
    631
    632/*
    633 * sysfs visibility
    634 */
    635
    636static void smu_expose_childs(struct work_struct *unused)
    637{
    638	struct device_node *np;
    639
    640	for_each_child_of_node(smu->of_node, np)
    641		if (of_device_is_compatible(np, "smu-sensors"))
    642			of_platform_device_create(np, "smu-sensors",
    643						  &smu->of_dev->dev);
    644}
    645
    646static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
    647
    648static int smu_platform_probe(struct platform_device* dev)
    649{
    650	if (!smu)
    651		return -ENODEV;
    652	smu->of_dev = dev;
    653
    654	/*
    655	 * Ok, we are matched, now expose all i2c busses. We have to defer
    656	 * that unfortunately or it would deadlock inside the device model
    657	 */
    658	schedule_work(&smu_expose_childs_work);
    659
    660	return 0;
    661}
    662
    663static const struct of_device_id smu_platform_match[] =
    664{
    665	{
    666		.type		= "smu",
    667	},
    668	{},
    669};
    670
    671static struct platform_driver smu_of_platform_driver =
    672{
    673	.driver = {
    674		.name = "smu",
    675		.of_match_table = smu_platform_match,
    676	},
    677	.probe		= smu_platform_probe,
    678};
    679
    680static int __init smu_init_sysfs(void)
    681{
    682	/*
    683	 * For now, we don't power manage machines with an SMU chip,
    684	 * I'm a bit too far from figuring out how that works with those
    685	 * new chipsets, but that will come back and bite us
    686	 */
    687	platform_driver_register(&smu_of_platform_driver);
    688	return 0;
    689}
    690
    691device_initcall(smu_init_sysfs);
    692
    693struct platform_device *smu_get_ofdev(void)
    694{
    695	if (!smu)
    696		return NULL;
    697	return smu->of_dev;
    698}
    699
    700EXPORT_SYMBOL_GPL(smu_get_ofdev);
    701
    702/*
    703 * i2c interface
    704 */
    705
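/*
 * i2c transfers are tunnelled through the SMU: the request goes out as
 * a SMU_CMD_I2C_COMMAND, then a "stage 1" command repeatedly polls the
 * SMU for the transfer status, retrying every 5ms for up to 20
 * attempts until the transfer completes or fails. Only one i2c command
 * is in flight at a time; others wait on smu->cmd_i2c_list.
 */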
    706static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail)
    707{
    708	void (*done)(struct smu_i2c_cmd *cmd, void *misc) = cmd->done;
    709	void *misc = cmd->misc;
    710	unsigned long flags;
    711
    712	/* Check for read case */
    713	if (!fail && cmd->read) {
    714		if (cmd->pdata[0] < 1)
    715			fail = 1;
    716		else
    717			memcpy(cmd->info.data, &cmd->pdata[1],
    718			       cmd->info.datalen);
    719	}
    720
    721	DPRINTK("SMU: completing, success: %d\n", !fail);
    722
     723	/* Update the status and clear the pending i2c command with the
     724	 * lock held, so nobody comes in while we dequeue the next
     725	 * pending i2c command, if any
     726	 */
    727	spin_lock_irqsave(&smu->lock, flags);
    728	smu->cmd_i2c_cur = NULL;
    729	wmb();
    730	cmd->status = fail ? -EIO : 0;
    731
    732	/* Is there another i2c command waiting ? */
    733	if (!list_empty(&smu->cmd_i2c_list)) {
    734		struct smu_i2c_cmd *newcmd;
    735
    736		/* Fetch it, new current, remove from list */
    737		newcmd = list_entry(smu->cmd_i2c_list.next,
    738				    struct smu_i2c_cmd, link);
    739		smu->cmd_i2c_cur = newcmd;
     740		list_del(&newcmd->link);
     741
     742		/* Queue with low level smu */
     743		list_add_tail(&newcmd->scmd.link, &smu->cmd_list);
    744		if (smu->cmd_cur == NULL)
    745			smu_start_cmd();
    746	}
    747	spin_unlock_irqrestore(&smu->lock, flags);
    748
    749	/* Call command completion handler if any */
    750	if (done)
    751		done(cmd, misc);
    752
    753}
    754
    755
    756static void smu_i2c_retry(struct timer_list *unused)
    757{
    758	struct smu_i2c_cmd	*cmd = smu->cmd_i2c_cur;
    759
    760	DPRINTK("SMU: i2c failure, requeuing...\n");
    761
    762	/* requeue command simply by resetting reply_len */
    763	cmd->pdata[0] = 0xff;
    764	cmd->scmd.reply_len = sizeof(cmd->pdata);
    765	smu_queue_cmd(&cmd->scmd);
    766}
    767
    768
    769static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc)
    770{
    771	struct smu_i2c_cmd	*cmd = misc;
    772	int			fail = 0;
    773
    774	DPRINTK("SMU: i2c compl. stage=%d status=%x pdata[0]=%x rlen: %x\n",
    775		cmd->stage, scmd->status, cmd->pdata[0], scmd->reply_len);
    776
    777	/* Check for possible status */
    778	if (scmd->status < 0)
    779		fail = 1;
    780	else if (cmd->read) {
    781		if (cmd->stage == 0)
    782			fail = cmd->pdata[0] != 0;
    783		else
    784			fail = cmd->pdata[0] >= 0x80;
    785	} else {
    786		fail = cmd->pdata[0] != 0;
    787	}
    788
    789	/* Handle failures by requeuing command, after 5ms interval
    790	 */
    791	if (fail && --cmd->retries > 0) {
    792		DPRINTK("SMU: i2c failure, starting timer...\n");
    793		BUG_ON(cmd != smu->cmd_i2c_cur);
    794		if (!smu_irq_inited) {
    795			mdelay(5);
    796			smu_i2c_retry(NULL);
    797			return;
    798		}
    799		mod_timer(&smu->i2c_timer, jiffies + msecs_to_jiffies(5));
    800		return;
    801	}
    802
    803	/* If failure or stage 1, command is complete */
    804	if (fail || cmd->stage != 0) {
    805		smu_i2c_complete_command(cmd, fail);
    806		return;
    807	}
    808
    809	DPRINTK("SMU: going to stage 1\n");
    810
    811	/* Ok, initial command complete, now poll status */
    812	scmd->reply_buf = cmd->pdata;
    813	scmd->reply_len = sizeof(cmd->pdata);
    814	scmd->data_buf = cmd->pdata;
    815	scmd->data_len = 1;
    816	cmd->pdata[0] = 0;
    817	cmd->stage = 1;
    818	cmd->retries = 20;
    819	smu_queue_cmd(scmd);
    820}
    821
    822
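/*
 * Public entry point for i2c transfers. The hard coded 9 used for
 * scmd.data_len below is presumably the size of the fixed part of
 * cmd->info (bus, type, addresses, sub-address and length bytes) that
 * precedes the data in the low level command.
 */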
    823int smu_queue_i2c(struct smu_i2c_cmd *cmd)
    824{
    825	unsigned long flags;
    826
    827	if (smu == NULL)
    828		return -ENODEV;
    829
    830	/* Fill most fields of scmd */
    831	cmd->scmd.cmd = SMU_CMD_I2C_COMMAND;
    832	cmd->scmd.done = smu_i2c_low_completion;
    833	cmd->scmd.misc = cmd;
    834	cmd->scmd.reply_buf = cmd->pdata;
    835	cmd->scmd.reply_len = sizeof(cmd->pdata);
    836	cmd->scmd.data_buf = (u8 *)(char *)&cmd->info;
    837	cmd->scmd.status = 1;
    838	cmd->stage = 0;
    839	cmd->pdata[0] = 0xff;
    840	cmd->retries = 20;
    841	cmd->status = 1;
    842
    843	/* Check transfer type, sanitize some "info" fields
    844	 * based on transfer type and do more checking
    845	 */
    846	cmd->info.caddr = cmd->info.devaddr;
    847	cmd->read = cmd->info.devaddr & 0x01;
    848	switch(cmd->info.type) {
    849	case SMU_I2C_TRANSFER_SIMPLE:
    850		cmd->info.sublen = 0;
    851		memset(cmd->info.subaddr, 0, sizeof(cmd->info.subaddr));
    852		break;
    853	case SMU_I2C_TRANSFER_COMBINED:
    854		cmd->info.devaddr &= 0xfe;
    855		fallthrough;
    856	case SMU_I2C_TRANSFER_STDSUB:
    857		if (cmd->info.sublen > 3)
    858			return -EINVAL;
    859		break;
    860	default:
    861		return -EINVAL;
    862	}
    863
    864	/* Finish setting up command based on transfer direction
    865	 */
    866	if (cmd->read) {
    867		if (cmd->info.datalen > SMU_I2C_READ_MAX)
    868			return -EINVAL;
    869		memset(cmd->info.data, 0xff, cmd->info.datalen);
    870		cmd->scmd.data_len = 9;
    871	} else {
    872		if (cmd->info.datalen > SMU_I2C_WRITE_MAX)
    873			return -EINVAL;
    874		cmd->scmd.data_len = 9 + cmd->info.datalen;
    875	}
    876
    877	DPRINTK("SMU: i2c enqueuing command\n");
    878	DPRINTK("SMU:   %s, len=%d bus=%x addr=%x sub0=%x type=%x\n",
    879		cmd->read ? "read" : "write", cmd->info.datalen,
    880		cmd->info.bus, cmd->info.caddr,
    881		cmd->info.subaddr[0], cmd->info.type);
    882
    883
    884	/* Enqueue command in i2c list, and if empty, enqueue also in
    885	 * main command list
    886	 */
    887	spin_lock_irqsave(&smu->lock, flags);
    888	if (smu->cmd_i2c_cur == NULL) {
    889		smu->cmd_i2c_cur = cmd;
    890		list_add_tail(&cmd->scmd.link, &smu->cmd_list);
    891		if (smu->cmd_cur == NULL)
    892			smu_start_cmd();
    893	} else
    894		list_add_tail(&cmd->link, &smu->cmd_i2c_list);
    895	spin_unlock_irqrestore(&smu->lock, flags);
    896
    897	return 0;
    898}
    899
    900/*
    901 * Handling of "partitions"
    902 */
    903
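/*
 * Datablocks are fetched from the SMU in small chunks using the
 * MISC_ee GET_DATABLOCK_REC sub-command: each request carries a 32 bit
 * address and a length of at most 0xe bytes, and each reply is copied
 * straight into the destination buffer.
 */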
    904static int smu_read_datablock(u8 *dest, unsigned int addr, unsigned int len)
    905{
    906	DECLARE_COMPLETION_ONSTACK(comp);
    907	unsigned int chunk;
    908	struct smu_cmd cmd;
    909	int rc;
    910	u8 params[8];
    911
    912	/* We currently use a chunk size of 0xe. We could check the
    913	 * SMU firmware version and use bigger sizes though
    914	 */
    915	chunk = 0xe;
    916
    917	while (len) {
    918		unsigned int clen = min(len, chunk);
    919
    920		cmd.cmd = SMU_CMD_MISC_ee_COMMAND;
    921		cmd.data_len = 7;
    922		cmd.data_buf = params;
    923		cmd.reply_len = chunk;
    924		cmd.reply_buf = dest;
    925		cmd.done = smu_done_complete;
    926		cmd.misc = &comp;
    927		params[0] = SMU_CMD_MISC_ee_GET_DATABLOCK_REC;
    928		params[1] = 0x4;
    929		*((u32 *)&params[2]) = addr;
    930		params[6] = clen;
    931
    932		rc = smu_queue_cmd(&cmd);
    933		if (rc)
    934			return rc;
    935		wait_for_completion(&comp);
    936		if (cmd.status != 0)
     937			return cmd.status;
    938		if (cmd.reply_len != clen) {
    939			printk(KERN_DEBUG "SMU: short read in "
    940			       "smu_read_datablock, got: %d, want: %d\n",
    941			       cmd.reply_len, clen);
    942			return -EIO;
    943		}
    944		len -= clen;
    945		addr += clen;
    946		dest += clen;
    947	}
    948	return 0;
    949}
    950
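/*
 * Partitions extracted from the SMU are cached as device-tree
 * properties named "sdb-partition-XX" on the smu node, so later
 * lookups in __smu_get_sdb_partition() are served from the device-tree
 * instead of re-querying the SMU.
 */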
    951static struct smu_sdbp_header *smu_create_sdb_partition(int id)
    952{
    953	DECLARE_COMPLETION_ONSTACK(comp);
    954	struct smu_simple_cmd cmd;
    955	unsigned int addr, len, tlen;
    956	struct smu_sdbp_header *hdr;
    957	struct property *prop;
    958
    959	/* First query the partition info */
    960	DPRINTK("SMU: Query partition infos ... (irq=%d)\n", smu->db_irq);
    961	smu_queue_simple(&cmd, SMU_CMD_PARTITION_COMMAND, 2,
    962			 smu_done_complete, &comp,
    963			 SMU_CMD_PARTITION_LATEST, id);
    964	wait_for_completion(&comp);
    965	DPRINTK("SMU: done, status: %d, reply_len: %d\n",
    966		cmd.cmd.status, cmd.cmd.reply_len);
    967
    968	/* Partition doesn't exist (or other error) */
    969	if (cmd.cmd.status != 0 || cmd.cmd.reply_len != 6)
    970		return NULL;
    971
    972	/* Fetch address and length from reply */
    973	addr = *((u16 *)cmd.buffer);
    974	len = cmd.buffer[3] << 2;
     975	/* Calculate total length to allocate, including the 17 bytes
    976	 * for "sdb-partition-XX" that we append at the end of the buffer
    977	 */
    978	tlen = sizeof(struct property) + len + 18;
    979
    980	prop = kzalloc(tlen, GFP_KERNEL);
    981	if (prop == NULL)
    982		return NULL;
    983	hdr = (struct smu_sdbp_header *)(prop + 1);
    984	prop->name = ((char *)prop) + tlen - 18;
    985	sprintf(prop->name, "sdb-partition-%02x", id);
    986	prop->length = len;
    987	prop->value = hdr;
    988	prop->next = NULL;
    989
    990	/* Read the datablock */
    991	if (smu_read_datablock((u8 *)hdr, addr, len)) {
    992		printk(KERN_DEBUG "SMU: datablock read failed while reading "
    993		       "partition %02x !\n", id);
    994		goto failure;
    995	}
    996
    997	/* Got it, check a few things and create the property */
    998	if (hdr->id != id) {
    999		printk(KERN_DEBUG "SMU: Reading partition %02x and got "
   1000		       "%02x !\n", id, hdr->id);
   1001		goto failure;
   1002	}
   1003	if (of_add_property(smu->of_node, prop)) {
   1004		printk(KERN_DEBUG "SMU: Failed creating sdb-partition-%02x "
   1005		       "property !\n", id);
   1006		goto failure;
   1007	}
   1008
   1009	return hdr;
   1010 failure:
   1011	kfree(prop);
   1012	return NULL;
   1013}
   1014
   1015/* Note: Only allowed to return error code in pointers (using ERR_PTR)
   1016 * when interruptible is 1
   1017 */
   1018static const struct smu_sdbp_header *__smu_get_sdb_partition(int id,
   1019		unsigned int *size, int interruptible)
   1020{
   1021	char pname[32];
   1022	const struct smu_sdbp_header *part;
   1023
   1024	if (!smu)
   1025		return NULL;
   1026
   1027	sprintf(pname, "sdb-partition-%02x", id);
   1028
   1029	DPRINTK("smu_get_sdb_partition(%02x)\n", id);
   1030
   1031	if (interruptible) {
   1032		int rc;
   1033		rc = mutex_lock_interruptible(&smu_part_access);
   1034		if (rc)
   1035			return ERR_PTR(rc);
   1036	} else
   1037		mutex_lock(&smu_part_access);
   1038
   1039	part = of_get_property(smu->of_node, pname, size);
   1040	if (part == NULL) {
   1041		DPRINTK("trying to extract from SMU ...\n");
   1042		part = smu_create_sdb_partition(id);
   1043		if (part != NULL && size)
   1044			*size = part->len << 2;
   1045	}
   1046	mutex_unlock(&smu_part_access);
   1047	return part;
   1048}
   1049
   1050const struct smu_sdbp_header *smu_get_sdb_partition(int id, unsigned int *size)
   1051{
   1052	return __smu_get_sdb_partition(id, size, 0);
   1053}
   1054EXPORT_SYMBOL(smu_get_sdb_partition);
   1055
   1056
   1057/*
   1058 * Userland driver interface
   1059 */
   1060
   1061
   1062static LIST_HEAD(smu_clist);
   1063static DEFINE_SPINLOCK(smu_clist_lock);
   1064
   1065enum smu_file_mode {
   1066	smu_file_commands,
   1067	smu_file_events,
   1068	smu_file_closing
   1069};
   1070
   1071struct smu_private
   1072{
   1073	struct list_head	list;
   1074	enum smu_file_mode	mode;
   1075	int			busy;
   1076	struct smu_cmd		cmd;
   1077	spinlock_t		lock;
   1078	wait_queue_head_t	wait;
   1079	u8			buffer[SMU_MAX_DATA];
   1080};
   1081
   1082
   1083static int smu_open(struct inode *inode, struct file *file)
   1084{
   1085	struct smu_private *pp;
   1086	unsigned long flags;
   1087
   1088	pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL);
   1089	if (!pp)
   1090		return -ENOMEM;
   1091	spin_lock_init(&pp->lock);
   1092	pp->mode = smu_file_commands;
   1093	init_waitqueue_head(&pp->wait);
   1094
   1095	mutex_lock(&smu_mutex);
   1096	spin_lock_irqsave(&smu_clist_lock, flags);
   1097	list_add(&pp->list, &smu_clist);
   1098	spin_unlock_irqrestore(&smu_clist_lock, flags);
   1099	file->private_data = pp;
   1100	mutex_unlock(&smu_mutex);
   1101
   1102	return 0;
   1103}
   1104
   1105
   1106static void smu_user_cmd_done(struct smu_cmd *cmd, void *misc)
   1107{
   1108	struct smu_private *pp = misc;
   1109
   1110	wake_up_all(&pp->wait);
   1111}
   1112
   1113
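/*
 * A write on the smu character device carries a struct
 * smu_user_cmd_hdr followed by the command data; the reply is then
 * read back as a struct smu_user_reply_hdr followed by the reply data.
 * Each open file handles a single command at a time (pp->busy).
 */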
   1114static ssize_t smu_write(struct file *file, const char __user *buf,
   1115			 size_t count, loff_t *ppos)
   1116{
   1117	struct smu_private *pp = file->private_data;
   1118	unsigned long flags;
   1119	struct smu_user_cmd_hdr hdr;
   1120	int rc = 0;
   1121
   1122	if (pp->busy)
   1123		return -EBUSY;
   1124	else if (copy_from_user(&hdr, buf, sizeof(hdr)))
   1125		return -EFAULT;
   1126	else if (hdr.cmdtype == SMU_CMDTYPE_WANTS_EVENTS) {
   1127		pp->mode = smu_file_events;
   1128		return 0;
   1129	} else if (hdr.cmdtype == SMU_CMDTYPE_GET_PARTITION) {
   1130		const struct smu_sdbp_header *part;
   1131		part = __smu_get_sdb_partition(hdr.cmd, NULL, 1);
   1132		if (part == NULL)
   1133			return -EINVAL;
   1134		else if (IS_ERR(part))
   1135			return PTR_ERR(part);
   1136		return 0;
   1137	} else if (hdr.cmdtype != SMU_CMDTYPE_SMU)
   1138		return -EINVAL;
   1139	else if (pp->mode != smu_file_commands)
   1140		return -EBADFD;
   1141	else if (hdr.data_len > SMU_MAX_DATA)
   1142		return -EINVAL;
   1143
   1144	spin_lock_irqsave(&pp->lock, flags);
   1145	if (pp->busy) {
   1146		spin_unlock_irqrestore(&pp->lock, flags);
   1147		return -EBUSY;
   1148	}
   1149	pp->busy = 1;
   1150	pp->cmd.status = 1;
   1151	spin_unlock_irqrestore(&pp->lock, flags);
   1152
   1153	if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) {
   1154		pp->busy = 0;
   1155		return -EFAULT;
   1156	}
   1157
   1158	pp->cmd.cmd = hdr.cmd;
   1159	pp->cmd.data_len = hdr.data_len;
   1160	pp->cmd.reply_len = SMU_MAX_DATA;
   1161	pp->cmd.data_buf = pp->buffer;
   1162	pp->cmd.reply_buf = pp->buffer;
   1163	pp->cmd.done = smu_user_cmd_done;
   1164	pp->cmd.misc = pp;
   1165	rc = smu_queue_cmd(&pp->cmd);
   1166	if (rc < 0)
   1167		return rc;
   1168	return count;
   1169}
   1170
   1171
   1172static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
   1173				char __user *buf, size_t count)
   1174{
   1175	DECLARE_WAITQUEUE(wait, current);
   1176	struct smu_user_reply_hdr hdr;
   1177	unsigned long flags;
   1178	int size, rc = 0;
   1179
   1180	if (!pp->busy)
   1181		return 0;
   1182	if (count < sizeof(struct smu_user_reply_hdr))
   1183		return -EOVERFLOW;
   1184	spin_lock_irqsave(&pp->lock, flags);
   1185	if (pp->cmd.status == 1) {
   1186		if (file->f_flags & O_NONBLOCK) {
   1187			spin_unlock_irqrestore(&pp->lock, flags);
   1188			return -EAGAIN;
   1189		}
   1190		add_wait_queue(&pp->wait, &wait);
   1191		for (;;) {
   1192			set_current_state(TASK_INTERRUPTIBLE);
   1193			rc = 0;
   1194			if (pp->cmd.status != 1)
   1195				break;
   1196			rc = -ERESTARTSYS;
   1197			if (signal_pending(current))
   1198				break;
   1199			spin_unlock_irqrestore(&pp->lock, flags);
   1200			schedule();
   1201			spin_lock_irqsave(&pp->lock, flags);
   1202		}
   1203		set_current_state(TASK_RUNNING);
   1204		remove_wait_queue(&pp->wait, &wait);
   1205	}
   1206	spin_unlock_irqrestore(&pp->lock, flags);
   1207	if (rc)
   1208		return rc;
   1209	if (pp->cmd.status != 0)
   1210		pp->cmd.reply_len = 0;
   1211	size = sizeof(hdr) + pp->cmd.reply_len;
   1212	if (count < size)
   1213		size = count;
   1214	rc = size;
   1215	hdr.status = pp->cmd.status;
   1216	hdr.reply_len = pp->cmd.reply_len;
   1217	if (copy_to_user(buf, &hdr, sizeof(hdr)))
   1218		return -EFAULT;
   1219	size -= sizeof(hdr);
   1220	if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size))
   1221		return -EFAULT;
   1222	pp->busy = 0;
   1223
   1224	return rc;
   1225}
   1226
   1227
   1228static ssize_t smu_read_events(struct file *file, struct smu_private *pp,
   1229			       char __user *buf, size_t count)
   1230{
   1231	/* Not implemented */
   1232	msleep_interruptible(1000);
   1233	return 0;
   1234}
   1235
   1236
   1237static ssize_t smu_read(struct file *file, char __user *buf,
   1238			size_t count, loff_t *ppos)
   1239{
   1240	struct smu_private *pp = file->private_data;
   1241
   1242	if (pp->mode == smu_file_commands)
   1243		return smu_read_command(file, pp, buf, count);
   1244	if (pp->mode == smu_file_events)
   1245		return smu_read_events(file, pp, buf, count);
   1246
   1247	return -EBADFD;
   1248}
   1249
   1250static __poll_t smu_fpoll(struct file *file, poll_table *wait)
   1251{
   1252	struct smu_private *pp = file->private_data;
   1253	__poll_t mask = 0;
   1254	unsigned long flags;
   1255
   1256	if (!pp)
   1257		return 0;
   1258
   1259	if (pp->mode == smu_file_commands) {
   1260		poll_wait(file, &pp->wait, wait);
   1261
   1262		spin_lock_irqsave(&pp->lock, flags);
   1263		if (pp->busy && pp->cmd.status != 1)
   1264			mask |= EPOLLIN;
   1265		spin_unlock_irqrestore(&pp->lock, flags);
   1266	}
   1267	if (pp->mode == smu_file_events) {
   1268		/* Not yet implemented */
   1269	}
   1270	return mask;
   1271}
   1272
   1273static int smu_release(struct inode *inode, struct file *file)
   1274{
   1275	struct smu_private *pp = file->private_data;
   1276	unsigned long flags;
   1277	unsigned int busy;
   1278
   1279	if (!pp)
   1280		return 0;
   1281
   1282	file->private_data = NULL;
   1283
   1284	/* Mark file as closing to avoid races with new request */
   1285	spin_lock_irqsave(&pp->lock, flags);
   1286	pp->mode = smu_file_closing;
   1287	busy = pp->busy;
   1288
   1289	/* Wait for any pending request to complete */
   1290	if (busy && pp->cmd.status == 1) {
   1291		DECLARE_WAITQUEUE(wait, current);
   1292
   1293		add_wait_queue(&pp->wait, &wait);
   1294		for (;;) {
   1295			set_current_state(TASK_UNINTERRUPTIBLE);
   1296			if (pp->cmd.status != 1)
   1297				break;
   1298			spin_unlock_irqrestore(&pp->lock, flags);
   1299			schedule();
   1300			spin_lock_irqsave(&pp->lock, flags);
   1301		}
   1302		set_current_state(TASK_RUNNING);
   1303		remove_wait_queue(&pp->wait, &wait);
   1304	}
   1305	spin_unlock_irqrestore(&pp->lock, flags);
   1306
   1307	spin_lock_irqsave(&smu_clist_lock, flags);
   1308	list_del(&pp->list);
   1309	spin_unlock_irqrestore(&smu_clist_lock, flags);
   1310	kfree(pp);
   1311
   1312	return 0;
   1313}
   1314
   1315
   1316static const struct file_operations smu_device_fops = {
   1317	.llseek		= no_llseek,
   1318	.read		= smu_read,
   1319	.write		= smu_write,
   1320	.poll		= smu_fpoll,
   1321	.open		= smu_open,
   1322	.release	= smu_release,
   1323};
   1324
   1325static struct miscdevice pmu_device = {
   1326	MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
   1327};
   1328
   1329static int smu_device_init(void)
   1330{
   1331	if (!smu)
   1332		return -ENODEV;
   1333	if (misc_register(&pmu_device) < 0)
    1334		printk(KERN_ERR "smu: cannot register misc device.\n");
   1335	return 0;
   1336}
   1337device_initcall(smu_device_init);