cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

emif.c (35512B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * EMIF driver
 *
 * Copyright (C) 2012 Texas Instruments, Inc.
 *
 * Aneesh V <aneesh@ti.com>
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/platform_data/emif_plat.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pm.h>

#include "emif.h"
#include "jedec_ddr.h"
#include "of_memory.h"

/**
 * struct emif_data - Per device static data for driver's use
 * @duplicate:			Whether the DDR devices attached to this EMIF
 *				instance are exactly the same as those on
 *				EMIF1. In this case we can save some memory
 *				and processing
 * @temperature_level:		Maximum temperature of LPDDR2 devices attached
 *				to this EMIF - read from MR4 register. If there
 *				are two devices attached to this EMIF, this
 *				value is the maximum of the two temperature
 *				levels.
 * @lpmode:			Low power mode currently programmed into
 *				EMIF_PWR_MGMT_CTRL
 * @node:			node in the device list
 * @irq_state:			saved IRQ flags used with the emif_lock
 *				spinlock
 * @base:			base address of memory-mapped IO registers.
 * @dev:			device pointer.
 * @regs_cache:			An array of 'struct emif_regs' that stores
 *				calculated register values for different
 *				frequencies, to avoid re-calculating them on
 *				each DVFS transition.
 * @curr_regs:			The set of register values used in the last
 *				frequency change (i.e. corresponding to the
 *				frequency in effect at the moment)
 * @plat_data:			Pointer to saved platform data.
 * @debugfs_root:		dentry to the root folder for EMIF in debugfs
 * @np_ddr:			Pointer to ddr device tree node
 */
struct emif_data {
	u8				duplicate;
	u8				temperature_level;
	u8				lpmode;
	struct list_head		node;
	unsigned long			irq_state;
	void __iomem			*base;
	struct device			*dev;
	struct emif_regs		*regs_cache[EMIF_MAX_NUM_FREQUENCIES];
	struct emif_regs		*curr_regs;
	struct emif_platform_data	*plat_data;
	struct dentry			*debugfs_root;
	struct device_node		*np_ddr;
};

static struct emif_data *emif1;
static DEFINE_SPINLOCK(emif_lock);
static unsigned long	irq_state;
static LIST_HEAD(device_list);

#ifdef CONFIG_DEBUG_FS
static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
	struct emif_regs *regs)
{
	u32 type = emif->plat_data->device_info->type;
	u32 ip_rev = emif->plat_data->ip_rev;

	seq_printf(s, "EMIF register cache dump for %dMHz\n",
		regs->freq/1000000);

	seq_printf(s, "ref_ctrl_shdw\t: 0x%08x\n", regs->ref_ctrl_shdw);
	seq_printf(s, "sdram_tim1_shdw\t: 0x%08x\n", regs->sdram_tim1_shdw);
	seq_printf(s, "sdram_tim2_shdw\t: 0x%08x\n", regs->sdram_tim2_shdw);
	seq_printf(s, "sdram_tim3_shdw\t: 0x%08x\n", regs->sdram_tim3_shdw);

	if (ip_rev == EMIF_4D) {
		seq_printf(s, "read_idle_ctrl_shdw_normal\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_normal);
		seq_printf(s, "read_idle_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->read_idle_ctrl_shdw_volt_ramp);
	} else if (ip_rev == EMIF_4D5) {
		seq_printf(s, "dll_calib_ctrl_shdw_normal\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_normal);
		seq_printf(s, "dll_calib_ctrl_shdw_volt_ramp\t: 0x%08x\n",
			regs->dll_calib_ctrl_shdw_volt_ramp);
	}

	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) {
		seq_printf(s, "ref_ctrl_shdw_derated\t: 0x%08x\n",
			regs->ref_ctrl_shdw_derated);
		seq_printf(s, "sdram_tim1_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim1_shdw_derated);
		seq_printf(s, "sdram_tim3_shdw_derated\t: 0x%08x\n",
			regs->sdram_tim3_shdw_derated);
	}
}

static int emif_regdump_show(struct seq_file *s, void *unused)
{
	struct emif_data	*emif	= s->private;
	struct emif_regs	**regs_cache;
	int			i;

	if (emif->duplicate)
		regs_cache = emif1->regs_cache;
	else
		regs_cache = emif->regs_cache;

	for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) {
		do_emif_regdump_show(s, emif, regs_cache[i]);
		seq_putc(s, '\n');
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(emif_regdump);

static int emif_mr4_show(struct seq_file *s, void *unused)
{
	struct emif_data *emif = s->private;

	seq_printf(s, "MR4=%d\n", emif->temperature_level);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(emif_mr4);

static int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
	emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
	debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
			    &emif_regdump_fops);
	debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
			    &emif_mr4_fops);
	return 0;
}

static void __exit emif_debugfs_exit(struct emif_data *emif)
{
	debugfs_remove_recursive(emif->debugfs_root);
	emif->debugfs_root = NULL;
}
#else
static inline int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
	return 0;
}

static inline void __exit emif_debugfs_exit(struct emif_data *emif)
{
}
#endif

/*
 * Get bus width used by EMIF. Note that this may be different from the
 * bus width of the DDR devices used. For instance two 16-bit DDR devices
 * may be connected to a given CS of EMIF. In this case bus width as far
 * as EMIF is concerned is 32, whereas the DDR bus width is 16 bits.
 */
static u32 get_emif_bus_width(struct emif_data *emif)
{
	u32		width;
	void __iomem	*base = emif->base;

	width = (readl(base + EMIF_SDRAM_CONFIG) & NARROW_MODE_MASK)
			>> NARROW_MODE_SHIFT;
	width = width == 0 ? 32 : 16;

	return width;
}

static void set_lpmode(struct emif_data *emif, u8 lpmode)
{
	u32 temp;
	void __iomem *base = emif->base;

	/*
	 * Workaround for errata i743 - LPDDR2 Power-Down State is Not
	 * Efficient
	 *
	 * i743 DESCRIPTION:
	 * The EMIF supports power-down state for low power. The EMIF
	 * automatically puts the SDRAM into power-down after the memory is
	 * not accessed for a defined number of cycles and the
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set to 0x4.
	 * As the EMIF supports automatic output impedance calibration, a ZQ
	 * calibration long command is issued every time it exits active
	 * power-down and precharge power-down modes. The EMIF waits and
	 * blocks any other command during this calibration.
	 * The EMIF does not allow selective disabling of ZQ calibration upon
	 * exit of power-down mode. Due to very short periods of power-down
	 * cycles, ZQ calibration overhead creates bandwidth issues and
	 * increases overall system power consumption. On the other hand,
	 * issuing ZQ calibration long commands when exiting self-refresh is
	 * still required.
	 *
	 * WORKAROUND
	 * Because there is no power consumption benefit of the power-down due
	 * to the calibration and there is a performance risk, the guideline
	 * is not to allow the power-down state and, therefore, not to set
	 * the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
	 */
	if ((emif->plat_data->ip_rev == EMIF_4D) &&
	    (lpmode == EMIF_LP_MODE_PWR_DN)) {
		WARN_ONCE(1,
			  "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743; switching to LP_MODE_SELF_REFRESH(2)\n");
		/* rollback LP_MODE to Self-refresh mode */
		lpmode = EMIF_LP_MODE_SELF_REFRESH;
	}

	temp = readl(base + EMIF_POWER_MANAGEMENT_CONTROL);
	temp &= ~LP_MODE_MASK;
	temp |= (lpmode << LP_MODE_SHIFT);
	writel(temp, base + EMIF_POWER_MANAGEMENT_CONTROL);
}

static void do_freq_update(void)
{
	struct emif_data *emif;

	/*
	 * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
	 *
	 * i728 DESCRIPTION:
	 * The EMIF automatically puts the SDRAM into self-refresh mode
	 * after the EMIF has not performed accesses during
	 * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
	 * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
	 * to 0x2. If during a small window the following three events
	 * occur:
	 * - The SR_TIMING counter expires
	 * - And frequency change is requested
	 * - And OCP access is requested
	 * then the clock on the DDR interface becomes unstable.
	 *
	 * WORKAROUND
	 * To avoid the occurrence of the three events, the workaround
	 * is to disable the self-refresh when requesting a frequency
	 * change. Before requesting a frequency change the software must
	 * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
	 * frequency change has been done, the software can reprogram
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2.
	 */
	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_DISABLE);
	}

	/*
	 * TODO: Do FREQ_UPDATE here when an API
	 * is available for this as part of the new
	 * clock framework
	 */

	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH);
	}
}

/* Find addressing table entry based on the device's type and density */
static const struct lpddr2_addressing *get_addressing_table(
	const struct ddr_device_info *device_info)
{
	u32		index, type, density;

	type = device_info->type;
	density = device_info->density;

	switch (type) {
	case DDR_TYPE_LPDDR2_S4:
		index = density - 1;
		break;
	case DDR_TYPE_LPDDR2_S2:
		switch (density) {
		case DDR_DENSITY_1Gb:
		case DDR_DENSITY_2Gb:
			index = density + 3;
			break;
		default:
			index = density - 1;
		}
		break;
	default:
		return NULL;
	}

	return &lpddr2_jedec_addressing_table[index];
}
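
/*
 * Worked example for the index math above (illustrative only, and assuming
 * the jedec_ddr.h encoding where DDR_DENSITY_64Mb == 1 and the densities
 * count up from there): an LPDDR2-S4 1Gb part has density == 5 and uses
 * table entry 4, while an LPDDR2-S2 1Gb part maps past the shared entries
 * to entry 8, where the S2-specific addressing rows live.
 */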

static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing,
		bool cs1_used, bool cal_resistors_per_cs)
{
	u32 zq = 0, val = 0;

	val = EMIF_ZQCS_INTERVAL_US * 1000 / addressing->tREFI_ns;
	zq |= val << ZQ_REFINTERVAL_SHIFT;

	val = DIV_ROUND_UP(T_ZQCL_DEFAULT_NS, T_ZQCS_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQCL_MULT_SHIFT;

	val = DIV_ROUND_UP(T_ZQINIT_DEFAULT_NS, T_ZQCL_DEFAULT_NS) - 1;
	zq |= val << ZQ_ZQINIT_MULT_SHIFT;

	zq |= ZQ_SFEXITEN_ENABLE << ZQ_SFEXITEN_SHIFT;

	if (cal_resistors_per_cs)
		zq |= ZQ_DUALCALEN_ENABLE << ZQ_DUALCALEN_SHIFT;
	else
		zq |= ZQ_DUALCALEN_DISABLE << ZQ_DUALCALEN_SHIFT;

	zq |= ZQ_CS0EN_MASK; /* CS0 is used for sure */

	val = cs1_used ? 1 : 0;
	zq |= val << ZQ_CS1EN_SHIFT;

	return zq;
}
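
/*
 * Illustrative numbers for the ZQCS interval computed above, assuming
 * EMIF_ZQCS_INTERVAL_US is 50000 (50 ms) and a typical LPDDR2 tREFI of
 * 7800 ns: REG_ZQ_REFINTERVAL = 50000 * 1000 / 7800 ~= 6410, i.e. one
 * ZQCS calibration roughly every 6410 refresh periods.
 */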

static u32 get_temp_alert_config(const struct lpddr2_addressing *addressing,
		const struct emif_custom_configs *custom_configs, bool cs1_used,
		u32 sdram_io_width, u32 emif_bus_width)
{
	u32 alert = 0, interval, devcnt;

	if (custom_configs && (custom_configs->mask &
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL))
		interval = custom_configs->temp_alert_poll_interval_ms;
	else
		interval = TEMP_ALERT_POLL_INTERVAL_DEFAULT_MS;

	interval *= 1000000;			/* Convert to ns */
	interval /= addressing->tREFI_ns;	/* Convert to refresh cycles */
	alert |= (interval << TA_REFINTERVAL_SHIFT);

	/*
	 * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width
	 * also to this form and subtract to get TA_DEVCNT, which is
	 * in log2(x) form.
	 */
	emif_bus_width = __fls(emif_bus_width) - 1;
	devcnt = emif_bus_width - sdram_io_width;
	alert |= devcnt << TA_DEVCNT_SHIFT;

	/* DEVWDT is in 'log2(x) - 3' form */
	alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT;

	alert |= 1 << TA_SFEXITEN_SHIFT;
	alert |= 1 << TA_CS0EN_SHIFT;
	alert |= (cs1_used ? 1 : 0) << TA_CS1EN_SHIFT;

	return alert;
}
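
/*
 * Worked example for the log2 encodings above: two x16 LPDDR2 parts on a
 * 32-bit EMIF. sdram_io_width arrives as log2(16) - 1 = 3, emif_bus_width
 * becomes __fls(32) - 1 = 4, so TA_DEVCNT = 4 - 3 = 1 = log2(2) devices
 * per CS, and TA_DEVWDT = 3 - 2 = 1 = log2(16) - 3.
 */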

static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev)
{
	u32 pwr_mgmt_ctrl	= 0, timeout;
	u32 lpmode		= EMIF_LP_MODE_SELF_REFRESH;
	u32 timeout_perf	= EMIF_LP_MODE_TIMEOUT_PERFORMANCE;
	u32 timeout_pwr		= EMIF_LP_MODE_TIMEOUT_POWER;
	u32 freq_threshold	= EMIF_LP_MODE_FREQ_THRESHOLD;
	u32 mask;
	u8 shift;

	struct emif_custom_configs *cust_cfgs = emif->plat_data->custom_configs;

	if (cust_cfgs && (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE)) {
		lpmode		= cust_cfgs->lpmode;
		timeout_perf	= cust_cfgs->lpmode_timeout_performance;
		timeout_pwr	= cust_cfgs->lpmode_timeout_power;
		freq_threshold  = cust_cfgs->lpmode_freq_threshold;
	}

	/* Timeout based on DDR frequency */
	timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;

	/*
	 * The value to be set in the register is "log2(timeout) - 3":
	 * if timeout < 16, load 0 in the register;
	 * if timeout is not a power of 2, round up to the next power of 2.
	 */
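	/*
	 * For example, a timeout of 20 cycles is not a power of 2, so it is
	 * first doubled to 40; __fls(40) - 3 = 5 - 3 = 2, which is the
	 * encoding of the next power of 2 above 20 (log2(32) - 3 = 2).
	 */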
	if (timeout < 16) {
		timeout = 0;
	} else {
		if (timeout & (timeout - 1))
			timeout <<= 1;
		timeout = __fls(timeout) - 3;
	}

	switch (lpmode) {
	case EMIF_LP_MODE_CLOCK_STOP:
		shift = CS_TIM_SHIFT;
		mask = CS_TIM_MASK;
		break;
	case EMIF_LP_MODE_SELF_REFRESH:
		/* Workaround for errata i735 */
		if (timeout < 6)
			timeout = 6;

		shift = SR_TIM_SHIFT;
		mask = SR_TIM_MASK;
		break;
	case EMIF_LP_MODE_PWR_DN:
		shift = PD_TIM_SHIFT;
		mask = PD_TIM_MASK;
		break;
	case EMIF_LP_MODE_DISABLE:
	default:
		mask = 0;
		shift = 0;
		break;
	}
	/* Round to maximum in case of overflow, BUT warn! */
	if (lpmode != EMIF_LP_MODE_DISABLE && timeout > mask >> shift) {
		pr_err("TIMEOUT Overflow - lpmode=%d perf=%d pwr=%d freq=%d\n",
		       lpmode,
		       timeout_perf,
		       timeout_pwr,
		       freq_threshold);
		WARN(1, "timeout=0x%02x greater than 0x%02x. Using max\n",
		     timeout, mask >> shift);
		timeout = mask >> shift;
	}

	/* Set up the required timing */
	pwr_mgmt_ctrl = (timeout << shift) & mask;
	/* Set up a default mask for the rest of the modes */
	pwr_mgmt_ctrl |= (SR_TIM_MASK | CS_TIM_MASK | PD_TIM_MASK) &
			  ~mask;

	/* No CS_TIM in EMIF_4D5 */
	if (ip_rev == EMIF_4D5)
		pwr_mgmt_ctrl &= ~CS_TIM_MASK;

	pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;

	return pwr_mgmt_ctrl;
}

/*
 * Get the temperature level of the EMIF instance:
 * Reads the MR4 register of attached SDRAM parts to find out the temperature
 * level. If there are two parts attached (one on each CS), then the
 * temperature level for the EMIF instance is the higher of the two
 * temperatures.
 */
static void get_temperature_level(struct emif_data *emif)
{
	u32		temp, temperature_level;
	void __iomem	*base;

	base = emif->base;

	/* Read mode register 4 */
	writel(DDR_MR4, base + EMIF_LPDDR2_MODE_REG_CONFIG);
	temperature_level = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
	temperature_level = (temperature_level & MR4_SDRAM_REF_RATE_MASK) >>
				MR4_SDRAM_REF_RATE_SHIFT;

	if (emif->plat_data->device_info->cs1_used) {
		writel(DDR_MR4 | CS_MASK, base + EMIF_LPDDR2_MODE_REG_CONFIG);
		temp = readl(base + EMIF_LPDDR2_MODE_REG_DATA);
		temp = (temp & MR4_SDRAM_REF_RATE_MASK)
				>> MR4_SDRAM_REF_RATE_SHIFT;
		temperature_level = max(temp, temperature_level);
	}

	/* Treat everything less than nominal (3) in MR4 as nominal */
	if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
		temperature_level = SDRAM_TEMP_NOMINAL;

	/* If we get a reserved value in MR4, keep the existing value */
	if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
		emif->temperature_level = temperature_level;
}

/*
 * setup_temperature_sensitive_regs() - set the timings for temperature
 * sensitive registers. This happens once at initialisation time based
 * on the temperature at boot time and subsequently based on the temperature
 * alert interrupt. Temperature alert can happen when the temperature
 * increases or drops. So this function can have the effect of either
 * derating the timings or going back to nominal values.
 */
static void setup_temperature_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs)
{
	u32		tim1, tim3, ref_ctrl, type;
	void __iomem	*base = emif->base;
	u32		temperature;

	type = emif->plat_data->device_info->type;

	tim1 = regs->sdram_tim1_shdw;
	tim3 = regs->sdram_tim3_shdw;
	ref_ctrl = regs->ref_ctrl_shdw;

	/* No de-rating for non-lpddr2 devices */
	if (type != DDR_TYPE_LPDDR2_S2 && type != DDR_TYPE_LPDDR2_S4)
		goto out;

	temperature = emif->temperature_level;
	if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH) {
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	} else if (temperature == SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS) {
		tim1 = regs->sdram_tim1_shdw_derated;
		tim3 = regs->sdram_tim3_shdw_derated;
		ref_ctrl = regs->ref_ctrl_shdw_derated;
	}

out:
	writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
	writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
	writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
}

static irqreturn_t handle_temp_alert(void __iomem *base, struct emif_data *emif)
{
	u32		old_temp_level;
	irqreturn_t	ret = IRQ_HANDLED;
	struct emif_custom_configs *custom_configs;

	spin_lock_irqsave(&emif_lock, irq_state);
	old_temp_level = emif->temperature_level;
	get_temperature_level(emif);

	if (unlikely(emif->temperature_level == old_temp_level)) {
		goto out;
	} else if (!emif->curr_regs) {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
		goto out;
	}

	custom_configs = emif->plat_data->custom_configs;

	/*
	 * If we detect higher than "nominal rating" from the DDR sensor
	 * on an unsupported DDR part, shut down the system
	 */
	if (custom_configs && !(custom_configs->mask &
				EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) {
		if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
			dev_err(emif->dev,
				"%s: memory is not extended-temperature capable, treating MR4=0x%02x as a shutdown event\n",
				__func__, emif->temperature_level);
			/*
			 * Temperature far too high - do kernel_power_off()
			 * from thread context
			 */
			emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN;
			ret = IRQ_WAKE_THREAD;
			goto out;
		}
	}

	if (emif->temperature_level < old_temp_level ||
		emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		/*
		 * Temperature coming down - defer handling to thread OR
		 * Temperature far too high - do kernel_power_off() from
		 * thread context
		 */
		ret = IRQ_WAKE_THREAD;
	} else {
		/* Temperature is going up - handle immediately */
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	}

out:
	spin_unlock_irqrestore(&emif_lock, irq_state);
	return ret;
}

static irqreturn_t emif_interrupt_handler(int irq, void *dev_id)
{
	u32			interrupts;
	struct emif_data	*emif = dev_id;
	void __iomem		*base = emif->base;
	struct device		*dev = emif->dev;
	irqreturn_t		ret = IRQ_HANDLED;

	/* Save the status and clear it */
	interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);

	/*
	 * Handle temperature alert
	 * Temperature alert should be the same for all ports
	 * So, it's enough to process it only for one of the ports
	 */
	if (interrupts & TA_SYS_MASK)
		ret = handle_temp_alert(base, emif);

	if (interrupts & ERR_SYS_MASK)
		dev_err(dev, "Access error from SYS port - %x\n", interrupts);

	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* Save the status and clear it */
		interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);

		if (interrupts & ERR_LL_MASK)
			dev_err(dev, "Access error from LL port - %x\n",
				interrupts);
	}

	return ret;
}

static irqreturn_t emif_threaded_isr(int irq, void *dev_id)
{
	struct emif_data	*emif = dev_id;

	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit. Needs shutdown!\n");

		/* If we have power-off ability, use it, else try restarting */
		if (kernel_can_power_off()) {
			kernel_power_off();
		} else {
			WARN(1, "FIXME: no pm_power_off, trying restart\n");
			kernel_restart("SDRAM Over-temp Emergency restart");
		}
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&emif_lock, irq_state);

	if (emif->curr_regs) {
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	} else {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
	}

	spin_unlock_irqrestore(&emif_lock, irq_state);

	return IRQ_HANDLED;
}

static void clear_all_interrupts(struct emif_data *emif)
{
	void __iomem	*base = emif->base;

	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS),
		base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_STATUS),
			base + EMIF_LL_OCP_INTERRUPT_STATUS);
}

static void disable_and_clear_all_interrupts(struct emif_data *emif)
{
	void __iomem		*base = emif->base;

	/* Disable all interrupts */
	writel(readl(base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET),
		base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_CLEAR);
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE)
		writel(readl(base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET),
			base + EMIF_LL_OCP_INTERRUPT_ENABLE_CLEAR);

	/* Clear all interrupts */
	clear_all_interrupts(emif);
}

static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
{
	u32		interrupts, type;
	void __iomem	*base = emif->base;

	type = emif->plat_data->device_info->type;

	clear_all_interrupts(emif);

	/* Enable interrupts for SYS interface */
	interrupts = EN_ERR_SYS_MASK;
	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
		interrupts |= EN_TA_SYS_MASK;
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);

	/* Enable interrupts for LL interface */
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* TA need not be enabled for LL */
		interrupts = EN_ERR_LL_MASK;
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
	}

	/* setup IRQ handlers */
	return devm_request_threaded_irq(emif->dev, irq,
				    emif_interrupt_handler,
				    emif_threaded_isr,
				    0, dev_name(emif->dev),
				    emif);
}

static void __init_or_module emif_onetime_settings(struct emif_data *emif)
{
	u32				pwr_mgmt_ctrl, zq, temp_alert_cfg;
	void __iomem			*base = emif->base;
	const struct lpddr2_addressing	*addressing;
	const struct ddr_device_info	*device_info;

	device_info = emif->plat_data->device_info;
	addressing = get_addressing_table(device_info);

	/*
	 * Init power management settings
	 * We don't know the frequency yet. Use a high frequency
	 * value for a conservative timeout setting
	 */
	pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
			emif->plat_data->ip_rev);
	emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
	writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);

	/* Init ZQ calibration settings */
	zq = get_zq_config_reg(addressing, device_info->cs1_used,
		device_info->cal_resistors_per_cs);
	writel(zq, base + EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG);

	/* Check temperature level */
	get_temperature_level(emif);
	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit. Needs shutdown!\n");

	/* Init temperature polling */
	temp_alert_cfg = get_temp_alert_config(addressing,
		emif->plat_data->custom_configs, device_info->cs1_used,
		device_info->io_width, get_emif_bus_width(emif));
	writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);

	/*
	 * Program external PHY control registers that are not frequency
	 * dependent
	 */
	if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
		return;
	writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
	writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
	writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
	writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
	writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
	writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
	writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
	writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
	writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
	writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
	writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
	writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
	writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
	writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
	writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
	writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
	writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
	writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
	writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
	writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
	writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
}

static void get_default_timings(struct emif_data *emif)
{
	struct emif_platform_data *pd = emif->plat_data;

	pd->timings		= lpddr2_jedec_timings;
	pd->timings_arr_size	= ARRAY_SIZE(lpddr2_jedec_timings);

	dev_warn(emif->dev, "%s: using default timings\n", __func__);
}

static int is_dev_data_valid(u32 type, u32 density, u32 io_width, u32 phy_type,
		u32 ip_rev, struct device *dev)
{
	int valid;

	valid = (type == DDR_TYPE_LPDDR2_S4 ||
			type == DDR_TYPE_LPDDR2_S2)
		&& (density >= DDR_DENSITY_64Mb
			&& density <= DDR_DENSITY_8Gb)
		&& (io_width >= DDR_IO_WIDTH_8
			&& io_width <= DDR_IO_WIDTH_32);

	/* Combinations of EMIF and PHY revisions that we support today */
	switch (ip_rev) {
	case EMIF_4D:
		valid = valid && (phy_type == EMIF_PHY_TYPE_ATTILAPHY);
		break;
	case EMIF_4D5:
		valid = valid && (phy_type == EMIF_PHY_TYPE_INTELLIPHY);
		break;
	default:
		valid = 0;
	}

	if (!valid)
		dev_err(dev, "%s: invalid DDR details\n", __func__);
	return valid;
}

static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
		struct device *dev)
{
	int valid = 1;

	if ((cust_cfgs->mask & EMIF_CUSTOM_CONFIG_LPMODE) &&
		(cust_cfgs->lpmode != EMIF_LP_MODE_DISABLE))
		valid = cust_cfgs->lpmode_freq_threshold &&
			cust_cfgs->lpmode_timeout_performance &&
			cust_cfgs->lpmode_timeout_power;

	if (cust_cfgs->mask & EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL)
		valid = valid && cust_cfgs->temp_alert_poll_interval_ms;

	if (!valid)
		dev_warn(dev, "%s: invalid custom configs\n", __func__);

	return valid;
}

#if defined(CONFIG_OF)
static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
		struct emif_data *emif)
{
	struct emif_custom_configs	*cust_cfgs = NULL;
	int				len;
	const __be32			*lpmode, *poll_intvl;

	lpmode = of_get_property(np_emif, "low-power-mode", &len);
	poll_intvl = of_get_property(np_emif, "temp-alert-poll-interval", &len);

	if (lpmode || poll_intvl)
		cust_cfgs = devm_kzalloc(emif->dev, sizeof(*cust_cfgs),
			GFP_KERNEL);

	if (!cust_cfgs)
		return;

	if (lpmode) {
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_LPMODE;
		cust_cfgs->lpmode = be32_to_cpup(lpmode);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-performance",
				&cust_cfgs->lpmode_timeout_performance);
		of_property_read_u32(np_emif,
				"low-power-mode-timeout-power",
				&cust_cfgs->lpmode_timeout_power);
		of_property_read_u32(np_emif,
				"low-power-mode-freq-threshold",
				&cust_cfgs->lpmode_freq_threshold);
	}

	if (poll_intvl) {
		cust_cfgs->mask |=
				EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL;
		cust_cfgs->temp_alert_poll_interval_ms =
						be32_to_cpup(poll_intvl);
	}

	if (of_find_property(np_emif, "extended-temp-part", &len))
		cust_cfgs->mask |= EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART;

	if (!is_custom_config_valid(cust_cfgs, emif->dev)) {
		devm_kfree(emif->dev, cust_cfgs);
		return;
	}

	emif->plat_data->custom_configs = cust_cfgs;
}

static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
		struct device_node *np_ddr,
		struct ddr_device_info *dev_info)
{
	u32 density = 0, io_width = 0;
	int len;

	if (of_find_property(np_emif, "cs1-used", &len))
		dev_info->cs1_used = true;

	if (of_find_property(np_emif, "cal-resistor-per-cs", &len))
		dev_info->cal_resistors_per_cs = true;

	if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s4"))
		dev_info->type = DDR_TYPE_LPDDR2_S4;
	else if (of_device_is_compatible(np_ddr, "jedec,lpddr2-s2"))
		dev_info->type = DDR_TYPE_LPDDR2_S2;

	of_property_read_u32(np_ddr, "density", &density);
	of_property_read_u32(np_ddr, "io-width", &io_width);

	/* Convert from density in Mb to the density encoding in jedec_ddr.h */
	if (density & (density - 1))
		dev_info->density = 0;
	else
		dev_info->density = __fls(density) - 5;

	/* Convert from io_width in bits to the io_width encoding in jedec_ddr.h */
	if (io_width & (io_width - 1))
		dev_info->io_width = 0;
	else
		dev_info->io_width = __fls(io_width) - 1;
}
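
/*
 * Worked example for the conversions above: a 2048Mb (2Gb) part yields
 * __fls(2048) - 5 = 11 - 5 = 6, and io-width = 16 yields __fls(16) - 1 = 3,
 * matching the DDR_DENSITY_2Gb and DDR_IO_WIDTH_16 encodings expected from
 * jedec_ddr.h (assuming DDR_DENSITY_64Mb == 1 and DDR_IO_WIDTH_4 == 1).
 */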

static struct emif_data * __init_or_module of_get_memory_device_details(
		struct device_node *np_emif, struct device *dev)
{
	struct emif_data		*emif = NULL;
	struct ddr_device_info		*dev_info = NULL;
	struct emif_platform_data	*pd = NULL;
	struct device_node		*np_ddr;
	int				len;

	np_ddr = of_parse_phandle(np_emif, "device-handle", 0);
	if (!np_ddr)
		goto error;
	emif	= devm_kzalloc(dev, sizeof(struct emif_data), GFP_KERNEL);
	pd	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);

	if (!emif || !pd || !dev_info) {
		dev_err(dev, "%s: out of memory\n", __func__);
		goto error;
	}

	emif->plat_data		= pd;
	pd->device_info		= dev_info;
	emif->dev		= dev;
	emif->np_ddr		= np_ddr;
	emif->temperature_level	= SDRAM_TEMP_NOMINAL;

	if (of_device_is_compatible(np_emif, "ti,emif-4d"))
		emif->plat_data->ip_rev = EMIF_4D;
	else if (of_device_is_compatible(np_emif, "ti,emif-4d5"))
		emif->plat_data->ip_rev = EMIF_4D5;

	of_property_read_u32(np_emif, "phy-type", &pd->phy_type);

	if (of_find_property(np_emif, "hw-caps-ll-interface", &len))
		pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;

	of_get_ddr_info(np_emif, np_ddr, dev_info);
	if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density,
			pd->device_info->io_width, pd->phy_type, pd->ip_rev,
			emif->dev)) {
		dev_err(dev, "%s: invalid device data\n", __func__);
		goto error;
	}
	/*
	 * For EMIF instances other than EMIF1 see if the devices connected
	 * are exactly the same as on EMIF1 (which is typically the case).
	 * If so, mark it as a duplicate of EMIF1. This will save some memory
	 * and computation.
	 */
	if (emif1 && emif1->np_ddr == np_ddr) {
		emif->duplicate = true;
		goto out;
	} else if (emif1) {
		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
			__func__);
	}

	of_get_custom_configs(np_emif, emif);
	emif->plat_data->timings = of_get_ddr_timings(np_ddr, emif->dev,
					emif->plat_data->device_info->type,
					&emif->plat_data->timings_arr_size);

	emif->plat_data->min_tck = of_get_min_tck(np_ddr, emif->dev);
	goto out;

error:
	return NULL;
out:
	return emif;
}
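
/*
 * A minimal sketch of the device-tree layout this parser expects, with
 * illustrative node names, unit address, and values (the exact format is
 * defined by the ti,emif binding document, not by this driver):
 *
 *	emif1: emif@4c000000 {
 *		compatible = "ti,emif-4d5";
 *		device-handle = <&lpddr2>;
 *		low-power-mode = <2>;			// LP_MODE_SELF_REFRESH(2)
 *		temp-alert-poll-interval = <250>;	// in ms
 *		cs1-used;
 *		hw-caps-ll-interface;
 *	};
 */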

#else

static struct emif_data * __init_or_module of_get_memory_device_details(
		struct device_node *np_emif, struct device *dev)
{
	return NULL;
}
#endif

static struct emif_data *__init_or_module get_device_details(
		struct platform_device *pdev)
{
	u32				size;
	struct emif_data		*emif = NULL;
	struct ddr_device_info		*dev_info;
	struct emif_custom_configs	*cust_cfgs;
	struct emif_platform_data	*pd;
	struct device			*dev;
	void				*temp;

	pd = pdev->dev.platform_data;
	dev = &pdev->dev;

	if (!(pd && pd->device_info && is_dev_data_valid(pd->device_info->type,
			pd->device_info->density, pd->device_info->io_width,
			pd->phy_type, pd->ip_rev, dev))) {
		dev_err(dev, "%s: invalid device data\n", __func__);
		goto error;
	}

	emif	= devm_kzalloc(dev, sizeof(*emif), GFP_KERNEL);
	temp	= devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);

	if (!emif || !temp || !dev_info)
		goto error;

	memcpy(temp, pd, sizeof(*pd));
	pd = temp;
	memcpy(dev_info, pd->device_info, sizeof(*dev_info));

	pd->device_info		= dev_info;
	emif->plat_data		= pd;
	emif->dev		= dev;
	emif->temperature_level	= SDRAM_TEMP_NOMINAL;

	/*
	 * For EMIF instances other than EMIF1 see if the devices connected
	 * are exactly the same as on EMIF1 (which is typically the case).
	 * If so, mark it as a duplicate of EMIF1 and skip copying timings
	 * data. This will save some memory and some computation later.
	 */
	emif->duplicate = emif1 && (memcmp(dev_info,
		emif1->plat_data->device_info,
		sizeof(struct ddr_device_info)) == 0);

	if (emif->duplicate) {
		pd->timings = NULL;
		pd->min_tck = NULL;
		goto out;
	} else if (emif1) {
		dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
			__func__);
	}

	/*
	 * Copy custom configs - ignore allocation error, if any, as
	 * custom_configs is not very critical
	 */
	cust_cfgs = pd->custom_configs;
	if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
		temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
		if (temp)
			memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
		pd->custom_configs = temp;
	}

	/*
	 * Copy timings and min-tck values from platform data. If they are not
	 * available or if memory allocation fails, use JEDEC defaults
	 */
	size = sizeof(struct lpddr2_timings) * pd->timings_arr_size;
	if (pd->timings) {
		temp = devm_kzalloc(dev, size, GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->timings, size);
			pd->timings = temp;
		} else {
			get_default_timings(emif);
		}
	} else {
		get_default_timings(emif);
	}

	if (pd->min_tck) {
		temp = devm_kzalloc(dev, sizeof(*pd->min_tck), GFP_KERNEL);
		if (temp) {
			memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
			pd->min_tck = temp;
		} else {
			pd->min_tck = &lpddr2_jedec_min_tck;
		}
	} else {
		pd->min_tck = &lpddr2_jedec_min_tck;
	}

out:
	return emif;

error:
	return NULL;
}

static int __init_or_module emif_probe(struct platform_device *pdev)
{
	struct emif_data	*emif;
	int			irq, ret;

	if (pdev->dev.of_node)
		emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev);
	else
		emif = get_device_details(pdev);

	if (!emif) {
		pr_err("%s: error getting device data\n", __func__);
		goto error;
	}

	list_add(&emif->node, &device_list);

	/* Save pointers to each other in emif and device structures */
	emif->dev = &pdev->dev;
	platform_set_drvdata(pdev, emif);

	emif->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(emif->base))
		goto error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto error;

	emif_onetime_settings(emif);
	emif_debugfs_init(emif);
	disable_and_clear_all_interrupts(emif);
	ret = setup_interrupts(emif, irq);
	if (ret)
		goto error;

	/* One-time actions taken on probing the first device */
	if (!emif1) {
		emif1 = emif;

		/*
		 * TODO: register notifiers for frequency and voltage
		 * change here once the respective frameworks are
		 * available
		 */
	}

	dev_info(&pdev->dev, "%s: device configured with addr = %p and IRQ%d\n",
		__func__, emif->base, irq);

	return 0;
error:
	return -ENODEV;
}

static int __exit emif_remove(struct platform_device *pdev)
{
	struct emif_data *emif = platform_get_drvdata(pdev);

	emif_debugfs_exit(emif);

	return 0;
}

static void emif_shutdown(struct platform_device *pdev)
{
	struct emif_data	*emif = platform_get_drvdata(pdev);

	disable_and_clear_all_interrupts(emif);
}

#if defined(CONFIG_OF)
static const struct of_device_id emif_of_match[] = {
		{ .compatible = "ti,emif-4d" },
		{ .compatible = "ti,emif-4d5" },
		{},
};
MODULE_DEVICE_TABLE(of, emif_of_match);
#endif

static struct platform_driver emif_driver = {
	.remove		= __exit_p(emif_remove),
	.shutdown	= emif_shutdown,
	.driver = {
		.name = "emif",
		.of_match_table = of_match_ptr(emif_of_match),
	},
};

module_platform_driver_probe(emif_driver, emif_probe);

MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:emif");
MODULE_AUTHOR("Texas Instruments Inc");