cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cpr.c (44742B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
      4 * Copyright (c) 2019, Linaro Limited
      5 */
      6
      7#include <linux/module.h>
      8#include <linux/err.h>
      9#include <linux/debugfs.h>
     10#include <linux/string.h>
     11#include <linux/kernel.h>
     12#include <linux/list.h>
     13#include <linux/init.h>
     14#include <linux/io.h>
     15#include <linux/bitops.h>
     16#include <linux/slab.h>
     17#include <linux/of.h>
     18#include <linux/of_device.h>
     19#include <linux/platform_device.h>
     20#include <linux/pm_domain.h>
     21#include <linux/pm_opp.h>
     22#include <linux/interrupt.h>
     23#include <linux/regmap.h>
     24#include <linux/mfd/syscon.h>
     25#include <linux/regulator/consumer.h>
     26#include <linux/clk.h>
     27#include <linux/nvmem-consumer.h>
     28
     29/* Register Offsets for RB-CPR and Bit Definitions */
     30
     31/* RBCPR Version Register */
     32#define REG_RBCPR_VERSION		0
     33#define RBCPR_VER_2			0x02
     34#define FLAGS_IGNORE_1ST_IRQ_STATUS	BIT(0)
     35
     36/* RBCPR Gate Count and Target Registers */
     37#define REG_RBCPR_GCNT_TARGET(n)	(0x60 + 4 * (n))
     38
     39#define RBCPR_GCNT_TARGET_TARGET_SHIFT	0
     40#define RBCPR_GCNT_TARGET_TARGET_MASK	GENMASK(11, 0)
     41#define RBCPR_GCNT_TARGET_GCNT_SHIFT	12
     42#define RBCPR_GCNT_TARGET_GCNT_MASK	GENMASK(9, 0)
     43
     44/* RBCPR Timer Control */
     45#define REG_RBCPR_TIMER_INTERVAL	0x44
     46#define REG_RBIF_TIMER_ADJUST		0x4c
     47
     48#define RBIF_TIMER_ADJ_CONS_UP_MASK	GENMASK(3, 0)
     49#define RBIF_TIMER_ADJ_CONS_UP_SHIFT	0
     50#define RBIF_TIMER_ADJ_CONS_DOWN_MASK	GENMASK(3, 0)
     51#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT	4
     52#define RBIF_TIMER_ADJ_CLAMP_INT_MASK	GENMASK(7, 0)
     53#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT	8
     54
     55/* RBCPR Config Register */
     56#define REG_RBIF_LIMIT			0x48
     57#define RBIF_LIMIT_CEILING_MASK		GENMASK(5, 0)
     58#define RBIF_LIMIT_CEILING_SHIFT	6
     59#define RBIF_LIMIT_FLOOR_BITS		6
     60#define RBIF_LIMIT_FLOOR_MASK		GENMASK(5, 0)
     61
     62#define RBIF_LIMIT_CEILING_DEFAULT	RBIF_LIMIT_CEILING_MASK
     63#define RBIF_LIMIT_FLOOR_DEFAULT	0
     64
     65#define REG_RBIF_SW_VLEVEL		0x94
     66#define RBIF_SW_VLEVEL_DEFAULT		0x20
     67
     68#define REG_RBCPR_STEP_QUOT		0x80
     69#define RBCPR_STEP_QUOT_STEPQUOT_MASK	GENMASK(7, 0)
     70#define RBCPR_STEP_QUOT_IDLE_CLK_MASK	GENMASK(3, 0)
     71#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT	8
     72
     73/* RBCPR Control Register */
     74#define REG_RBCPR_CTL			0x90
     75
     76#define RBCPR_CTL_LOOP_EN			BIT(0)
     77#define RBCPR_CTL_TIMER_EN			BIT(3)
     78#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN		BIT(5)
     79#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN	BIT(6)
     80#define RBCPR_CTL_COUNT_MODE			BIT(10)
     81#define RBCPR_CTL_UP_THRESHOLD_MASK	GENMASK(3, 0)
     82#define RBCPR_CTL_UP_THRESHOLD_SHIFT	24
     83#define RBCPR_CTL_DN_THRESHOLD_MASK	GENMASK(3, 0)
     84#define RBCPR_CTL_DN_THRESHOLD_SHIFT	28
     85
     86/* RBCPR Ack/Nack Response */
     87#define REG_RBIF_CONT_ACK_CMD		0x98
     88#define REG_RBIF_CONT_NACK_CMD		0x9c
     89
     90/* RBCPR Result status Register */
     91#define REG_RBCPR_RESULT_0		0xa0
     92
     93#define RBCPR_RESULT0_BUSY_SHIFT	19
     94#define RBCPR_RESULT0_BUSY_MASK		BIT(RBCPR_RESULT0_BUSY_SHIFT)
     95#define RBCPR_RESULT0_ERROR_LT0_SHIFT	18
     96#define RBCPR_RESULT0_ERROR_SHIFT	6
     97#define RBCPR_RESULT0_ERROR_MASK	GENMASK(11, 0)
     98#define RBCPR_RESULT0_ERROR_STEPS_SHIFT	2
     99#define RBCPR_RESULT0_ERROR_STEPS_MASK	GENMASK(3, 0)
    100#define RBCPR_RESULT0_STEP_UP_SHIFT	1
    101
    102/* RBCPR Interrupt Control Register */
    103#define REG_RBIF_IRQ_EN(n)		(0x100 + 4 * (n))
    104#define REG_RBIF_IRQ_CLEAR		0x110
    105#define REG_RBIF_IRQ_STATUS		0x114
    106
    107#define CPR_INT_DONE		BIT(0)
    108#define CPR_INT_MIN		BIT(1)
    109#define CPR_INT_DOWN		BIT(2)
    110#define CPR_INT_MID		BIT(3)
    111#define CPR_INT_UP		BIT(4)
    112#define CPR_INT_MAX		BIT(5)
    113#define CPR_INT_CLAMP		BIT(6)
    114#define CPR_INT_ALL	(CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
    115			CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
    116#define CPR_INT_DEFAULT	(CPR_INT_UP | CPR_INT_DOWN)
    117
    118#define CPR_NUM_RING_OSC	8
    119
    120/* CPR eFuse parameters */
    121#define CPR_FUSE_TARGET_QUOT_BITS_MASK	GENMASK(11, 0)
    122
    123#define CPR_FUSE_MIN_QUOT_DIFF		50
    124
    125#define FUSE_REVISION_UNKNOWN		(-1)
    126
    127enum voltage_change_dir {
    128	NO_CHANGE,
    129	DOWN,
    130	UP,
    131};
    132
    133struct cpr_fuse {
    134	char *ring_osc;
    135	char *init_voltage;
    136	char *quotient;
    137	char *quotient_offset;
    138};
    139
    140struct fuse_corner_data {
    141	int ref_uV;
    142	int max_uV;
    143	int min_uV;
    144	int max_volt_scale;
    145	int max_quot_scale;
    146	/* fuse quot */
    147	int quot_offset;
    148	int quot_scale;
    149	int quot_adjust;
    150	/* fuse quot_offset */
    151	int quot_offset_scale;
    152	int quot_offset_adjust;
    153};
    154
    155struct cpr_fuses {
    156	int init_voltage_step;
    157	int init_voltage_width;
    158	struct fuse_corner_data *fuse_corner_data;
    159};
    160
    161struct corner_data {
    162	unsigned int fuse_corner;
    163	unsigned long freq;
    164};
    165
    166struct cpr_desc {
    167	unsigned int num_fuse_corners;
    168	int min_diff_quot;
    169	int *step_quot;
    170
    171	unsigned int		timer_delay_us;
    172	unsigned int		timer_cons_up;
    173	unsigned int		timer_cons_down;
    174	unsigned int		up_threshold;
    175	unsigned int		down_threshold;
    176	unsigned int		idle_clocks;
    177	unsigned int		gcnt_us;
    178	unsigned int		vdd_apc_step_up_limit;
    179	unsigned int		vdd_apc_step_down_limit;
    180	unsigned int		clamp_timer_interval;
    181
    182	struct cpr_fuses cpr_fuses;
    183	bool reduce_to_fuse_uV;
    184	bool reduce_to_corner_uV;
    185};
    186
    187struct acc_desc {
    188	unsigned int	enable_reg;
    189	u32		enable_mask;
    190
    191	struct reg_sequence	*config;
    192	struct reg_sequence	*settings;
    193	int			num_regs_per_fuse;
    194};
    195
    196struct cpr_acc_desc {
    197	const struct cpr_desc *cpr_desc;
    198	const struct acc_desc *acc_desc;
    199};
    200
    201struct fuse_corner {
    202	int min_uV;
    203	int max_uV;
    204	int uV;
    205	int quot;
    206	int step_quot;
    207	const struct reg_sequence *accs;
    208	int num_accs;
    209	unsigned long max_freq;
    210	u8 ring_osc_idx;
    211};
    212
    213struct corner {
    214	int min_uV;
    215	int max_uV;
    216	int uV;
    217	int last_uV;
    218	int quot_adjust;
    219	u32 save_ctl;
    220	u32 save_irq;
    221	unsigned long freq;
    222	struct fuse_corner *fuse_corner;
    223};
    224
    225struct cpr_drv {
    226	unsigned int		num_corners;
    227	unsigned int		ref_clk_khz;
    228
    229	struct generic_pm_domain pd;
    230	struct device		*dev;
    231	struct device		*attached_cpu_dev;
    232	struct mutex		lock;
    233	void __iomem		*base;
    234	struct corner		*corner;
    235	struct regulator	*vdd_apc;
    236	struct clk		*cpu_clk;
    237	struct regmap		*tcsr;
    238	bool			loop_disabled;
    239	u32			gcnt;
    240	unsigned long		flags;
    241
    242	struct fuse_corner	*fuse_corners;
    243	struct corner		*corners;
    244
    245	const struct cpr_desc *desc;
    246	const struct acc_desc *acc_desc;
    247	const struct cpr_fuse *cpr_fuses;
    248
    249	struct dentry *debugfs;
    250};
    251
    252static bool cpr_is_allowed(struct cpr_drv *drv)
    253{
    254	return !drv->loop_disabled;
    255}
    256
    257static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
    258{
    259	writel_relaxed(value, drv->base + offset);
    260}
    261
    262static u32 cpr_read(struct cpr_drv *drv, u32 offset)
    263{
    264	return readl_relaxed(drv->base + offset);
    265}
    266
    267static void
    268cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
    269{
    270	u32 val;
    271
    272	val = readl_relaxed(drv->base + offset);
    273	val &= ~mask;
    274	val |= value & mask;
    275	writel_relaxed(val, drv->base + offset);
    276}
    277
    278static void cpr_irq_clr(struct cpr_drv *drv)
    279{
    280	cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
    281}
    282
    283static void cpr_irq_clr_nack(struct cpr_drv *drv)
    284{
    285	cpr_irq_clr(drv);
    286	cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
    287}
    288
    289static void cpr_irq_clr_ack(struct cpr_drv *drv)
    290{
    291	cpr_irq_clr(drv);
    292	cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
    293}
    294
    295static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
    296{
    297	cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
    298}
    299
    300static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
    301{
    302	cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
    303}
    304
    305static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
    306{
    307	u32 val, mask;
    308	const struct cpr_desc *desc = drv->desc;
    309
    310	/* Program Consecutive Up & Down */
    311	val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
    312	val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
    313	mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
    314	cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
    315	cpr_masked_write(drv, REG_RBCPR_CTL,
    316			 RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
    317			 RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
    318			 corner->save_ctl);
    319	cpr_irq_set(drv, corner->save_irq);
    320
    321	if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV)
    322		val = RBCPR_CTL_LOOP_EN;
    323	else
    324		val = 0;
    325	cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
    326}
    327
    328static void cpr_ctl_disable(struct cpr_drv *drv)
    329{
    330	cpr_irq_set(drv, 0);
    331	cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
    332		       RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
    333	cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
    334			 RBIF_TIMER_ADJ_CONS_UP_MASK |
    335			 RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
    336	cpr_irq_clr(drv);
    337	cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
    338	cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
    339	cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
    340}
    341
    342static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
    343{
    344	u32 reg_val;
    345
    346	reg_val = cpr_read(drv, REG_RBCPR_CTL);
    347	return reg_val & RBCPR_CTL_LOOP_EN;
    348}
    349
    350static bool cpr_ctl_is_busy(struct cpr_drv *drv)
    351{
    352	u32 reg_val;
    353
    354	reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
    355	return reg_val & RBCPR_RESULT0_BUSY_MASK;
    356}
    357
    358static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
    359{
    360	corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
    361	corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
    362}
    363
    364static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
    365{
    366	u32 gcnt, ctl, irq, ro_sel, step_quot;
    367	struct fuse_corner *fuse = corner->fuse_corner;
    368	const struct cpr_desc *desc = drv->desc;
    369	int i;
    370
    371	ro_sel = fuse->ring_osc_idx;
    372	gcnt = drv->gcnt;
    373	gcnt |= fuse->quot - corner->quot_adjust;
    374
    375	/* Program the step quotient and idle clocks */
    376	step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
    377	step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK;
    378	cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
    379
    380	/* Clear the target quotient value and gate count of all ROs */
    381	for (i = 0; i < CPR_NUM_RING_OSC; i++)
    382		cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
    383
    384	cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
    385	ctl = corner->save_ctl;
    386	cpr_write(drv, REG_RBCPR_CTL, ctl);
    387	irq = corner->save_irq;
    388	cpr_irq_set(drv, irq);
    389	dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt,
    390		ctl, irq);
    391}
    392
    393static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
    394			struct fuse_corner *end)
    395{
    396	if (f == end)
    397		return;
    398
    399	if (f < end) {
    400		for (f += 1; f <= end; f++)
    401			regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
    402	} else {
    403		for (f -= 1; f >= end; f--)
    404			regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
    405	}
    406}
    407
    408static int cpr_pre_voltage(struct cpr_drv *drv,
    409			   struct fuse_corner *fuse_corner,
    410			   enum voltage_change_dir dir)
    411{
    412	struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
    413
    414	if (drv->tcsr && dir == DOWN)
    415		cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
    416
    417	return 0;
    418}
    419
    420static int cpr_post_voltage(struct cpr_drv *drv,
    421			    struct fuse_corner *fuse_corner,
    422			    enum voltage_change_dir dir)
    423{
    424	struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
    425
    426	if (drv->tcsr && dir == UP)
    427		cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
    428
    429	return 0;
    430}
    431
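       /*
        * cpr_scale_voltage() below changes the supply through the two helpers
        * above: the ACC (memory accelerator) settings in TCSR are written
        * before the supply is lowered (cpr_pre_voltage(), dir == DOWN) and
        * only after it has been raised (cpr_post_voltage(), dir == UP),
        * presumably so that the applied voltage is never below the level the
        * currently programmed ACC settings were written for.
        */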
    432static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner,
    433			     int new_uV, enum voltage_change_dir dir)
    434{
    435	int ret;
    436	struct fuse_corner *fuse_corner = corner->fuse_corner;
    437
    438	ret = cpr_pre_voltage(drv, fuse_corner, dir);
    439	if (ret)
    440		return ret;
    441
    442	ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV);
    443	if (ret) {
    444		dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n",
    445				    new_uV);
    446		return ret;
    447	}
    448
    449	ret = cpr_post_voltage(drv, fuse_corner, dir);
    450	if (ret)
    451		return ret;
    452
    453	return 0;
    454}
    455
    456static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv)
    457{
    458	return drv->corner ? drv->corner - drv->corners + 1 : 0;
    459}
    460
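       /*
        * Illustrative closed-loop step for cpr_scale() (the regulator step
        * size is board specific, so the numbers are hypothetical): if the
        * hardware reports an UP request with error_steps = 3, the value is
        * capped at desc->vdd_apc_step_up_limit (1 on qcs404), giving
        * new_uV = last_uV + 1 * step_uV, clamped to corner->max_uV; with a
        * 12500 uV regulator step the target thus rises by 12500 uV.
        */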
    461static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
    462{
    463	u32 val, error_steps, reg_mask;
    464	int last_uV, new_uV, step_uV, ret;
    465	struct corner *corner;
    466	const struct cpr_desc *desc = drv->desc;
    467
    468	if (dir != UP && dir != DOWN)
    469		return 0;
    470
    471	step_uV = regulator_get_linear_step(drv->vdd_apc);
    472	if (!step_uV)
    473		return -EINVAL;
    474
    475	corner = drv->corner;
    476
    477	val = cpr_read(drv, REG_RBCPR_RESULT_0);
    478
    479	error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
    480	error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
    481	last_uV = corner->last_uV;
    482
    483	if (dir == UP) {
    484		if (desc->clamp_timer_interval &&
    485		    error_steps < desc->up_threshold) {
    486			/*
    487			 * Handle the case where another measurement started
    488			 * after the interrupt was triggered due to a core
    489			 * exiting from power collapse.
    490			 */
    491			error_steps = max(desc->up_threshold,
    492					  desc->vdd_apc_step_up_limit);
    493		}
    494
    495		if (last_uV >= corner->max_uV) {
    496			cpr_irq_clr_nack(drv);
    497
    498			/* Maximize the UP threshold */
    499			reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
    500			reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
    501			val = reg_mask;
    502			cpr_ctl_modify(drv, reg_mask, val);
    503
    504			/* Disable UP interrupt */
    505			cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
    506
    507			return 0;
    508		}
    509
    510		if (error_steps > desc->vdd_apc_step_up_limit)
    511			error_steps = desc->vdd_apc_step_up_limit;
    512
    513		/* Calculate new voltage */
    514		new_uV = last_uV + error_steps * step_uV;
    515		new_uV = min(new_uV, corner->max_uV);
    516
    517		dev_dbg(drv->dev,
    518			"UP: -> new_uV: %d last_uV: %d perf state: %u\n",
    519			new_uV, last_uV, cpr_get_cur_perf_state(drv));
    520	} else {
    521		if (desc->clamp_timer_interval &&
    522		    error_steps < desc->down_threshold) {
    523			/*
    524			 * Handle the case where another measurement started
    525			 * after the interrupt was triggered due to a core
    526			 * exiting from power collapse.
    527			 */
    528			error_steps = max(desc->down_threshold,
    529					  desc->vdd_apc_step_down_limit);
    530		}
    531
    532		if (last_uV <= corner->min_uV) {
    533			cpr_irq_clr_nack(drv);
    534
    535			/* Enable auto nack down */
    536			reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
    537			val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
    538
    539			cpr_ctl_modify(drv, reg_mask, val);
    540
    541			/* Disable DOWN interrupt */
    542			cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
    543
    544			return 0;
    545		}
    546
    547		if (error_steps > desc->vdd_apc_step_down_limit)
    548			error_steps = desc->vdd_apc_step_down_limit;
    549
    550		/* Calculate new voltage */
    551		new_uV = last_uV - error_steps * step_uV;
    552		new_uV = max(new_uV, corner->min_uV);
    553
    554		dev_dbg(drv->dev,
    555			"DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
    556			new_uV, last_uV, cpr_get_cur_perf_state(drv));
    557	}
    558
    559	ret = cpr_scale_voltage(drv, corner, new_uV, dir);
    560	if (ret) {
    561		cpr_irq_clr_nack(drv);
    562		return ret;
    563	}
    564	drv->corner->last_uV = new_uV;
    565
    566	if (dir == UP) {
    567		/* Disable auto nack down */
    568		reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
    569		val = 0;
    570	} else {
    571		/* Restore default threshold for UP */
    572		reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
    573		reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
    574		val = desc->up_threshold;
    575		val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
    576	}
    577
    578	cpr_ctl_modify(drv, reg_mask, val);
    579
    580	/* Re-enable default interrupts */
    581	cpr_irq_set(drv, CPR_INT_DEFAULT);
    582
    583	/* Ack */
    584	cpr_irq_clr_ack(drv);
    585
    586	return 0;
    587}
    588
    589static irqreturn_t cpr_irq_handler(int irq, void *dev)
    590{
    591	struct cpr_drv *drv = dev;
    592	const struct cpr_desc *desc = drv->desc;
    593	irqreturn_t ret = IRQ_HANDLED;
    594	u32 val;
    595
    596	mutex_lock(&drv->lock);
    597
    598	val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
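       	/*
       	 * cpr_config() sets FLAGS_IGNORE_1ST_IRQ_STATUS on controllers up
       	 * to RBCPR_VER_2; in that case the value read above is discarded
       	 * and the status register is read a second time (presumably the
       	 * first read is not reliable on those revisions).
       	 */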
    599	if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
    600		val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
    601
    602	dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val);
    603
    604	if (!cpr_ctl_is_enabled(drv)) {
    605		dev_dbg(drv->dev, "CPR is disabled\n");
    606		ret = IRQ_NONE;
    607	} else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) {
    608		dev_dbg(drv->dev, "CPR measurement is not ready\n");
    609	} else if (!cpr_is_allowed(drv)) {
    610		val = cpr_read(drv, REG_RBCPR_CTL);
    611		dev_err_ratelimited(drv->dev,
    612				    "Interrupt broken? RBCPR_CTL = %#02x\n",
    613				    val);
    614		ret = IRQ_NONE;
    615	} else {
    616		/*
    617		 * Handle the interrupt flags in decreasing order of
    618		 * priority.
    619		 */
    620		if (val & CPR_INT_UP) {
    621			cpr_scale(drv, UP);
    622		} else if (val & CPR_INT_DOWN) {
    623			cpr_scale(drv, DOWN);
    624		} else if (val & CPR_INT_MIN) {
    625			cpr_irq_clr_nack(drv);
    626		} else if (val & CPR_INT_MAX) {
    627			cpr_irq_clr_nack(drv);
    628		} else if (val & CPR_INT_MID) {
    629			/* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
    630			dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n");
    631		} else {
    632			dev_dbg(drv->dev,
    633				"IRQ occurred for unknown flag (%#08x)\n", val);
    634		}
    635
    636		/* Save register values for the corner */
    637		cpr_corner_save(drv, drv->corner);
    638	}
    639
    640	mutex_unlock(&drv->lock);
    641
    642	return ret;
    643}
    644
    645static int cpr_enable(struct cpr_drv *drv)
    646{
    647	int ret;
    648
    649	ret = regulator_enable(drv->vdd_apc);
    650	if (ret)
    651		return ret;
    652
    653	mutex_lock(&drv->lock);
    654
    655	if (cpr_is_allowed(drv) && drv->corner) {
    656		cpr_irq_clr(drv);
    657		cpr_corner_restore(drv, drv->corner);
    658		cpr_ctl_enable(drv, drv->corner);
    659	}
    660
    661	mutex_unlock(&drv->lock);
    662
    663	return 0;
    664}
    665
    666static int cpr_disable(struct cpr_drv *drv)
    667{
    668	mutex_lock(&drv->lock);
    669
    670	if (cpr_is_allowed(drv)) {
    671		cpr_ctl_disable(drv);
    672		cpr_irq_clr(drv);
    673	}
    674
    675	mutex_unlock(&drv->lock);
    676
    677	return regulator_disable(drv->vdd_apc);
    678}
    679
    680static int cpr_config(struct cpr_drv *drv)
    681{
    682	int i;
    683	u32 val, gcnt;
    684	struct corner *corner;
    685	const struct cpr_desc *desc = drv->desc;
    686
    687	/* Disable interrupt and CPR */
    688	cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
    689	cpr_write(drv, REG_RBCPR_CTL, 0);
    690
    691	/* Program the default HW ceiling, floor and vlevel */
    692	val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
    693		<< RBIF_LIMIT_CEILING_SHIFT;
    694	val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK;
    695	cpr_write(drv, REG_RBIF_LIMIT, val);
    696	cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
    697
    698	/*
    699	 * Clear the target quotient value and gate count of all
    700	 * ring oscillators
    701	 */
    702	for (i = 0; i < CPR_NUM_RING_OSC; i++)
    703		cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
    704
    705	/* Init and save gcnt */
    706	gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000;
    707	gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
    708	gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
    709	drv->gcnt = gcnt;
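       	/*
       	 * Example for the computation above (assuming a 19.2 MHz reference
       	 * clock, i.e. ref_clk_khz == 19200): with gcnt_us == 1 as in
       	 * qcs404_cpr_desc, gcnt = 19200 * 1 / 1000 = 19 reference clock
       	 * cycles, placed in the GCNT field (bits [21:12]) of the
       	 * RBCPR_GCNT_TARGET registers.
       	 */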
    710
    711	/* Program the delay count for the timer */
    712	val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000;
    713	cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
    714	dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val,
    715		desc->timer_delay_us);
    716
    717	/* Program Consecutive Up & Down */
    718	val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
    719	val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
    720	val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
    721	cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
    722
    723	/* Program the control register */
    724	val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
    725	val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
    726	val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
    727	val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
    728	cpr_write(drv, REG_RBCPR_CTL, val);
    729
    730	for (i = 0; i < drv->num_corners; i++) {
    731		corner = &drv->corners[i];
    732		corner->save_ctl = val;
    733		corner->save_irq = CPR_INT_DEFAULT;
    734	}
    735
    736	cpr_irq_set(drv, CPR_INT_DEFAULT);
    737
    738	val = cpr_read(drv, REG_RBCPR_VERSION);
    739	if (val <= RBCPR_VER_2)
    740		drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
    741
    742	return 0;
    743}
    744
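       /*
        * Invoked by the genpd core when a consumer of this power domain
        * requests a new performance state. The state number is the OPP
        * level of the domain (see cpr_get_performance_state()), so state 1
        * maps to drv->corners[0] and state drv->num_corners to the highest
        * corner.
        */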
    745static int cpr_set_performance_state(struct generic_pm_domain *domain,
    746				     unsigned int state)
    747{
    748	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
    749	struct corner *corner, *end;
    750	enum voltage_change_dir dir;
    751	int ret = 0, new_uV;
    752
    753	mutex_lock(&drv->lock);
    754
    755	dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
    756		__func__, state, cpr_get_cur_perf_state(drv));
    757
    758	/*
    759	 * Determine the new corner we're going to.
    760	 * Subtract one since the lowest performance state is 1.
    761	 */
    762	corner = drv->corners + state - 1;
    763	end = &drv->corners[drv->num_corners - 1];
    764	if (corner > end || corner < drv->corners) {
    765		ret = -EINVAL;
    766		goto unlock;
    767	}
    768
    769	/* Determine direction */
    770	if (drv->corner > corner)
    771		dir = DOWN;
    772	else if (drv->corner < corner)
    773		dir = UP;
    774	else
    775		dir = NO_CHANGE;
    776
    777	if (cpr_is_allowed(drv))
    778		new_uV = corner->last_uV;
    779	else
    780		new_uV = corner->uV;
    781
    782	if (cpr_is_allowed(drv))
    783		cpr_ctl_disable(drv);
    784
    785	ret = cpr_scale_voltage(drv, corner, new_uV, dir);
    786	if (ret)
    787		goto unlock;
    788
    789	if (cpr_is_allowed(drv)) {
    790		cpr_irq_clr(drv);
    791		if (drv->corner != corner)
    792			cpr_corner_restore(drv, corner);
    793		cpr_ctl_enable(drv, corner);
    794	}
    795
    796	drv->corner = corner;
    797
    798unlock:
    799	mutex_unlock(&drv->lock);
    800
    801	return ret;
    802}
    803
    804static int
    805cpr_populate_ring_osc_idx(struct cpr_drv *drv)
    806{
    807	struct fuse_corner *fuse = drv->fuse_corners;
    808	struct fuse_corner *end = fuse + drv->desc->num_fuse_corners;
    809	const struct cpr_fuse *fuses = drv->cpr_fuses;
    810	u32 data;
    811	int ret;
    812
    813	for (; fuse < end; fuse++, fuses++) {
    814		ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->ring_osc, &data);
    815		if (ret)
    816			return ret;
    817		fuse->ring_osc_idx = data;
    818	}
    819
    820	return 0;
    821}
    822
    823static int cpr_read_fuse_uV(const struct cpr_desc *desc,
    824			    const struct fuse_corner_data *fdata,
    825			    const char *init_v_efuse,
    826			    int step_volt,
    827			    struct cpr_drv *drv)
    828{
    829	int step_size_uV, steps, uV;
    830	u32 bits = 0;
    831	int ret;
    832
    833	ret = nvmem_cell_read_variable_le_u32(drv->dev, init_v_efuse, &bits);
    834	if (ret)
    835		return ret;
    836
    837	steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1);
    838	/* Not two's complement; instead the highest bit is a sign bit */
    839	if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
    840		steps = -steps;
    841
    842	step_size_uV = desc->cpr_fuses.init_voltage_step;
    843
    844	uV = fdata->ref_uV + steps * step_size_uV;
    845	return DIV_ROUND_UP(uV, step_volt) * step_volt;
    846}
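
       /*
        * Worked example for the sign/magnitude decode in cpr_read_fuse_uV(),
        * using the qcs404 parameters defined below (init_voltage_width = 6,
        * init_voltage_step = 8000 uV) and an illustrative fuse value of
        * 0x23 (0b100011): bit 5 is the sign bit, so steps = -(0x23 & ~BIT(5))
        * = -3 and uV = ref_uV - 3 * 8000, which is then rounded up to the
        * regulator's step size.
        */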
    847
    848static int cpr_fuse_corner_init(struct cpr_drv *drv)
    849{
    850	const struct cpr_desc *desc = drv->desc;
    851	const struct cpr_fuse *fuses = drv->cpr_fuses;
    852	const struct acc_desc *acc_desc = drv->acc_desc;
    853	int i;
    854	unsigned int step_volt;
    855	struct fuse_corner_data *fdata;
    856	struct fuse_corner *fuse, *end;
    857	int uV;
    858	const struct reg_sequence *accs;
    859	int ret;
    860
    861	accs = acc_desc->settings;
    862
    863	step_volt = regulator_get_linear_step(drv->vdd_apc);
    864	if (!step_volt)
    865		return -EINVAL;
    866
    867	/* Populate fuse_corner members */
    868	fuse = drv->fuse_corners;
    869	end = &fuse[desc->num_fuse_corners - 1];
    870	fdata = desc->cpr_fuses.fuse_corner_data;
    871
    872	for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
    873		/*
    874		 * Update SoC voltages: platforms might choose a different
    875		 * regulator than the one used to characterize the algorithm
    876		 * (i.e., init_voltage_step).
    877		 */
    878		fdata->min_uV = roundup(fdata->min_uV, step_volt);
    879		fdata->max_uV = roundup(fdata->max_uV, step_volt);
    880
    881		/* Populate uV */
    882		uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
    883				      step_volt, drv);
    884		if (uV < 0)
    885			return uV;
    886
    887		fuse->min_uV = fdata->min_uV;
    888		fuse->max_uV = fdata->max_uV;
    889		fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
    890
    891		if (fuse == end) {
    892			/*
    893			 * Allow the highest fuse corner's PVS voltage to
    894			 * define the ceiling voltage for that corner in order
    895			 * to support SoCs in which variable ceiling values
    896			 * are required.
    897			 */
    898			end->max_uV = max(end->max_uV, end->uV);
    899		}
    900
    901		/* Populate target quotient by scaling */
    902		ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->quotient, &fuse->quot);
    903		if (ret)
    904			return ret;
    905
    906		fuse->quot *= fdata->quot_scale;
    907		fuse->quot += fdata->quot_offset;
    908		fuse->quot += fdata->quot_adjust;
    909		fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
    910
    911		/* Populate acc settings */
    912		fuse->accs = accs;
    913		fuse->num_accs = acc_desc->num_regs_per_fuse;
    914		accs += acc_desc->num_regs_per_fuse;
    915	}
    916
    917	/*
    918	 * Restrict all fuse corner PVS voltages based upon per corner
    919	 * ceiling and floor voltages.
    920	 */
    921	for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
    922		if (fuse->uV > fuse->max_uV)
    923			fuse->uV = fuse->max_uV;
    924		else if (fuse->uV < fuse->min_uV)
    925			fuse->uV = fuse->min_uV;
    926
    927		ret = regulator_is_supported_voltage(drv->vdd_apc,
    928						     fuse->min_uV,
    929						     fuse->min_uV);
    930		if (!ret) {
    931			dev_err(drv->dev,
    932				"min uV: %d (fuse corner: %d) not supported by regulator\n",
    933				fuse->min_uV, i);
    934			return -EINVAL;
    935		}
    936
    937		ret = regulator_is_supported_voltage(drv->vdd_apc,
    938						     fuse->max_uV,
    939						     fuse->max_uV);
    940		if (!ret) {
    941			dev_err(drv->dev,
    942				"max uV: %d (fuse corner: %d) not supported by regulator\n",
    943				fuse->max_uV, i);
    944			return -EINVAL;
    945		}
    946
    947		dev_dbg(drv->dev,
    948			"fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
    949			i, fuse->min_uV, fuse->uV, fuse->max_uV,
    950			fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
    951	}
    952
    953	return 0;
    954}
    955
    956static int cpr_calculate_scaling(const char *quot_offset,
    957				 struct cpr_drv *drv,
    958				 const struct fuse_corner_data *fdata,
    959				 const struct corner *corner)
    960{
    961	u32 quot_diff = 0;
    962	unsigned long freq_diff;
    963	int scaling;
    964	const struct fuse_corner *fuse, *prev_fuse;
    965	int ret;
    966
    967	fuse = corner->fuse_corner;
    968	prev_fuse = fuse - 1;
    969
    970	if (quot_offset) {
    971		ret = nvmem_cell_read_variable_le_u32(drv->dev, quot_offset, &quot_diff);
    972		if (ret)
    973			return ret;
    974
    975		quot_diff *= fdata->quot_offset_scale;
    976		quot_diff += fdata->quot_offset_adjust;
    977	} else {
    978		quot_diff = fuse->quot - prev_fuse->quot;
    979	}
    980
    981	freq_diff = fuse->max_freq - prev_fuse->max_freq;
    982	freq_diff /= 1000000; /* Convert to MHz */
    983	scaling = 1000 * quot_diff / freq_diff;
    984	return min(scaling, fdata->max_quot_scale);
    985}
    986
    987static int cpr_interpolate(const struct corner *corner, int step_volt,
    988			   const struct fuse_corner_data *fdata)
    989{
    990	unsigned long f_high, f_low, f_diff;
    991	int uV_high, uV_low, uV;
    992	u64 temp, temp_limit;
    993	const struct fuse_corner *fuse, *prev_fuse;
    994
    995	fuse = corner->fuse_corner;
    996	prev_fuse = fuse - 1;
    997
    998	f_high = fuse->max_freq;
    999	f_low = prev_fuse->max_freq;
   1000	uV_high = fuse->uV;
   1001	uV_low = prev_fuse->uV;
   1002	f_diff = fuse->max_freq - corner->freq;
   1003
   1004	/*
   1005	 * Don't interpolate in the wrong direction. This could happen
   1006	 * if the adjusted fuse voltage overlaps with the previous fuse's
   1007	 * adjusted voltage.
   1008	 */
   1009	if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
   1010		return corner->uV;
   1011
   1012	temp = f_diff * (uV_high - uV_low);
   1013	temp = div64_ul(temp, f_high - f_low);
   1014
   1015	/*
   1016	 * max_volt_scale has units of uV/MHz while freq values
   1017	 * have units of Hz. Divide by 1000000 so that temp_limit is in uV.
   1018	 */
   1019	temp_limit = f_diff * fdata->max_volt_scale;
   1020	do_div(temp_limit, 1000000);
   1021
   1022	uV = uV_high - min(temp, temp_limit);
   1023	return roundup(uV, step_volt);
   1024}
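
       /*
        * Illustrative run of cpr_interpolate() (frequencies and voltages are
        * hypothetical; max_volt_scale = 2000 uV/MHz is the qcs404 value):
        * f_low = 1.1 GHz, f_high = 1.4 GHz, uV_low = 1224000, uV_high =
        * 1288000, corner->freq = 1.2 GHz. Then f_diff = 200 MHz, temp =
        * 200000000 * 64000 / 300000000 = 42666, temp_limit = 200 * 2000 =
        * 400000, so uV = 1288000 - 42666 = 1245334, rounded up to step_volt.
        */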
   1025
   1026static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
   1027{
   1028	struct device_node *np;
   1029	unsigned int fuse_corner = 0;
   1030
   1031	np = dev_pm_opp_get_of_node(opp);
   1032	if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
   1033		pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
   1034		       __func__);
   1035
   1036	of_node_put(np);
   1037
   1038	return fuse_corner;
   1039}
   1040
   1041static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
   1042					    struct device *cpu_dev)
   1043{
   1044	u64 rate = 0;
   1045	struct device_node *ref_np;
   1046	struct device_node *desc_np;
   1047	struct device_node *child_np = NULL;
   1048	struct device_node *child_req_np = NULL;
   1049
   1050	desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
   1051	if (!desc_np)
   1052		return 0;
   1053
   1054	ref_np = dev_pm_opp_get_of_node(ref);
   1055	if (!ref_np)
   1056		goto out_ref;
   1057
   1058	do {
   1059		of_node_put(child_req_np);
   1060		child_np = of_get_next_available_child(desc_np, child_np);
   1061		child_req_np = of_parse_phandle(child_np, "required-opps", 0);
   1062	} while (child_np && child_req_np != ref_np);
   1063
   1064	if (child_np && child_req_np == ref_np)
   1065		of_property_read_u64(child_np, "opp-hz", &rate);
   1066
   1067	of_node_put(child_req_np);
   1068	of_node_put(child_np);
   1069	of_node_put(ref_np);
   1070out_ref:
   1071	of_node_put(desc_np);
   1072
   1073	return (unsigned long) rate;
   1074}
   1075
   1076static int cpr_corner_init(struct cpr_drv *drv)
   1077{
   1078	const struct cpr_desc *desc = drv->desc;
   1079	const struct cpr_fuse *fuses = drv->cpr_fuses;
   1080	int i, level, scaling = 0;
   1081	unsigned int fnum, fc;
   1082	const char *quot_offset;
   1083	struct fuse_corner *fuse, *prev_fuse;
   1084	struct corner *corner, *end;
   1085	struct corner_data *cdata;
   1086	const struct fuse_corner_data *fdata;
   1087	bool apply_scaling;
   1088	unsigned long freq_diff, freq_diff_mhz;
   1089	unsigned long freq;
   1090	int step_volt = regulator_get_linear_step(drv->vdd_apc);
   1091	struct dev_pm_opp *opp;
   1092
   1093	if (!step_volt)
   1094		return -EINVAL;
   1095
   1096	corner = drv->corners;
   1097	end = &corner[drv->num_corners - 1];
   1098
   1099	cdata = devm_kcalloc(drv->dev, drv->num_corners,
   1100			     sizeof(struct corner_data),
   1101			     GFP_KERNEL);
   1102	if (!cdata)
   1103		return -ENOMEM;
   1104
   1105	/*
   1106	 * Store maximum frequency for each fuse corner based on the frequency
   1107	 * plan
   1108	 */
   1109	for (level = 1; level <= drv->num_corners; level++) {
   1110		opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level);
   1111		if (IS_ERR(opp))
   1112			return -EINVAL;
   1113		fc = cpr_get_fuse_corner(opp);
   1114		if (!fc) {
   1115			dev_pm_opp_put(opp);
   1116			return -EINVAL;
   1117		}
   1118		fnum = fc - 1;
   1119		freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev);
   1120		if (!freq) {
   1121			dev_pm_opp_put(opp);
   1122			return -EINVAL;
   1123		}
   1124		cdata[level - 1].fuse_corner = fnum;
   1125		cdata[level - 1].freq = freq;
   1126
   1127		fuse = &drv->fuse_corners[fnum];
   1128		dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n",
   1129			freq, dev_pm_opp_get_level(opp) - 1, fnum);
   1130		if (freq > fuse->max_freq)
   1131			fuse->max_freq = freq;
   1132		dev_pm_opp_put(opp);
   1133	}
   1134
   1135	/*
   1136	 * Get the quotient adjustment scaling factor, according to:
   1137	 *
   1138	 * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
   1139	 *		/ (freq(corner_N) - freq(corner_N-1)), max_factor)
   1140	 *
   1141	 * QUOT(corner_N):	quotient read from fuse for fuse corner N
   1142	 * QUOT(corner_N-1):	quotient read from fuse for fuse corner (N - 1)
   1143	 * freq(corner_N):	max frequency in MHz supported by fuse corner N
   1144	 * freq(corner_N-1):	max frequency in MHz supported by fuse corner
   1145	 *			 (N - 1)
   1146	 *
   1147	 * Then walk through the corners mapped to each fuse corner
   1148	 * and calculate the quotient adjustment for each one using the
   1149	 * following formula:
   1150	 *
   1151	 * quot_adjust = (freq_max - freq_corner) * scaling / 1000
   1152	 *
   1153	 * freq_max: max frequency in MHz supported by the fuse corner
   1154	 * freq_corner: frequency in MHz corresponding to the corner
   1155	 * scaling: calculated from above equation
   1156	 *
   1157	 *
   1158	 *     +                           +
   1159	 *     |                         v |
   1160	 *   q |           f c           o |           f c
   1161	 *   u |         c               l |         c
   1162	 *   o |       f                 t |       f
   1163	 *   t |     c                   a |     c
   1164	 *     | c f                     g | c f
   1165	 *     |                         e |
   1166	 *     +---------------            +----------------
   1167	 *       0 1 2 3 4 5 6               0 1 2 3 4 5 6
   1168	 *          corner                      corner
   1169	 *
   1170	 *    c = corner
   1171	 *    f = fuse corner
   1172	 *
   1173	 */
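       	/*
       	 * Illustrative numbers (the quotients are fuse values, so purely
       	 * hypothetical): if QUOT(corner_N) - QUOT(corner_N-1) = 250 and the
       	 * fuse corners' max frequencies differ by 400 MHz, scaling =
       	 * 1000 * 250 / 400 = 625 (capped at max_quot_scale, 1400 on
       	 * qcs404). A corner running 100 MHz below its fuse corner's max
       	 * frequency then gets quot_adjust = 625 * 100 / 1000 = 62, which is
       	 * subtracted from the fused quotient in cpr_corner_restore().
       	 */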
   1174	for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
   1175		fnum = cdata[i].fuse_corner;
   1176		fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
   1177		quot_offset = fuses[fnum].quotient_offset;
   1178		fuse = &drv->fuse_corners[fnum];
   1179		if (fnum)
   1180			prev_fuse = &drv->fuse_corners[fnum - 1];
   1181		else
   1182			prev_fuse = NULL;
   1183
   1184		corner->fuse_corner = fuse;
   1185		corner->freq = cdata[i].freq;
   1186		corner->uV = fuse->uV;
   1187
   1188		if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
   1189			scaling = cpr_calculate_scaling(quot_offset, drv,
   1190							fdata, corner);
   1191			if (scaling < 0)
   1192				return scaling;
   1193
   1194			apply_scaling = true;
   1195		} else if (corner->freq == fuse->max_freq) {
   1196			/* This is a fuse corner; don't scale anything */
   1197			apply_scaling = false;
   1198		}
   1199
   1200		if (apply_scaling) {
   1201			freq_diff = fuse->max_freq - corner->freq;
   1202			freq_diff_mhz = freq_diff / 1000000;
   1203			corner->quot_adjust = scaling * freq_diff_mhz / 1000;
   1204
   1205			corner->uV = cpr_interpolate(corner, step_volt, fdata);
   1206		}
   1207
   1208		corner->max_uV = fuse->max_uV;
   1209		corner->min_uV = fuse->min_uV;
   1210		corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
   1211		corner->last_uV = corner->uV;
   1212
   1213		/* Reduce the ceiling voltage if needed */
   1214		if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
   1215			corner->max_uV = corner->uV;
   1216		else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
   1217			corner->max_uV = max(corner->min_uV, fuse->uV);
   1218
   1219		dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
   1220			corner->min_uV, corner->uV, corner->max_uV,
   1221			fuse->quot - corner->quot_adjust);
   1222	}
   1223
   1224	return 0;
   1225}
   1226
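       /*
        * The strings built in cpr_get_fuses() below are nvmem cell names,
        * resolved through the device's nvmem-cells/nvmem-cell-names
        * properties in the device tree. For fuse corner 1, for example, the
        * cells "cpr_ring_osc1", "cpr_init_voltage1", "cpr_quotient1" and
        * "cpr_quotient_offset1" are looked up (plus "cpr_fuse_revision" in
        * cpr_probe()).
        */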
   1227static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
   1228{
   1229	const struct cpr_desc *desc = drv->desc;
   1230	struct cpr_fuse *fuses;
   1231	int i;
   1232
   1233	fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
   1234			     sizeof(struct cpr_fuse),
   1235			     GFP_KERNEL);
   1236	if (!fuses)
   1237		return ERR_PTR(-ENOMEM);
   1238
   1239	for (i = 0; i < desc->num_fuse_corners; i++) {
   1240		char tbuf[32];
   1241
   1242		snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
   1243		fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
   1244		if (!fuses[i].ring_osc)
   1245			return ERR_PTR(-ENOMEM);
   1246
   1247		snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
   1248		fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
   1249						     GFP_KERNEL);
   1250		if (!fuses[i].init_voltage)
   1251			return ERR_PTR(-ENOMEM);
   1252
   1253		snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
   1254		fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
   1255		if (!fuses[i].quotient)
   1256			return ERR_PTR(-ENOMEM);
   1257
   1258		snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
   1259		fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
   1260							GFP_KERNEL);
   1261		if (!fuses[i].quotient_offset)
   1262			return ERR_PTR(-ENOMEM);
   1263	}
   1264
   1265	return fuses;
   1266}
   1267
   1268static void cpr_set_loop_allowed(struct cpr_drv *drv)
   1269{
   1270	drv->loop_disabled = false;
   1271}
   1272
   1273static int cpr_init_parameters(struct cpr_drv *drv)
   1274{
   1275	const struct cpr_desc *desc = drv->desc;
   1276	struct clk *clk;
   1277
   1278	clk = clk_get(drv->dev, "ref");
   1279	if (IS_ERR(clk))
   1280		return PTR_ERR(clk);
   1281
   1282	drv->ref_clk_khz = clk_get_rate(clk) / 1000;
   1283	clk_put(clk);
   1284
   1285	if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
   1286	    desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK ||
   1287	    desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK ||
   1288	    desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK ||
   1289	    desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK ||
   1290	    desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK)
   1291		return -EINVAL;
   1292
   1293	dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n",
   1294		desc->up_threshold, desc->down_threshold);
   1295
   1296	return 0;
   1297}
   1298
   1299static int cpr_find_initial_corner(struct cpr_drv *drv)
   1300{
   1301	unsigned long rate;
   1302	const struct corner *end;
   1303	struct corner *iter;
   1304	unsigned int i = 0;
   1305
   1306	if (!drv->cpu_clk) {
   1307		dev_err(drv->dev, "cannot get rate from NULL clk\n");
   1308		return -EINVAL;
   1309	}
   1310
   1311	end = &drv->corners[drv->num_corners - 1];
   1312	rate = clk_get_rate(drv->cpu_clk);
   1313
   1314	/*
   1315	 * Some bootloaders set a CPU clock frequency that is not defined
   1316	 * in the OPP table. When running at an unlisted frequency,
   1317	 * cpufreq_online() will change to the OPP which has the lowest
   1318	 * frequency, at or above the unlisted frequency.
   1319	 * Since cpufreq_online() always "rounds up" in the case of an
   1320	 * unlisted frequency, this function always "rounds down" in case
   1321	 * of an unlisted frequency. That way, when cpufreq_online()
   1322	 * triggers the first ever call to cpr_set_performance_state(),
   1323	 * it will correctly determine the direction as UP.
   1324	 */
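       	/*
       	 * For example (hypothetical rates): with corners at 998.4 MHz and
       	 * 1.2 GHz and a bootloader rate of 1.09 GHz, the loop below selects
       	 * the 998.4 MHz corner, while cpufreq will move to the 1.2 GHz OPP,
       	 * so the first cpr_set_performance_state() call sees direction UP.
       	 */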
   1325	for (iter = drv->corners; iter <= end; iter++) {
   1326		if (iter->freq > rate)
   1327			break;
   1328		i++;
   1329		if (iter->freq == rate) {
   1330			drv->corner = iter;
   1331			break;
   1332		}
   1333		if (iter->freq < rate)
   1334			drv->corner = iter;
   1335	}
   1336
   1337	if (!drv->corner) {
   1338		dev_err(drv->dev, "boot up corner not found\n");
   1339		return -EINVAL;
   1340	}
   1341
   1342	dev_dbg(drv->dev, "boot up perf state: %u\n", i);
   1343
   1344	return 0;
   1345}
   1346
   1347static const struct cpr_desc qcs404_cpr_desc = {
   1348	.num_fuse_corners = 3,
   1349	.min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
   1350	.step_quot = (int []){ 25, 25, 25, },
   1351	.timer_delay_us = 5000,
   1352	.timer_cons_up = 0,
   1353	.timer_cons_down = 2,
   1354	.up_threshold = 1,
   1355	.down_threshold = 3,
   1356	.idle_clocks = 15,
   1357	.gcnt_us = 1,
   1358	.vdd_apc_step_up_limit = 1,
   1359	.vdd_apc_step_down_limit = 1,
   1360	.cpr_fuses = {
   1361		.init_voltage_step = 8000,
   1362		.init_voltage_width = 6,
   1363		.fuse_corner_data = (struct fuse_corner_data[]){
   1364			/* fuse corner 0 */
   1365			{
   1366				.ref_uV = 1224000,
   1367				.max_uV = 1224000,
   1368				.min_uV = 1048000,
   1369				.max_volt_scale = 0,
   1370				.max_quot_scale = 0,
   1371				.quot_offset = 0,
   1372				.quot_scale = 1,
   1373				.quot_adjust = 0,
   1374				.quot_offset_scale = 5,
   1375				.quot_offset_adjust = 0,
   1376			},
   1377			/* fuse corner 1 */
   1378			{
   1379				.ref_uV = 1288000,
   1380				.max_uV = 1288000,
   1381				.min_uV = 1048000,
   1382				.max_volt_scale = 2000,
   1383				.max_quot_scale = 1400,
   1384				.quot_offset = 0,
   1385				.quot_scale = 1,
   1386				.quot_adjust = -20,
   1387				.quot_offset_scale = 5,
   1388				.quot_offset_adjust = 0,
   1389			},
   1390			/* fuse corner 2 */
   1391			{
   1392				.ref_uV = 1352000,
   1393				.max_uV = 1384000,
   1394				.min_uV = 1088000,
   1395				.max_volt_scale = 2000,
   1396				.max_quot_scale = 1400,
   1397				.quot_offset = 0,
   1398				.quot_scale = 1,
   1399				.quot_adjust = 0,
   1400				.quot_offset_scale = 5,
   1401				.quot_offset_adjust = 0,
   1402			},
   1403		},
   1404	},
   1405};
   1406
   1407static const struct acc_desc qcs404_acc_desc = {
   1408	.settings = (struct reg_sequence[]){
   1409		{ 0xb120, 0x1041040 },
   1410		{ 0xb124, 0x41 },
   1411		{ 0xb120, 0x0 },
   1412		{ 0xb124, 0x0 },
   1413		{ 0xb120, 0x0 },
   1414		{ 0xb124, 0x0 },
   1415	},
   1416	.config = (struct reg_sequence[]){
   1417		{ 0xb138, 0xff },
   1418		{ 0xb130, 0x5555 },
   1419	},
   1420	.num_regs_per_fuse = 2,
   1421};
   1422
   1423static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
   1424	.cpr_desc = &qcs404_cpr_desc,
   1425	.acc_desc = &qcs404_acc_desc,
   1426};
   1427
   1428static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
   1429					      struct dev_pm_opp *opp)
   1430{
   1431	return dev_pm_opp_get_level(opp);
   1432}
   1433
   1434static int cpr_power_off(struct generic_pm_domain *domain)
   1435{
   1436	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
   1437
   1438	return cpr_disable(drv);
   1439}
   1440
   1441static int cpr_power_on(struct generic_pm_domain *domain)
   1442{
   1443	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
   1444
   1445	return cpr_enable(drv);
   1446}
   1447
   1448static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
   1449			     struct device *dev)
   1450{
   1451	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
   1452	const struct acc_desc *acc_desc = drv->acc_desc;
   1453	int ret = 0;
   1454
   1455	mutex_lock(&drv->lock);
   1456
   1457	dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
   1458
   1459	/*
   1460	 * This driver only supports scaling voltage for a CPU cluster
   1461	 * where all CPUs in the cluster share a single regulator.
   1462	 * Therefore, save the struct device pointer only for the first
   1463	 * CPU device that gets attached. There is no need to do any
   1464	 * additional initialization when further CPUs get attached.
   1465	 */
   1466	if (drv->attached_cpu_dev)
   1467		goto unlock;
   1468
   1469	/*
   1470	 * cpr_scale_voltage() requires the direction (if we are changing
   1471	 * to a higher or lower OPP). The first time
   1472	 * cpr_set_performance_state() is called, there is no previous
   1473	 * performance state defined. Therefore, we call
   1474	 * cpr_find_initial_corner() that gets the CPU clock frequency
   1475	 * set by the bootloader, so that we can determine the direction
   1476	 * the first time cpr_set_performance_state() is called.
   1477	 */
   1478	drv->cpu_clk = devm_clk_get(dev, NULL);
   1479	if (IS_ERR(drv->cpu_clk)) {
   1480		ret = PTR_ERR(drv->cpu_clk);
   1481		if (ret != -EPROBE_DEFER)
   1482			dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
   1483		goto unlock;
   1484	}
   1485	drv->attached_cpu_dev = dev;
   1486
   1487	dev_dbg(drv->dev, "using cpu clk from: %s\n",
   1488		dev_name(drv->attached_cpu_dev));
   1489
   1490	/*
   1491	 * Everything related to (virtual) corners has to be initialized
   1492	 * here, when attaching to the power domain, since we need to know
   1493	 * the maximum frequency for each fuse corner, and this is only
   1494	 * available after the cpufreq driver has attached to us.
   1495	 * (The highest frequency associated with each fuse corner comes
   1496	 * from the CPU's OPP table, see cpr_corner_init().)
   1497	 */
   1498	ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
   1499	if (ret < 0) {
   1500		dev_err(drv->dev, "could not get OPP count\n");
   1501		goto unlock;
   1502	}
   1503	drv->num_corners = ret;
   1504
   1505	if (drv->num_corners < 2) {
   1506		dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
   1507		ret = -EINVAL;
   1508		goto unlock;
   1509	}
   1510
   1511	drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
   1512				    sizeof(*drv->corners),
   1513				    GFP_KERNEL);
   1514	if (!drv->corners) {
   1515		ret = -ENOMEM;
   1516		goto unlock;
   1517	}
   1518
   1519	ret = cpr_corner_init(drv);
   1520	if (ret)
   1521		goto unlock;
   1522
   1523	cpr_set_loop_allowed(drv);
   1524
   1525	ret = cpr_init_parameters(drv);
   1526	if (ret)
   1527		goto unlock;
   1528
   1529	/* Configure CPR HW but keep it disabled */
   1530	ret = cpr_config(drv);
   1531	if (ret)
   1532		goto unlock;
   1533
   1534	ret = cpr_find_initial_corner(drv);
   1535	if (ret)
   1536		goto unlock;
   1537
   1538	if (acc_desc->config)
   1539		regmap_multi_reg_write(drv->tcsr, acc_desc->config,
   1540				       acc_desc->num_regs_per_fuse);
   1541
   1542	/* Enable ACC if required */
   1543	if (acc_desc->enable_mask)
   1544		regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
   1545				   acc_desc->enable_mask,
   1546				   acc_desc->enable_mask);
   1547
   1548	dev_info(drv->dev, "driver initialized with %u OPPs\n",
   1549		 drv->num_corners);
   1550
   1551unlock:
   1552	mutex_unlock(&drv->lock);
   1553
   1554	return ret;
   1555}
   1556
   1557static int cpr_debug_info_show(struct seq_file *s, void *unused)
   1558{
   1559	u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
   1560	u32 step_dn, step_up, error, error_lt0, busy;
   1561	struct cpr_drv *drv = s->private;
   1562	struct fuse_corner *fuse_corner;
   1563	struct corner *corner;
   1564
   1565	corner = drv->corner;
   1566	fuse_corner = corner->fuse_corner;
   1567
   1568	seq_printf(s, "corner, current_volt = %d uV\n",
   1569		       corner->last_uV);
   1570
   1571	ro_sel = fuse_corner->ring_osc_idx;
   1572	gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel));
   1573	seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt);
   1574
   1575	ctl = cpr_read(drv, REG_RBCPR_CTL);
   1576	seq_printf(s, "rbcpr_ctl = %#02X\n", ctl);
   1577
   1578	irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS);
   1579	seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status);
   1580
   1581	reg = cpr_read(drv, REG_RBCPR_RESULT_0);
   1582	seq_printf(s, "rbcpr_result_0 = %#02X\n", reg);
   1583
   1584	step_dn = reg & 0x01;
   1585	step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
   1586	seq_printf(s, "  [step_dn = %u", step_dn);
   1587
   1588	seq_printf(s, ", step_up = %u", step_up);
   1589
   1590	error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
   1591				& RBCPR_RESULT0_ERROR_STEPS_MASK;
   1592	seq_printf(s, ", error_steps = %u", error_steps);
   1593
   1594	error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
   1595	seq_printf(s, ", error = %u", error);
   1596
   1597	error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
   1598	seq_printf(s, ", error_lt_0 = %u", error_lt0);
   1599
   1600	busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
   1601	seq_printf(s, ", busy = %u]\n", busy);
   1602
   1603	return 0;
   1604}
   1605DEFINE_SHOW_ATTRIBUTE(cpr_debug_info);
   1606
   1607static void cpr_debugfs_init(struct cpr_drv *drv)
   1608{
   1609	drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);
   1610
   1611	debugfs_create_file("debug_info", 0444, drv->debugfs,
   1612			    drv, &cpr_debug_info_fops);
   1613}
   1614
   1615static int cpr_probe(struct platform_device *pdev)
   1616{
   1617	struct device *dev = &pdev->dev;
   1618	struct cpr_drv *drv;
   1619	int irq, ret;
   1620	const struct cpr_acc_desc *data;
   1621	struct device_node *np;
   1622	u32 cpr_rev = FUSE_REVISION_UNKNOWN;
   1623
   1624	data = of_device_get_match_data(dev);
   1625	if (!data || !data->cpr_desc || !data->acc_desc)
   1626		return -EINVAL;
   1627
   1628	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
   1629	if (!drv)
   1630		return -ENOMEM;
   1631	drv->dev = dev;
   1632	drv->desc = data->cpr_desc;
   1633	drv->acc_desc = data->acc_desc;
   1634
   1635	drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners,
   1636					 sizeof(*drv->fuse_corners),
   1637					 GFP_KERNEL);
   1638	if (!drv->fuse_corners)
   1639		return -ENOMEM;
   1640
   1641	np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
   1642	if (!np)
   1643		return -ENODEV;
   1644
   1645	drv->tcsr = syscon_node_to_regmap(np);
   1646	of_node_put(np);
   1647	if (IS_ERR(drv->tcsr))
   1648		return PTR_ERR(drv->tcsr);
   1649
   1650	drv->base = devm_platform_ioremap_resource(pdev, 0);
   1651	if (IS_ERR(drv->base))
   1652		return PTR_ERR(drv->base);
   1653
   1654	irq = platform_get_irq(pdev, 0);
   1655	if (irq < 0)
   1656		return -EINVAL;
   1657
   1658	drv->vdd_apc = devm_regulator_get(dev, "vdd-apc");
   1659	if (IS_ERR(drv->vdd_apc))
   1660		return PTR_ERR(drv->vdd_apc);
   1661
   1662	/*
   1663	 * Initialize fuse corners, since they simply depend
   1664	 * on data in efuses.
   1665	 * Everything related to (virtual) corners has to be
   1666	 * initialized after attaching to the power domain,
   1667	 * since it depends on the CPU's OPP table.
   1668	 */
   1669	ret = nvmem_cell_read_variable_le_u32(dev, "cpr_fuse_revision", &cpr_rev);
   1670	if (ret)
   1671		return ret;
   1672
   1673	drv->cpr_fuses = cpr_get_fuses(drv);
   1674	if (IS_ERR(drv->cpr_fuses))
   1675		return PTR_ERR(drv->cpr_fuses);
   1676
   1677	ret = cpr_populate_ring_osc_idx(drv);
   1678	if (ret)
   1679		return ret;
   1680
   1681	ret = cpr_fuse_corner_init(drv);
   1682	if (ret)
   1683		return ret;
   1684
   1685	mutex_init(&drv->lock);
   1686
   1687	ret = devm_request_threaded_irq(dev, irq, NULL,
   1688					cpr_irq_handler,
   1689					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
   1690					"cpr", drv);
   1691	if (ret)
   1692		return ret;
   1693
   1694	drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name,
   1695					  GFP_KERNEL);
   1696	if (!drv->pd.name)
   1697		return -EINVAL;
   1698
   1699	drv->pd.power_off = cpr_power_off;
   1700	drv->pd.power_on = cpr_power_on;
   1701	drv->pd.set_performance_state = cpr_set_performance_state;
   1702	drv->pd.opp_to_performance_state = cpr_get_performance_state;
   1703	drv->pd.attach_dev = cpr_pd_attach_dev;
   1704
   1705	ret = pm_genpd_init(&drv->pd, NULL, true);
   1706	if (ret)
   1707		return ret;
   1708
   1709	ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
   1710	if (ret)
   1711		return ret;
   1712
   1713	platform_set_drvdata(pdev, drv);
   1714	cpr_debugfs_init(drv);
   1715
   1716	return 0;
   1717}
   1718
   1719static int cpr_remove(struct platform_device *pdev)
   1720{
   1721	struct cpr_drv *drv = platform_get_drvdata(pdev);
   1722
   1723	if (cpr_is_allowed(drv)) {
   1724		cpr_ctl_disable(drv);
   1725		cpr_irq_set(drv, 0);
   1726	}
   1727
   1728	of_genpd_del_provider(pdev->dev.of_node);
   1729	pm_genpd_remove(&drv->pd);
   1730
   1731	debugfs_remove_recursive(drv->debugfs);
   1732
   1733	return 0;
   1734}
   1735
   1736static const struct of_device_id cpr_match_table[] = {
   1737	{ .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc },
   1738	{ }
   1739};
   1740MODULE_DEVICE_TABLE(of, cpr_match_table);
   1741
   1742static struct platform_driver cpr_driver = {
   1743	.probe		= cpr_probe,
   1744	.remove		= cpr_remove,
   1745	.driver		= {
   1746		.name	= "qcom-cpr",
   1747		.of_match_table = cpr_match_table,
   1748	},
   1749};
   1750module_platform_driver(cpr_driver);
   1751
   1752MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
   1753MODULE_LICENSE("GPL v2");