cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

gdsc.c (13562B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
      4 */
      5
      6#include <linux/bitops.h>
      7#include <linux/delay.h>
      8#include <linux/err.h>
      9#include <linux/export.h>
     10#include <linux/jiffies.h>
     11#include <linux/kernel.h>
     12#include <linux/ktime.h>
     13#include <linux/pm_domain.h>
     14#include <linux/pm_runtime.h>
     15#include <linux/regmap.h>
     16#include <linux/regulator/consumer.h>
     17#include <linux/reset-controller.h>
     18#include <linux/slab.h>
     19#include "gdsc.h"
     20
     21#define PWR_ON_MASK		BIT(31)
     22#define EN_REST_WAIT_MASK	GENMASK_ULL(23, 20)
     23#define EN_FEW_WAIT_MASK	GENMASK_ULL(19, 16)
     24#define CLK_DIS_WAIT_MASK	GENMASK_ULL(15, 12)
     25#define SW_OVERRIDE_MASK	BIT(2)
     26#define HW_CONTROL_MASK		BIT(1)
     27#define SW_COLLAPSE_MASK	BIT(0)
     28#define GMEM_CLAMP_IO_MASK	BIT(0)
     29#define GMEM_RESET_MASK		BIT(4)
     30
     31/* CFG_GDSCR */
     32#define GDSC_POWER_UP_COMPLETE		BIT(16)
     33#define GDSC_POWER_DOWN_COMPLETE	BIT(15)
     34#define GDSC_RETAIN_FF_ENABLE		BIT(11)
     35#define CFG_GDSCR_OFFSET		0x4
     36
     37/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
     38#define EN_REST_WAIT_VAL	0x2
     39#define EN_FEW_WAIT_VAL		0x8
     40#define CLK_DIS_WAIT_VAL	0x2
     41
     42/* Transition delay shifts */
     43#define EN_REST_WAIT_SHIFT	20
     44#define EN_FEW_WAIT_SHIFT	16
     45#define CLK_DIS_WAIT_SHIFT	12
     46
     47#define RETAIN_MEM		BIT(14)
     48#define RETAIN_PERIPH		BIT(13)
     49
     50#define TIMEOUT_US		500
     51
     52#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
     53
/* Desired/observed power state of a GDSC, as tested by gdsc_check_status() */
enum gdsc_status {
	GDSC_OFF,
	GDSC_ON
};
     58
     59static int gdsc_pm_runtime_get(struct gdsc *sc)
     60{
     61	if (!sc->dev)
     62		return 0;
     63
     64	return pm_runtime_resume_and_get(sc->dev);
     65}
     66
     67static int gdsc_pm_runtime_put(struct gdsc *sc)
     68{
     69	if (!sc->dev)
     70		return 0;
     71
     72	return pm_runtime_put_sync(sc->dev);
     73}
     74
     75/* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
     76static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
     77{
     78	unsigned int reg;
     79	u32 val;
     80	int ret;
     81
     82	if (sc->flags & POLL_CFG_GDSCR)
     83		reg = sc->gdscr + CFG_GDSCR_OFFSET;
     84	else if (sc->gds_hw_ctrl)
     85		reg = sc->gds_hw_ctrl;
     86	else
     87		reg = sc->gdscr;
     88
     89	ret = regmap_read(sc->regmap, reg, &val);
     90	if (ret)
     91		return ret;
     92
     93	if (sc->flags & POLL_CFG_GDSCR) {
     94		switch (status) {
     95		case GDSC_ON:
     96			return !!(val & GDSC_POWER_UP_COMPLETE);
     97		case GDSC_OFF:
     98			return !!(val & GDSC_POWER_DOWN_COMPLETE);
     99		}
    100	}
    101
    102	switch (status) {
    103	case GDSC_ON:
    104		return !!(val & PWR_ON_MASK);
    105	case GDSC_OFF:
    106		return !(val & PWR_ON_MASK);
    107	}
    108
    109	return -EINVAL;
    110}
    111
    112static int gdsc_hwctrl(struct gdsc *sc, bool en)
    113{
    114	u32 val = en ? HW_CONTROL_MASK : 0;
    115
    116	return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
    117}
    118
    119static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
    120{
    121	ktime_t start;
    122
    123	start = ktime_get();
    124	do {
    125		if (gdsc_check_status(sc, status))
    126			return 0;
    127	} while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US);
    128
    129	if (gdsc_check_status(sc, status))
    130		return 0;
    131
    132	return -ETIMEDOUT;
    133}
    134
/*
 * Drive the GDSC to @status by writing the SW_COLLAPSE bit, then wait for
 * the hardware to confirm the transition.  Also sequences the external
 * supply regulator: enabled before power-up, disabled only after a
 * confirmed power-down.  Returns 0 on success or a negative errno.
 */
static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
{
	int ret;
	/* Clearing SW_COLLAPSE powers the domain up; setting it collapses it */
	u32 val = (status == GDSC_ON) ? 0 : SW_COLLAPSE_MASK;

	if (status == GDSC_ON && sc->rsupply) {
		ret = regulator_enable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
	if (ret)
		return ret;

	/* If disabling votable gdscs, don't poll on status */
	if ((sc->flags & VOTABLE) && status == GDSC_OFF) {
		/*
		 * Add a short delay here to ensure that an enable
		 * right after it was disabled does not put it in an
		 * unknown state
		 */
		udelay(TIMEOUT_US);
		return 0;
	}

	if (sc->gds_hw_ctrl) {
		/*
		 * The gds hw controller asserts/de-asserts the status bit soon
		 * after it receives a power on/off request from a master.
		 * The controller then takes around 8 xo cycles to start its
		 * internal state machine and update the status bit. During
		 * this time, the status bit does not reflect the true status
		 * of the core.
		 * Add a delay of 1 us between writing to the SW_COLLAPSE bit
		 * and polling the status bit.
		 */
		udelay(1);
	}

	/* Note: GDSC_OFF == 0, so the message reads stuck at 'on' / 'off' */
	ret = gdsc_poll_status(sc, status);
	WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n");

	/* Only cut the supply once the hardware confirms the domain is off */
	if (!ret && status == GDSC_OFF && sc->rsupply) {
		ret = regulator_disable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	return ret;
}
    186
    187static inline int gdsc_deassert_reset(struct gdsc *sc)
    188{
    189	int i;
    190
    191	for (i = 0; i < sc->reset_count; i++)
    192		sc->rcdev->ops->deassert(sc->rcdev, sc->resets[i]);
    193	return 0;
    194}
    195
    196static inline int gdsc_assert_reset(struct gdsc *sc)
    197{
    198	int i;
    199
    200	for (i = 0; i < sc->reset_count; i++)
    201		sc->rcdev->ops->assert(sc->rcdev, sc->resets[i]);
    202	return 0;
    203}
    204
    205static inline void gdsc_force_mem_on(struct gdsc *sc)
    206{
    207	int i;
    208	u32 mask = RETAIN_MEM;
    209
    210	if (!(sc->flags & NO_RET_PERIPH))
    211		mask |= RETAIN_PERIPH;
    212
    213	for (i = 0; i < sc->cxc_count; i++)
    214		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
    215}
    216
    217static inline void gdsc_clear_mem_on(struct gdsc *sc)
    218{
    219	int i;
    220	u32 mask = RETAIN_MEM;
    221
    222	if (!(sc->flags & NO_RET_PERIPH))
    223		mask |= RETAIN_PERIPH;
    224
    225	for (i = 0; i < sc->cxc_count; i++)
    226		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
    227}
    228
    229static inline void gdsc_deassert_clamp_io(struct gdsc *sc)
    230{
    231	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
    232			   GMEM_CLAMP_IO_MASK, 0);
    233}
    234
    235static inline void gdsc_assert_clamp_io(struct gdsc *sc)
    236{
    237	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
    238			   GMEM_CLAMP_IO_MASK, 1);
    239}
    240
    241static inline void gdsc_assert_reset_aon(struct gdsc *sc)
    242{
    243	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
    244			   GMEM_RESET_MASK, 1);
    245	udelay(1);
    246	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
    247			   GMEM_RESET_MASK, 0);
    248}
    249
    250static void gdsc_retain_ff_on(struct gdsc *sc)
    251{
    252	u32 mask = GDSC_RETAIN_FF_ENABLE;
    253
    254	regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
    255}
    256
/*
 * Power a GDSC up: run the optional reset / clamp-IO preamble, un-collapse
 * the core, restore memory retention and re-arm HW control and FF retention
 * as configured.  The ordering and delays below follow the hardware
 * sequencing requirements and must not be rearranged.
 */
static int _gdsc_enable(struct gdsc *sc)
{
	int ret;

	/* Reset-controlled domains are "enabled" by releasing their resets */
	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_deassert_reset(sc);

	if (sc->flags & SW_RESET) {
		gdsc_assert_reset(sc);
		udelay(1);
		gdsc_deassert_reset(sc);
	}

	if (sc->flags & CLAMP_IO) {
		if (sc->flags & AON_RESET)
			gdsc_assert_reset_aon(sc);
		gdsc_deassert_clamp_io(sc);
	}

	ret = gdsc_toggle_logic(sc, GDSC_ON);
	if (ret)
		return ret;

	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_force_mem_on(sc);

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the power domain is
	 * enabled. Delay to account for this. A delay is also needed to ensure
	 * clocks are not enabled within 400ns of enabling power to the
	 * memories.
	 */
	udelay(1);

	/* Turn on HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, true);
		if (ret)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle.  In case a firmware ends up polling status
		 * bits for the gdsc, it might read an 'on' status before
		 * the GDSC can finish the power cycle.
		 * We wait 1us before returning to ensure the firmware
		 * can't immediately poll the status bits.
		 */
		udelay(1);
	}

	if (sc->flags & RETAIN_FF_ENABLE)
		gdsc_retain_ff_on(sc);

	return 0;
}
    313
    314static int gdsc_enable(struct generic_pm_domain *domain)
    315{
    316	struct gdsc *sc = domain_to_gdsc(domain);
    317	int ret;
    318
    319	ret = gdsc_pm_runtime_get(sc);
    320	if (ret)
    321		return ret;
    322
    323	return _gdsc_enable(sc);
    324}
    325
/*
 * Power a GDSC down: leave HW control mode (waiting for the resulting
 * power cycle to finish), drop memory retention, collapse the core, and
 * clamp I/O if configured.  Ordering mirrors _gdsc_enable() in reverse.
 */
static int _gdsc_disable(struct gdsc *sc)
{
	int ret;

	/* Reset-controlled domains are "disabled" by asserting their resets */
	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_assert_reset(sc);

	/* Turn off HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, false);
		if (ret < 0)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle.  In case we end up polling status
		 * bits for the gdsc before the power cycle is completed
		 * it might read an 'on' status wrongly.
		 */
		udelay(1);

		ret = gdsc_poll_status(sc, GDSC_ON);
		if (ret)
			return ret;
	}

	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_clear_mem_on(sc);

	ret = gdsc_toggle_logic(sc, GDSC_OFF);
	if (ret)
		return ret;

	if (sc->flags & CLAMP_IO)
		gdsc_assert_clamp_io(sc);

	return 0;
}
    363
    364static int gdsc_disable(struct generic_pm_domain *domain)
    365{
    366	struct gdsc *sc = domain_to_gdsc(domain);
    367	int ret;
    368
    369	ret = _gdsc_disable(sc);
    370
    371	gdsc_pm_runtime_put(sc);
    372
    373	return ret;
    374}
    375
    376static int gdsc_init(struct gdsc *sc)
    377{
    378	u32 mask, val;
    379	int on, ret;
    380
    381	/*
    382	 * Disable HW trigger: collapse/restore occur based on registers writes.
    383	 * Disable SW override: Use hardware state-machine for sequencing.
    384	 * Configure wait time between states.
    385	 */
    386	mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
    387	       EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;
    388
    389	if (!sc->en_rest_wait_val)
    390		sc->en_rest_wait_val = EN_REST_WAIT_VAL;
    391	if (!sc->en_few_wait_val)
    392		sc->en_few_wait_val = EN_FEW_WAIT_VAL;
    393	if (!sc->clk_dis_wait_val)
    394		sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;
    395
    396	val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT |
    397		sc->en_few_wait_val << EN_FEW_WAIT_SHIFT |
    398		sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;
    399
    400	ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
    401	if (ret)
    402		return ret;
    403
    404	/* Force gdsc ON if only ON state is supported */
    405	if (sc->pwrsts == PWRSTS_ON) {
    406		ret = gdsc_toggle_logic(sc, GDSC_ON);
    407		if (ret)
    408			return ret;
    409	}
    410
    411	on = gdsc_check_status(sc, GDSC_ON);
    412	if (on < 0)
    413		return on;
    414
    415	if (on) {
    416		/* The regulator must be on, sync the kernel state */
    417		if (sc->rsupply) {
    418			ret = regulator_enable(sc->rsupply);
    419			if (ret < 0)
    420				return ret;
    421		}
    422
    423		/*
    424		 * Votable GDSCs can be ON due to Vote from other masters.
    425		 * If a Votable GDSC is ON, make sure we have a Vote.
    426		 */
    427		if (sc->flags & VOTABLE) {
    428			ret = regmap_update_bits(sc->regmap, sc->gdscr,
    429						 SW_COLLAPSE_MASK, val);
    430			if (ret)
    431				return ret;
    432		}
    433
    434		/* Turn on HW trigger mode if supported */
    435		if (sc->flags & HW_CTRL) {
    436			ret = gdsc_hwctrl(sc, true);
    437			if (ret < 0)
    438				return ret;
    439		}
    440
    441		/*
    442		 * Make sure the retain bit is set if the GDSC is already on,
    443		 * otherwise we end up turning off the GDSC and destroying all
    444		 * the register contents that we thought we were saving.
    445		 */
    446		if (sc->flags & RETAIN_FF_ENABLE)
    447			gdsc_retain_ff_on(sc);
    448	} else if (sc->flags & ALWAYS_ON) {
    449		/* If ALWAYS_ON GDSCs are not ON, turn them ON */
    450		gdsc_enable(&sc->pd);
    451		on = true;
    452	}
    453
    454	if (on || (sc->pwrsts & PWRSTS_RET))
    455		gdsc_force_mem_on(sc);
    456	else
    457		gdsc_clear_mem_on(sc);
    458
    459	if (sc->flags & ALWAYS_ON)
    460		sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
    461	if (!sc->pd.power_off)
    462		sc->pd.power_off = gdsc_disable;
    463	if (!sc->pd.power_on)
    464		sc->pd.power_on = gdsc_enable;
    465	pm_genpd_init(&sc->pd, NULL, !on);
    466
    467	return 0;
    468}
    469
/**
 * gdsc_register() - Initialize a set of GDSCs and expose them as genpd
 *                   power domains via an OF onecell provider
 * @desc: device and array of GDSC descriptors to register
 * @rcdev: reset controller used by GDSCs driven through resets
 * @regmap: regmap through which all GDSC registers are accessed
 *
 * Return: 0 on success, negative errno on failure.
 */
int gdsc_register(struct gdsc_desc *desc,
		  struct reset_controller_dev *rcdev, struct regmap *regmap)
{
	int i, ret;
	struct genpd_onecell_data *data;
	struct device *dev = desc->dev;
	struct gdsc **scs = desc->scs;
	size_t num = desc->num;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
				     GFP_KERNEL);
	if (!data->domains)
		return -ENOMEM;

	/* Resolve supply regulators up front, before touching any hardware */
	for (i = 0; i < num; i++) {
		if (!scs[i] || !scs[i]->supply)
			continue;

		scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply);
		if (IS_ERR(scs[i]->rsupply))
			return PTR_ERR(scs[i]->rsupply);
	}

	data->num_domains = num;
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		/* Only track the device when runtime PM is actually in use */
		if (pm_runtime_enabled(dev))
			scs[i]->dev = dev;
		scs[i]->regmap = regmap;
		scs[i]->rcdev = rcdev;
		ret = gdsc_init(scs[i]);
		if (ret)
			return ret;
		data->domains[i] = &scs[i]->pd;
	}

	/* Add subdomains */
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (scs[i]->parent)
			pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
		else if (!IS_ERR_OR_NULL(dev->pm_domain))
			pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
	}

	return of_genpd_add_provider_onecell(dev->of_node, data);
}
    523
    524void gdsc_unregister(struct gdsc_desc *desc)
    525{
    526	int i;
    527	struct device *dev = desc->dev;
    528	struct gdsc **scs = desc->scs;
    529	size_t num = desc->num;
    530
    531	/* Remove subdomains */
    532	for (i = 0; i < num; i++) {
    533		if (!scs[i])
    534			continue;
    535		if (scs[i]->parent)
    536			pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
    537		else if (!IS_ERR_OR_NULL(dev->pm_domain))
    538			pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
    539	}
    540	of_genpd_del_provider(dev->of_node);
    541}
    542
/*
 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
 * running in the CX domain so the CPU doesn't need to know anything about the
 * GX domain EXCEPT....
 *
 * Hardware constraints dictate that the GX be powered down before the CX. If
 * the GMU crashes it could leave the GX on. In order to successfully bring back
 * the device the CPU needs to disable the GX headswitch. There being no sane
 * way to reach in and touch that register from deep inside the GPU driver we
 * need to set up the infrastructure to be able to ensure that the GPU can
 * ensure that the GX is off during this super special case. We do this by
 * defining a GX gdsc with a dummy enable function and a "default" disable
 * function.
 *
 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
 * driver. During power up, nothing will happen from the CPU (and the GMU will
 * power up normally but during power down this will ensure that the GX domain
 * is *really* off - this gives us a semi standard way of doing what we need.
 */
/**
 * gdsc_gx_do_nothing_enable() - Dummy genpd ->power_on for the GPU GX domain
 * @domain: the GX power domain (unused)
 *
 * Return: always 0 (see the block comment above for why).
 */
int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
{
	/* Do nothing but give genpd the impression that we were successful */
	return 0;
}
EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable);