cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gk20a.c (15419B)


/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
 *
 */
#include "priv.h"
#include "gk20a.h"

#include <core/tegra.h>
#include <subdev/timer.h>

static const u8 _pl_to_div[] = {
/* PL:   0, 1, 2, 3, 4, 5, 6,  7,  8,  9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
};

static u32 pl_to_div(u32 pl)
{
	if (pl >= ARRAY_SIZE(_pl_to_div))
		return 1;

	return _pl_to_div[pl];
}

static u32 div_to_pl(u32 div)
{
	u32 pl;

	for (pl = 0; pl < ARRAY_SIZE(_pl_to_div) - 1; pl++) {
		if (_pl_to_div[pl] >= div)
			return pl;
	}

	return ARRAY_SIZE(_pl_to_div) - 1;
}
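
/*
 * Example of the mapping above: pl_to_div(7) == 10 and div_to_pl(10) == 7.
 * A divider with no exact table entry rounds up to the next entry that is
 * at least as large, e.g. div_to_pl(7) == 6 (divider 8), and out-of-range
 * PL values fall back to a divider of 1.
 */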

static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
	.min_vco = 1000000, .max_vco = 2064000,
	.min_u = 12000, .max_u = 38000,
	.min_m = 1, .max_m = 255,
	.min_n = 8, .max_n = 255,
	.min_pl = 1, .max_pl = 32,
};
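
/*
 * Note: the limits above appear to be in kHz; min_vco/max_vco span
 * 1.0 to 2.064 GHz, and min_u/max_u bound the post-M-divider reference
 * (u_f = ref_clk_f / m in gk20a_pllg_calc_mnp()) to 12..38 MHz, matching
 * the kHz-domain *_f variables used throughout this file.
 */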

void
gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	val = nvkm_rd32(device, GPCPLL_COEFF);
	pll->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
	pll->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
	pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}

void
gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
	val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
	val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
	nvkm_wr32(device, GPCPLL_COEFF, val);
}

u32
gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
	u32 rate;
	u32 divider;

	rate = clk->parent_rate * pll->n;
	divider = pll->m * clk->pl_to_div(pll->pl);

	return rate / divider / 2;
}
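
/*
 * Worked example (hypothetical operating point): with parent_rate =
 * 38400000 Hz, N = 104, M = 2 and PL = 1 (divider 2), the VCO runs at
 * 38.4 MHz * 104 / 2 = 1996.8 MHz and the function returns
 * 1996.8 MHz / 2 (PL divider) / 2 = 499.2 MHz. The trailing division by
 * two mirrors the "rate * 2" scaling in gk20a_pllg_calc_mnp(), which
 * suggests the PLL is programmed in the doubled gpc2clk domain.
 */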
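/*
 * gk20a_pllg_calc_mnp() picks M, N and PL so that
 * parent_rate * N / (M * div(PL) * 2) lands as close as possible to the
 * requested rate: it walks the PL range that can keep the VCO within
 * [min_vco, max_vco], scans M subject to the min_u/max_u bounds on the
 * post-divider reference, and for each (PL, M) pair tries the one or two
 * N values bracketing the target VCO, keeping the smallest-error match.
 */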
int
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
		    struct gk20a_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	u32 target_clk_f, ref_clk_f, target_freq;
	u32 min_vco_f, max_vco_f;
	u32 low_pl, high_pl, best_pl;
	u32 target_vco_f;
	u32 best_m, best_n;
	u32 best_delta = ~0;
	u32 pl;

	target_clk_f = rate * 2 / KHZ;
	ref_clk_f = clk->parent_rate / KHZ;

	target_vco_f = target_clk_f + target_clk_f / 50;
	max_vco_f = max(clk->params->max_vco, target_vco_f);
	min_vco_f = clk->params->min_vco;
	best_m = clk->params->max_m;
	best_n = clk->params->min_n;
	best_pl = clk->params->min_pl;

	/* min_pl <= high_pl <= max_pl */
	high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
	high_pl = min(high_pl, clk->params->max_pl);
	high_pl = max(high_pl, clk->params->min_pl);
	high_pl = clk->div_to_pl(high_pl);

	/* min_pl <= low_pl <= max_pl */
	low_pl = min_vco_f / target_vco_f;
	low_pl = min(low_pl, clk->params->max_pl);
	low_pl = max(low_pl, clk->params->min_pl);
	low_pl = clk->div_to_pl(low_pl);

	nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
		   clk->pl_to_div(low_pl), high_pl, clk->pl_to_div(high_pl));

	/* Select lowest possible VCO */
	for (pl = low_pl; pl <= high_pl; pl++) {
		u32 m, n, n2;

		target_vco_f = target_clk_f * clk->pl_to_div(pl);

		for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
			u32 u_f = ref_clk_f / m;

			if (u_f < clk->params->min_u)
				break;
			if (u_f > clk->params->max_u)
				continue;

			n = (target_vco_f * m) / ref_clk_f;
			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;

			if (n > clk->params->max_n)
				break;

			for (; n <= n2; n++) {
				u32 vco_f;

				if (n < clk->params->min_n)
					continue;
				if (n > clk->params->max_n)
					break;

				vco_f = ref_clk_f * n / m;

				if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
					u32 delta, lwv;

					lwv = (vco_f + (clk->pl_to_div(pl) / 2))
						/ clk->pl_to_div(pl);
					delta = abs(lwv - target_clk_f);

					if (delta < best_delta) {
						best_delta = delta;
						best_m = m;
						best_n = n;
						best_pl = pl;

						if (best_delta == 0)
							goto found_match;
					}
				}
			}
		}
	}

found_match:
	WARN_ON(best_delta == ~0);

	if (best_delta != 0)
		nvkm_debug(subdev,
			   "no best match for target @ %dMHz on gpc_pll",
			   target_clk_f / KHZ);

	pll->m = best_m;
	pll->n = best_n;
	pll->pl = best_pl;

	target_freq = gk20a_pllg_calc_rate(clk, pll);

	nvkm_debug(subdev,
		   "actual target freq %d kHz, M %d, N %d, PL %d(div%d)\n",
		   target_freq / KHZ, pll->m, pll->n, pll->pl,
		   clk->pl_to_div(pll->pl));
	return 0;
}

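/*
 * Slide (dynamically ramp) NDIV to a new value while the PLL stays
 * locked, avoiding the bypass switch that a full reprogram requires:
 * enter slowdown mode, latch the new N, trigger EN_DYNRAMP, wait for
 * DYNRAMP_DONE_SYNCED, then leave slowdown mode.
 */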
static int
gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gk20a_pll pll;
	int ret = 0;

	/* get old coefficients */
	gk20a_pllg_read_mnp(clk, &pll);
	/* do nothing if NDIV is the same */
	if (n == pll.n)
		return 0;

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp */
	pll.n = n;
	udelay(1);
	gk20a_pllg_write_mnp(clk, &pll);

	/* dynamic ramp to new ndiv */
	udelay(1);
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));

	/* wait for ramping to complete */
	if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
		ret = -ETIMEDOUT;

	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

	return ret;
}

static int
gk20a_pllg_enable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);

	/* enable lock detection */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
		nvkm_wr32(device, GPCPLL_CFG, val);
	}

	/* wait for lock */
	if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
			   GPCPLL_CFG_LOCK) < 0)
		return -ETIMEDOUT;

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
		BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	return 0;
}

static void
gk20a_pllg_disable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	nvkm_rd32(device, GPCPLL_CFG);
}

static int
gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gk20a_pll cur_pll;
	int ret;

	gk20a_pllg_read_mnp(clk, &cur_pll);

	/* split VCO-to-bypass jump in half by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);
	udelay(2);

	gk20a_pllg_disable(clk);

	gk20a_pllg_write_mnp(clk, pll);

	ret = gk20a_pllg_enable(clk);
	if (ret)
		return ret;

	/* restore out divider 1:1 */
	udelay(2);
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);

	return 0;
}

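/*
 * Full reprogram with an NDIV slide at each end: if only N changes, a
 * single slide suffices; otherwise slide down to the current NDIV_LO
 * (the N that puts the VCO at its minimum), reprogram M/N/PL through
 * bypass at the new NDIV_LO, and slide back up to the requested N.
 */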
static int
gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
	struct gk20a_pll cur_pll;
	int ret;

	if (gk20a_pllg_is_enabled(clk)) {
		gk20a_pllg_read_mnp(clk, &cur_pll);

		/* just do NDIV slide if there is no change to M and PL */
		if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
			return gk20a_pllg_slide(clk, pll->n);

		/* slide down to current NDIV_LO */
		cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
		ret = gk20a_pllg_slide(clk, cur_pll.n);
		if (ret)
			return ret;
	}

	/* program MNP with the new clock parameters and new NDIV_LO */
	cur_pll = *pll;
	cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
	ret = gk20a_pllg_program_mnp(clk, &cur_pll);
	if (ret)
		return ret;

	/* slide up to new NDIV */
	return gk20a_pllg_slide(clk, pll->n);
}

static struct nvkm_pstate
gk20a_pstates[] = {
	{
		.base = {
			.domain[nv_clk_src_gpc] = 72000,
			.voltage = 0,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 108000,
			.voltage = 1,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 180000,
			.voltage = 2,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 252000,
			.voltage = 3,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 324000,
			.voltage = 4,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 396000,
			.voltage = 5,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 468000,
			.voltage = 6,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 540000,
			.voltage = 7,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 612000,
			.voltage = 8,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 648000,
			.voltage = 9,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 684000,
			.voltage = 10,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 708000,
			.voltage = 11,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 756000,
			.voltage = 12,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 804000,
			.voltage = 13,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 852000,
			.voltage = 14,
		},
	},
};
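
/*
 * The pstate frequencies above are GPC clock rates in kHz (72..852 MHz);
 * gk20a_clk_calc() scales them by GK20A_CLK_GPC_MDIV before passing them
 * to gk20a_pllg_calc_mnp(). The voltage field appears to be an opaque
 * level index rather than a value in volts.
 */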

int
gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gk20a_pll pll;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_gpc:
		gk20a_pllg_read_mnp(clk, &pll);
		return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}
}

int
gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gk20a_clk *clk = gk20a_clk(base);

	return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
					 GK20A_CLK_GPC_MDIV, &clk->pll);
}

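/*
 * gk20a_clk_prog() first attempts the glitchless NDIV slide and falls
 * back to a full bypass reprogram if the slide fails.
 */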
int
gk20a_clk_prog(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	int ret;

	ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
	if (ret)
		ret = gk20a_pllg_program_mnp(clk, &clk->pll);

	return ret;
}

void
gk20a_clk_tidy(struct nvkm_clk *base)
{
}

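/*
 * The STEPA/STEPB values below parameterize the dynamic NDIV ramp used
 * by gk20a_pllg_slide() for each supported reference rate; they appear
 * to be carried over as-is from the original ChromeOS driver and are
 * not otherwise documented here.
 */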
int
gk20a_clk_setup_slide(struct gk20a_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 step_a, step_b;

	switch (clk->parent_rate) {
	case 12000000:
	case 12800000:
	case 13000000:
		step_a = 0x2b;
		step_b = 0x0b;
		break;
	case 19200000:
		step_a = 0x12;
		step_b = 0x08;
		break;
	case 38400000:
		step_a = 0x04;
		step_b = 0x05;
		break;
	default:
		nvkm_error(subdev, "invalid parent clock rate %u kHz",
			   clk->parent_rate / KHZ);
		return -EINVAL;
	}

	nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
		step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
	nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
		step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);

	return 0;
}

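/*
 * Shutdown path: slide down to the minimum VCO, disable the PLL, and
 * finally assert IDDQ to power the PLL macro down to its quiescent
 * state (reversed by gk20a_clk_init() before any other PLL access).
 */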
void
gk20a_clk_fini(struct nvkm_clk *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gk20a_clk *clk = gk20a_clk(base);

	/* slide to VCO min */
	if (gk20a_pllg_is_enabled(clk)) {
		struct gk20a_pll pll;
		u32 n_lo;

		gk20a_pllg_read_mnp(clk, &pll);
		n_lo = gk20a_pllg_n_lo(clk, &pll);
		gk20a_pllg_slide(clk, n_lo);
	}

	gk20a_pllg_disable(clk);

	/* set IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}

static int
gk20a_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	/* get out from IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
	nvkm_rd32(device, GPCPLL_CFG);
	udelay(5);

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
		  GPC2CLK_OUT_INIT_VAL);

	ret = gk20a_clk_setup_slide(clk);
	if (ret)
		return ret;

	/* Start with lowest frequency */
	base->func->calc(base, &base->func->pstates[0].base);
	ret = base->func->prog(&clk->base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	return 0;
}

static const struct nvkm_clk_func
gk20a_clk = {
	.init = gk20a_clk_init,
	.fini = gk20a_clk_fini,
	.read = gk20a_clk_read,
	.calc = gk20a_clk_calc,
	.prog = gk20a_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gk20a_pstates,
	.nr_pstates = ARRAY_SIZE(gk20a_pstates),
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max }
	}
};

int
gk20a_clk_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       const struct nvkm_clk_func *func, const struct gk20a_clk_pllg_params *params,
	       struct gk20a_clk *clk)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	int ret;
	int i;

	/* Finish initializing the pstates */
	for (i = 0; i < func->nr_pstates; i++) {
		INIT_LIST_HEAD(&func->pstates[i].list);
		func->pstates[i].pstate = i + 1;
	}

	clk->params = params;
	clk->parent_rate = clk_get_rate(tdev->clk);

	ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base);
	if (ret)
		return ret;

	nvkm_debug(&clk->base.subdev, "parent clock rate: %d kHz\n",
		   clk->parent_rate / KHZ);

	return 0;
}
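
/*
 * A chip-specific variant embedding struct gk20a_clk as its base would
 * typically wrap gk20a_clk_ctor() with its own nvkm_clk_func, PLL
 * parameter table and pl_to_div callbacks, along the lines of this
 * hypothetical sketch (my_clk_func, my_pllg_params and my_clk are
 * illustrative names, not part of this file):
 *
 *	ret = gk20a_clk_ctor(device, type, inst, &my_clk_func,
 *			     &my_pllg_params, &my_clk->base);
 */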

int
gk20a_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_clk **pclk)
{
	struct gk20a_clk *clk;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base;

	ret = gk20a_clk_ctor(device, type, inst, &gk20a_clk, &gk20a_pllg_params, clk);

	clk->pl_to_div = pl_to_div;
	clk->div_to_pl = div_to_pl;
	return ret;
}