cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

msm_gpu_devfreq.c (8952B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_gpu.h"
#include "msm_gpu_trace.h"

#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/math64.h>
#include <linux/units.h>

/*
 * Power Management:
 */

static int msm_devfreq_target(struct device *dev, unsigned long *freq,
                u32 flags)
{
        struct msm_gpu *gpu = dev_to_gpu(dev);
        struct dev_pm_opp *opp;

        /*
         * Note that devfreq_recommended_opp() can modify the freq
         * to something that actually is in the opp table:
         */
        opp = devfreq_recommended_opp(dev, freq, flags);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));

        if (gpu->funcs->gpu_set_freq)
                gpu->funcs->gpu_set_freq(gpu, opp);
        else
                clk_set_rate(gpu->core_clk, *freq);

        dev_pm_opp_put(opp);

        return 0;
}

static unsigned long get_freq(struct msm_gpu *gpu)
{
        if (gpu->funcs->gpu_get_freq)
                return gpu->funcs->gpu_get_freq(gpu);

        return clk_get_rate(gpu->core_clk);
}

static void get_raw_dev_status(struct msm_gpu *gpu,
                struct devfreq_dev_status *status)
{
        struct msm_gpu_devfreq *df = &gpu->devfreq;
        u64 busy_cycles, busy_time;
        unsigned long sample_rate;
        ktime_t time;

        status->current_frequency = get_freq(gpu);
        busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
        time = ktime_get();

        busy_time = busy_cycles - df->busy_cycles;
        status->total_time = ktime_us_delta(time, df->time);

        df->busy_cycles = busy_cycles;
        df->time = time;

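        /*
         * Convert the busy cycle delta into microseconds of busy time:
         * sample_rate is the rate (in cycles per second) at which the
         * busy counter ticks, so cycles * USEC_PER_SEC / sample_rate
         * yields microseconds.
         */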
        busy_time *= USEC_PER_SEC;
        do_div(busy_time, sample_rate);
        if (WARN_ON(busy_time > ~0LU))
                busy_time = ~0LU;

        status->busy_time = busy_time;
}

static void update_average_dev_status(struct msm_gpu *gpu,
                const struct devfreq_dev_status *raw)
{
        struct msm_gpu_devfreq *df = &gpu->devfreq;
        const u32 polling_ms = df->devfreq->profile->polling_ms;
        const u32 max_history_ms = polling_ms * 11 / 10;
        struct devfreq_dev_status *avg = &df->average_status;
        u64 avg_freq;

        /* simple_ondemand governor interacts poorly with gpu->clamp_to_idle.
         * When we enforce the constraint on idle, it calls get_dev_status
         * which would normally reset the stats.  When we remove the
         * constraint on active, it calls get_dev_status again where busy_time
         * would be 0.
         *
         * To remedy this, we always return the average load over the past
         * polling_ms.
         */

        /* raw is longer than polling_ms or avg has no history */
        if (div_u64(raw->total_time, USEC_PER_MSEC) >= polling_ms ||
            !avg->total_time) {
                *avg = *raw;
                return;
        }

        /* Truncate the oldest history first.
         *
         * Because we keep the history with a single devfreq_dev_status,
         * rather than a list of devfreq_dev_status, we have to assume freq
         * and load are the same over avg->total_time.  We can scale down
         * avg->busy_time and avg->total_time by the same factor to drop
         * history.
         */
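        /* E.g. with polling_ms = 50 (so max_history_ms = 55): if
         * avg->total_time is 40000us and raw->total_time is 20000us, the
         * combined 60ms of history exceeds 55ms, so avg->total_time is
         * rescaled to 50000 - 20000 = 30000us and avg->busy_time is scaled
         * by the same 30000/40000 factor.
         */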
        if (div_u64(avg->total_time + raw->total_time, USEC_PER_MSEC) >=
                        max_history_ms) {
                const u32 new_total_time = polling_ms * USEC_PER_MSEC -
                        raw->total_time;
                avg->busy_time = div_u64(
                                mul_u32_u32(avg->busy_time, new_total_time),
                                avg->total_time);
                avg->total_time = new_total_time;
        }

        /* compute the average freq over avg->total_time + raw->total_time */
        avg_freq = mul_u32_u32(avg->current_frequency, avg->total_time);
        avg_freq += mul_u32_u32(raw->current_frequency, raw->total_time);
        do_div(avg_freq, avg->total_time + raw->total_time);

        avg->current_frequency = avg_freq;
        avg->busy_time += raw->busy_time;
        avg->total_time += raw->total_time;
}

static int msm_devfreq_get_dev_status(struct device *dev,
                struct devfreq_dev_status *status)
{
        struct msm_gpu *gpu = dev_to_gpu(dev);
        struct devfreq_dev_status raw;

        get_raw_dev_status(gpu, &raw);
        update_average_dev_status(gpu, &raw);
        *status = gpu->devfreq.average_status;

        return 0;
}

static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
        *freq = get_freq(dev_to_gpu(dev));

        return 0;
}

static struct devfreq_dev_profile msm_devfreq_profile = {
        .timer = DEVFREQ_TIMER_DELAYED,
        .polling_ms = 50,
        .target = msm_devfreq_target,
        .get_dev_status = msm_devfreq_get_dev_status,
        .get_cur_freq = msm_devfreq_get_cur_freq,
};

static void msm_devfreq_boost_work(struct kthread_work *work);
static void msm_devfreq_idle_work(struct kthread_work *work);

static bool has_devfreq(struct msm_gpu *gpu)
{
        struct msm_gpu_devfreq *df = &gpu->devfreq;
        return !!df->devfreq;
}

void msm_devfreq_init(struct msm_gpu *gpu)
{
        struct msm_gpu_devfreq *df = &gpu->devfreq;

        /* We need target support to do devfreq */
        if (!gpu->funcs->gpu_busy)
                return;

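        /*
         * Two PM QoS requests steer devfreq from the driver: idle_freq is a
         * max-frequency request used to clamp the GPU down while idle (see
         * msm_devfreq_idle_work()/msm_devfreq_active()), and boost_freq is a
         * min-frequency request used to raise the floor from
         * msm_devfreq_boost().
         */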
        dev_pm_qos_add_request(&gpu->pdev->dev, &df->idle_freq,
                               DEV_PM_QOS_MAX_FREQUENCY,
                               PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
        dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
                               DEV_PM_QOS_MIN_FREQUENCY, 0);

        msm_devfreq_profile.initial_freq = gpu->fast_rate;

        /*
         * Don't set the freq_table or max_state and let devfreq build the
         * table from OPP.
         * After a deferred probe, these may have been left at non-zero
         * values, so set them back to zero before creating the devfreq
         * device.
         */
        msm_devfreq_profile.freq_table = NULL;
        msm_devfreq_profile.max_state = 0;

        df->devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
                        &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
                        NULL);

        if (IS_ERR(df->devfreq)) {
                DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
                df->devfreq = NULL;
                return;
        }

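        /*
         * Start with devfreq suspended; it is resumed again via
         * msm_devfreq_resume(), which also resets the busy counters.
         */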
        devfreq_suspend_device(df->devfreq);

        gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, df->devfreq);
        if (IS_ERR(gpu->cooling)) {
                DRM_DEV_ERROR(&gpu->pdev->dev,
                                "Couldn't register GPU cooling device\n");
                gpu->cooling = NULL;
        }

        msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work,
                              CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
                              CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}

static void cancel_idle_work(struct msm_gpu_devfreq *df)
{
        hrtimer_cancel(&df->idle_work.timer);
        kthread_cancel_work_sync(&df->idle_work.work);
}

static void cancel_boost_work(struct msm_gpu_devfreq *df)
{
        hrtimer_cancel(&df->boost_work.timer);
        kthread_cancel_work_sync(&df->boost_work.work);
}

void msm_devfreq_cleanup(struct msm_gpu *gpu)
{
        struct msm_gpu_devfreq *df = &gpu->devfreq;

        if (!has_devfreq(gpu))
                return;

        devfreq_cooling_unregister(gpu->cooling);
        dev_pm_qos_remove_request(&df->boost_freq);
        dev_pm_qos_remove_request(&df->idle_freq);
}

void msm_devfreq_resume(struct msm_gpu *gpu)
{
        struct msm_gpu_devfreq *df = &gpu->devfreq;

        if (!has_devfreq(gpu))
                return;

        df->busy_cycles = 0;
        df->time = ktime_get();

        devfreq_resume_device(df->devfreq);
}

void msm_devfreq_suspend(struct msm_gpu *gpu)
{
        struct msm_gpu_devfreq *df = &gpu->devfreq;

        if (!has_devfreq(gpu))
                return;

        devfreq_suspend_device(df->devfreq);

        cancel_idle_work(df);
        cancel_boost_work(df);
}

static void msm_devfreq_boost_work(struct kthread_work *work)
{
        struct msm_gpu_devfreq *df = container_of(work,
                        struct msm_gpu_devfreq, boost_work.work);

        dev_pm_qos_update_request(&df->boost_freq, 0);
}

void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor)
{
        struct msm_gpu_devfreq *df = &gpu->devfreq;
        uint64_t freq;

        if (!has_devfreq(gpu))
                return;

        freq = get_freq(gpu);
        freq *= factor;

        /*
         * A nice little trap is that PM QoS operates in terms of KHz,
         * while devfreq operates in terms of Hz:
         */
        do_div(freq, HZ_PER_KHZ);

        dev_pm_qos_update_request(&df->boost_freq, freq);

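        /*
         * The boost is temporary: boost_work fires one polling interval
         * from now and drops the min-frequency floor back to 0.
         */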
        msm_hrtimer_queue_work(&df->boost_work,
                               ms_to_ktime(msm_devfreq_profile.polling_ms),
                               HRTIMER_MODE_REL);
}

void msm_devfreq_active(struct msm_gpu *gpu)
{
        struct msm_gpu_devfreq *df = &gpu->devfreq;
        unsigned int idle_time;

        if (!has_devfreq(gpu))
                return;

        /*
         * Cancel any pending transition to idle frequency:
         */
        cancel_idle_work(df);

        idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));

        /*
         * If we've been idle for a significant fraction of a polling
         * interval, then we won't meet the threshold of busyness for
         * the governor to ramp up the freq.. so give some boost
         */
        if (idle_time > msm_devfreq_profile.polling_ms) {
                msm_devfreq_boost(gpu, 2);
        }

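        /*
         * Lift the idle clamp (if any) so the governor may select any OPP
         * again.
         */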
        dev_pm_qos_update_request(&df->idle_freq,
                                  PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
}


static void msm_devfreq_idle_work(struct kthread_work *work)
{
        struct msm_gpu_devfreq *df = container_of(work,
                        struct msm_gpu_devfreq, idle_work.work);
        struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);

        df->idle_time = ktime_get();

        if (gpu->clamp_to_idle)
                dev_pm_qos_update_request(&df->idle_freq, 0);
}

void msm_devfreq_idle(struct msm_gpu *gpu)
{
        struct msm_gpu_devfreq *df = &gpu->devfreq;

        if (!has_devfreq(gpu))
                return;

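        /* Defer the actual idle transition to idle_work, 1ms from now. */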
        msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
                               HRTIMER_MODE_REL);
}