cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

drm_self_refresh_helper.c (8454B)


      1// SPDX-License-Identifier: MIT
      2/*
      3 * Copyright (C) 2019 Google, Inc.
      4 *
      5 * Authors:
      6 * Sean Paul <seanpaul@chromium.org>
      7 */
      8#include <linux/average.h>
      9#include <linux/bitops.h>
     10#include <linux/slab.h>
     11#include <linux/workqueue.h>
     12
     13#include <drm/drm_atomic.h>
     14#include <drm/drm_atomic_helper.h>
     15#include <drm/drm_connector.h>
     16#include <drm/drm_crtc.h>
     17#include <drm/drm_device.h>
     18#include <drm/drm_mode_config.h>
     19#include <drm/drm_modeset_lock.h>
     20#include <drm/drm_print.h>
     21#include <drm/drm_self_refresh_helper.h>
     22
     23/**
     24 * DOC: overview
     25 *
     26 * This helper library provides an easy way for drivers to leverage the atomic
     27 * framework to implement panel self refresh (SR) support. Drivers are
     28 * responsible for initializing and cleaning up the SR helpers on load/unload
     29 * (see &drm_self_refresh_helper_init/&drm_self_refresh_helper_cleanup).
     30 * The connector is responsible for setting
     31 * &drm_connector_state.self_refresh_aware to true at runtime if it is SR-aware
     32 * (meaning it knows how to initiate self refresh on the panel).
     33 *
     34 * Once a crtc has enabled SR using &drm_self_refresh_helper_init, the
     35 * helpers will monitor activity and call back into the driver to enable/disable
     36 * SR as appropriate. The best way to think about this is that it's a DPMS
     37 * on/off request with &drm_crtc_state.self_refresh_active set in crtc state
     38 * that tells you to disable/enable SR on the panel instead of power-cycling it.
     39 *
     40 * During SR, drivers may choose to fully disable their crtc/encoder/bridge
     41 * hardware (in which case no driver changes are necessary), or they can inspect
     42 * &drm_crtc_state.self_refresh_active if they want to enter low power mode
     43 * without full disable (in case full disable/enable is too slow).
     44 *
     45 * SR will be deactivated if there are any atomic updates affecting the
     46 * pipe that is in SR mode. If a crtc is driving multiple connectors, all
     47 * connectors must be SR aware and all will enter/exit SR mode at the same time.
     48 *
     49 * If the crtc and connector are SR aware, but the panel connected does not
     50 * support it (or is otherwise unable to enter SR), the driver should fail
     51 * atomic_check when &drm_crtc_state.self_refresh_active is true.
     52 */
     53
/* Initial EWMA seed (ms) so entry delays are sane before real samples land. */
#define SELF_REFRESH_AVG_SEED_MS 200

/* Declares struct ewma_psr_time + helpers; see linux/average.h for params. */
DECLARE_EWMA(psr_time, 4, 4)

/*
 * Per-crtc self refresh state, hung off &drm_crtc.self_refresh_data by
 * drm_self_refresh_helper_init() and freed by _cleanup().
 */
struct drm_self_refresh_data {
	struct drm_crtc *crtc;			/* back-pointer to owning crtc */
	struct delayed_work entry_work;		/* deferred SR-entry commit */

	struct mutex avg_mutex;			/* guards the two averages below */
	struct ewma_psr_time entry_avg_ms;	/* avg commit time entering SR */
	struct ewma_psr_time exit_avg_ms;	/* avg commit time exiting SR */
};
     66
/*
 * Delayed work which tries to put the crtc into self refresh once activity
 * has died down.  It does so via a normal atomic commit that turns the crtc
 * off (crtc_state->active = false) while setting self_refresh_active, which
 * is how drivers distinguish SR entry from a plain DPMS off.
 *
 * Silently bails out if the crtc got disabled in the meantime or if any
 * attached connector is not self-refresh aware.  Uses the standard
 * -EDEADLK backoff/retry dance for modeset locking.
 */
static void drm_self_refresh_helper_entry_work(struct work_struct *work)
{
	struct drm_self_refresh_data *sr_data = container_of(
				to_delayed_work(work),
				struct drm_self_refresh_data, entry_work);
	struct drm_crtc *crtc = sr_data->crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	int i, ret = 0;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_drop_locks;
	}

retry:
	state->acquire_ctx = &ctx;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	/* Nothing to do if the crtc was turned off before the work ran. */
	if (!crtc_state->enable)
		goto out;

	ret = drm_atomic_add_affected_connectors(state, crtc);
	if (ret)
		goto out;

	/* Either every connector on the crtc supports SR, or none enters it. */
	for_each_new_connector_in_state(state, conn, conn_state, i) {
		if (!conn_state->self_refresh_aware)
			goto out;
	}

	crtc_state->active = false;
	crtc_state->self_refresh_active = true;

	ret = drm_atomic_commit(state);
	if (ret)
		goto out;

out:
	if (ret == -EDEADLK) {
		/* Lock contention: clear the state, back off, and retry. */
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_atomic_state_put(state);

out_drop_locks:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
    131
    132/**
    133 * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages
    134 * @state: the state which has just been applied to hardware
    135 * @commit_time_ms: the amount of time in ms that this commit took to complete
    136 * @new_self_refresh_mask: bitmask of crtc's that have self_refresh_active in
    137 *    new state
    138 *
    139 * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will
    140 * update the average entry/exit self refresh times on self refresh transitions.
    141 * These averages will be used when calculating how long to delay before
    142 * entering self refresh mode after activity.
    143 */
    144void
    145drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
    146					 unsigned int commit_time_ms,
    147					 unsigned int new_self_refresh_mask)
    148{
    149	struct drm_crtc *crtc;
    150	struct drm_crtc_state *old_crtc_state;
    151	int i;
    152
    153	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
    154		bool new_self_refresh_active = new_self_refresh_mask & BIT(i);
    155		struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
    156		struct ewma_psr_time *time;
    157
    158		if (old_crtc_state->self_refresh_active ==
    159		    new_self_refresh_active)
    160			continue;
    161
    162		if (new_self_refresh_active)
    163			time = &sr_data->entry_avg_ms;
    164		else
    165			time = &sr_data->exit_avg_ms;
    166
    167		mutex_lock(&sr_data->avg_mutex);
    168		ewma_psr_time_add(time, commit_time_ms);
    169		mutex_unlock(&sr_data->avg_mutex);
    170	}
    171}
    172EXPORT_SYMBOL(drm_self_refresh_helper_update_avg_times);
    173
    174/**
    175 * drm_self_refresh_helper_alter_state - Alters the atomic state for SR exit
    176 * @state: the state currently being checked
    177 *
    178 * Called at the end of atomic check. This function checks the state for flags
    179 * incompatible with self refresh exit and changes them. This is a bit
    180 * disingenuous since userspace is expecting one thing and we're giving it
    181 * another. However in order to keep self refresh entirely hidden from
    182 * userspace, this is required.
    183 *
    184 * At the end, we queue up the self refresh entry work so we can enter PSR after
    185 * the desired delay.
    186 */
    187void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state)
    188{
    189	struct drm_crtc *crtc;
    190	struct drm_crtc_state *crtc_state;
    191	int i;
    192
    193	if (state->async_update || !state->allow_modeset) {
    194		for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
    195			if (crtc_state->self_refresh_active) {
    196				state->async_update = false;
    197				state->allow_modeset = true;
    198				break;
    199			}
    200		}
    201	}
    202
    203	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
    204		struct drm_self_refresh_data *sr_data;
    205		unsigned int delay;
    206
    207		/* Don't trigger the entry timer when we're already in SR */
    208		if (crtc_state->self_refresh_active)
    209			continue;
    210
    211		sr_data = crtc->self_refresh_data;
    212		if (!sr_data)
    213			continue;
    214
    215		mutex_lock(&sr_data->avg_mutex);
    216		delay = (ewma_psr_time_read(&sr_data->entry_avg_ms) +
    217			 ewma_psr_time_read(&sr_data->exit_avg_ms)) * 2;
    218		mutex_unlock(&sr_data->avg_mutex);
    219
    220		mod_delayed_work(system_wq, &sr_data->entry_work,
    221				 msecs_to_jiffies(delay));
    222	}
    223}
    224EXPORT_SYMBOL(drm_self_refresh_helper_alter_state);
    225
    226/**
    227 * drm_self_refresh_helper_init - Initializes self refresh helpers for a crtc
    228 * @crtc: the crtc which supports self refresh supported displays
    229 *
    230 * Returns zero if successful or -errno on failure
    231 */
    232int drm_self_refresh_helper_init(struct drm_crtc *crtc)
    233{
    234	struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
    235
    236	/* Helper is already initialized */
    237	if (WARN_ON(sr_data))
    238		return -EINVAL;
    239
    240	sr_data = kzalloc(sizeof(*sr_data), GFP_KERNEL);
    241	if (!sr_data)
    242		return -ENOMEM;
    243
    244	INIT_DELAYED_WORK(&sr_data->entry_work,
    245			  drm_self_refresh_helper_entry_work);
    246	sr_data->crtc = crtc;
    247	mutex_init(&sr_data->avg_mutex);
    248	ewma_psr_time_init(&sr_data->entry_avg_ms);
    249	ewma_psr_time_init(&sr_data->exit_avg_ms);
    250
    251	/*
    252	 * Seed the averages so they're non-zero (and sufficiently large
    253	 * for even poorly performing panels). As time goes on, this will be
    254	 * averaged out and the values will trend to their true value.
    255	 */
    256	ewma_psr_time_add(&sr_data->entry_avg_ms, SELF_REFRESH_AVG_SEED_MS);
    257	ewma_psr_time_add(&sr_data->exit_avg_ms, SELF_REFRESH_AVG_SEED_MS);
    258
    259	crtc->self_refresh_data = sr_data;
    260	return 0;
    261}
    262EXPORT_SYMBOL(drm_self_refresh_helper_init);
    263
    264/**
    265 * drm_self_refresh_helper_cleanup - Cleans up self refresh helpers for a crtc
    266 * @crtc: the crtc to cleanup
    267 */
    268void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc)
    269{
    270	struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
    271
    272	/* Helper is already uninitialized */
    273	if (!sr_data)
    274		return;
    275
    276	crtc->self_refresh_data = NULL;
    277
    278	cancel_delayed_work_sync(&sr_data->entry_work);
    279	kfree(sr_data);
    280}
    281EXPORT_SYMBOL(drm_self_refresh_helper_cleanup);