cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vkms_crtc.c (8331B)


// SPDX-License-Identifier: GPL-2.0+

#include <linux/dma-fence.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

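/*
 * vkms_vblank_simulate() - hrtimer callback that emulates a hardware vblank
 *
 * Forwards the timer by one frame period, hands the vblank event to the DRM
 * core and, if the composer is enabled, records the frame range under
 * composer_lock and queues the composer worker to compute the CRC.
 */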
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
	struct vkms_output *output = container_of(timer, struct vkms_output,
						  vblank_hrtimer);
	struct drm_crtc *crtc = &output->crtc;
	struct vkms_crtc_state *state;
	u64 ret_overrun;
	bool ret, fence_cookie;

	fence_cookie = dma_fence_begin_signalling();

	ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
					  output->period_ns);
	if (ret_overrun != 1)
		pr_warn("%s: vblank timer overrun\n", __func__);

	spin_lock(&output->lock);
	ret = drm_crtc_handle_vblank(crtc);
	if (!ret)
		DRM_ERROR("vkms failure on handling vblank");

	state = output->composer_state;
	spin_unlock(&output->lock);

	if (state && output->composer_enabled) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);

		/* update frame_start only if a queued vkms_composer_worker()
		 * has read the data
		 */
		spin_lock(&output->composer_lock);
		if (!state->crc_pending)
			state->frame_start = frame;
		else
			DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
					 state->frame_start, frame);
		state->frame_end = frame;
		state->crc_pending = true;
		spin_unlock(&output->composer_lock);

		ret = queue_work(output->composer_workq, &state->composer_work);
		if (!ret)
			DRM_DEBUG_DRIVER("Composer worker already queued\n");
	}

	dma_fence_end_signalling(fence_cookie);

	return HRTIMER_RESTART;
}

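/*
 * vkms_enable_vblank() - start the software vblank source
 *
 * Programs a repeating hrtimer with the frame duration computed by the
 * vblank core, so vkms_vblank_simulate() fires once per emulated frame.
 */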
static int vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	out->vblank_hrtimer.function = &vkms_vblank_simulate;
	out->period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);

	return 0;
}

static void vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

	hrtimer_cancel(&out->vblank_hrtimer);
}

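/*
 * vkms_get_vblank_timestamp() - report the timestamp of the last vblank
 *
 * While vblanks are enabled this is the expiry time of the vblank hrtimer
 * corrected back by one frame period (see the comment in the body); when
 * they are disabled the current time is reported instead.
 */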
static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
				      int *max_error, ktime_t *vblank_time,
				      bool in_vblank_irq)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
	struct vkms_output *output = &vkmsdev->output;
	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= output->period_ns;

	return true;
}

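/*
 * vkms_atomic_crtc_duplicate_state() - duplicate the CRTC state for an
 * atomic commit and reinitialize the per-state composer work item.
 */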
static struct drm_crtc_state *
vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state;

	if (WARN_ON(!crtc->state))
		return NULL;

	vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
	if (!vkms_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);

	INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);

	return &vkms_state->base;
}

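/*
 * vkms_atomic_crtc_destroy_state() - free a duplicated CRTC state along with
 * its active plane list, warning if its composer work is still pending.
 */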
static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
{
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	WARN_ON(work_pending(&vkms_state->composer_work));
	kfree(vkms_state->active_planes);
	kfree(vkms_state);
}

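/*
 * vkms_atomic_crtc_reset() - install a fresh, zeroed CRTC state, releasing
 * any previously attached state first.
 */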
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state =
		kzalloc(sizeof(*vkms_state), GFP_KERNEL);

	if (crtc->state)
		vkms_atomic_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
	if (vkms_state)
		INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}

static const struct drm_crtc_funcs vkms_crtc_funcs = {
	.set_config             = drm_atomic_helper_set_config,
	.destroy                = drm_crtc_cleanup,
	.page_flip              = drm_atomic_helper_page_flip,
	.reset                  = vkms_atomic_crtc_reset,
	.atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
	.atomic_destroy_state   = vkms_atomic_crtc_destroy_state,
	.enable_vblank		= vkms_enable_vblank,
	.disable_vblank		= vkms_disable_vblank,
	.get_vblank_timestamp	= vkms_get_vblank_timestamp,
	.get_crc_sources	= vkms_get_crc_sources,
	.set_crc_source		= vkms_set_crc_source,
	.verify_crc_source	= vkms_verify_crc_source,
};

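/*
 * vkms_crtc_atomic_check() - collect the visible planes attached to this
 * CRTC into the new CRTC state so the composer can later iterate over them.
 */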
static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i = 0, ret;

	if (vkms_state->active_planes)
		return 0;

	ret = drm_atomic_add_affected_planes(crtc_state->state, crtc);
	if (ret < 0)
		return ret;

	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
								  plane);
		WARN_ON(!plane_state);

		if (!plane_state->visible)
			continue;

		i++;
	}

	vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
	if (!vkms_state->active_planes)
		return -ENOMEM;
	vkms_state->num_active_planes = i;

	i = 0;
	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
								  plane);

		if (!plane_state->visible)
			continue;

		vkms_state->active_planes[i++] =
			to_vkms_plane_state(plane_state);
	}

	return 0;
}

static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
}

static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	drm_crtc_vblank_off(crtc);
}

static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	/* This lock is held across the atomic commit to block vblank timer
	 * from scheduling vkms_composer_worker until the composer is updated
	 */
	spin_lock_irq(&vkms_output->lock);
}

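/*
 * vkms_crtc_atomic_flush() - send or arm the pending vblank event, publish
 * the new state to the composer and release the lock taken in
 * vkms_crtc_atomic_begin().
 */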
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	if (crtc->state->event) {
		spin_lock(&crtc->dev->event_lock);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}

	vkms_output->composer_state = to_vkms_crtc_state(crtc->state);

	spin_unlock_irq(&vkms_output->lock);
}

static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
	.atomic_check	= vkms_crtc_atomic_check,
	.atomic_begin	= vkms_crtc_atomic_begin,
	.atomic_flush	= vkms_crtc_atomic_flush,
	.atomic_enable	= vkms_crtc_atomic_enable,
	.atomic_disable	= vkms_crtc_atomic_disable,
};

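/*
 * vkms_crtc_init() - register the CRTC with its primary and cursor planes,
 * initialize its locks and allocate the ordered workqueue used by the
 * composer.
 */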
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
		   struct drm_plane *primary, struct drm_plane *cursor)
{
	struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
	int ret;

	ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
					&vkms_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to init CRTC\n");
		return ret;
	}

	drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);

	spin_lock_init(&vkms_out->lock);
	spin_lock_init(&vkms_out->composer_lock);

	vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
	if (!vkms_out->composer_workq)
		return -ENOMEM;

	return ret;
}