cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

crc.c (18981B)


      1// SPDX-License-Identifier: MIT
      2#include <linux/string.h>
      3#include <drm/drm_crtc.h>
      4#include <drm/drm_atomic_helper.h>
      5#include <drm/drm_vblank.h>
      6#include <drm/drm_vblank_work.h>
      7
      8#include <nvif/class.h>
      9#include <nvif/cl0002.h>
     10#include <nvif/timer.h>
     11
     12#include <nvhw/class/cl907d.h>
     13
     14#include "nouveau_drv.h"
     15#include "core.h"
     16#include "head.h"
     17#include "wndw.h"
     18#include "handles.h"
     19#include "crc.h"
     20
/* Human-readable names for enum nv50_crc_source, indexed in enum order.
 * Handed to userspace by nv50_crc_get_sources() and matched back to an
 * enum value in nv50_crc_parse_source().
 */
static const char * const nv50_crc_sources[] = {
	[NV50_CRC_SOURCE_NONE] = "none",
	[NV50_CRC_SOURCE_AUTO] = "auto",
	[NV50_CRC_SOURCE_RG] = "rg",
	[NV50_CRC_SOURCE_OUTP_ACTIVE] = "outp-active",
	[NV50_CRC_SOURCE_OUTP_COMPLETE] = "outp-complete",
	[NV50_CRC_SOURCE_OUTP_INACTIVE] = "outp-inactive",
};
     29
     30static int nv50_crc_parse_source(const char *buf, enum nv50_crc_source *s)
     31{
     32	int i;
     33
     34	if (!buf) {
     35		*s = NV50_CRC_SOURCE_NONE;
     36		return 0;
     37	}
     38
     39	i = match_string(nv50_crc_sources, ARRAY_SIZE(nv50_crc_sources), buf);
     40	if (i < 0)
     41		return i;
     42
     43	*s = i;
     44	return 0;
     45}
     46
     47int
     48nv50_crc_verify_source(struct drm_crtc *crtc, const char *source_name,
     49		       size_t *values_cnt)
     50{
     51	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
     52	enum nv50_crc_source source;
     53
     54	if (nv50_crc_parse_source(source_name, &source) < 0) {
     55		NV_DEBUG(drm, "unknown source %s\n", source_name);
     56		return -EINVAL;
     57	}
     58
     59	*values_cnt = 1;
     60	return 0;
     61}
     62
/* Return the table of supported CRC source names and its length. */
const char *const *nv50_crc_get_sources(struct drm_crtc *crtc, size_t *count)
{
	*count = ARRAY_SIZE(nv50_crc_sources);
	return nv50_crc_sources;
}
     68
/* Point the head's CRC notifier at @ctx (NULL detaches the current one)
 * and push an immediate core channel update with no interlocking, so the
 * change takes effect without waiting on other channels.
 */
static void
nv50_crc_program_ctx(struct nv50_head *head,
		     struct nv50_crc_notifier_ctx *ctx)
{
	struct nv50_disp *disp = nv50_disp(head->base.base.dev);
	struct nv50_core *core = disp->core;
	u32 interlock[NV50_DISP_INTERLOCK__SIZE] = { 0 };

	core->func->crc->set_ctx(head, ctx);
	core->func->update(core, interlock, false);
}
     80
/* vblank work: swap to the other double-buffered CRC notifier context.
 *
 * Must complete within a single vblank period: the detach + attach pair is
 * expected to land before the next frame, otherwise CRC entries may be
 * attributed to the wrong frame (we warn if the vblank count moved).
 */
static void nv50_crc_ctx_flip_work(struct kthread_work *base)
{
	struct drm_vblank_work *work = to_drm_vblank_work(base);
	struct nv50_crc *crc = container_of(work, struct nv50_crc, flip_work);
	struct nv50_head *head = container_of(crc, struct nv50_head, crc);
	struct drm_crtc *crtc = &head->base.base;
	struct drm_device *dev = crtc->dev;
	struct nv50_disp *disp = nv50_disp(dev);
	const uint64_t start_vbl = drm_crtc_vblank_count(crtc);
	uint64_t end_vbl;
	u8 new_idx = crc->ctx_idx ^ 1;	/* ctx[] is double-buffered: 0 <-> 1 */

	/*
	 * We don't want to accidentally wait for longer than the vblank, so
	 * try again for the next vblank if we don't grab the lock
	 */
	if (!mutex_trylock(&disp->mutex)) {
		drm_dbg_kms(dev, "Lock contended, delaying CRC ctx flip for %s\n", crtc->name);
		drm_vblank_work_schedule(work, start_vbl + 1, true);
		return;
	}

	drm_dbg_kms(dev, "Flipping notifier ctx for %s (%d -> %d)\n",
		    crtc->name, crc->ctx_idx, new_idx);

	/* Detach the old notifier first, then attach the new one */
	nv50_crc_program_ctx(head, NULL);
	nv50_crc_program_ctx(head, &crc->ctx[new_idx]);
	mutex_unlock(&disp->mutex);

	end_vbl = drm_crtc_vblank_count(crtc);
	if (unlikely(end_vbl != start_vbl))
		NV_ERROR(nouveau_drm(dev),
			 "Failed to flip CRC context on %s on time (%llu > %llu)\n",
			 crtc->name, end_vbl, start_vbl);

	/* Tell the vblank handler the flip is pending hw completion */
	spin_lock_irq(&crc->lock);
	crc->ctx_changed = true;
	spin_unlock_irq(&crc->lock);
}
    120
/* Zero a notifier context's backing memory (I/O mapping) so stale CRC
 * entries from a previous capture can't be reported again.
 */
static inline void nv50_crc_reset_ctx(struct nv50_crc_notifier_ctx *ctx)
{
	memset_io(ctx->mem.object.map.ptr, 0, ctx->mem.object.map.size);
}
    125
/* Drain CRC entries the hardware has written into the active notifier
 * context and report each to drm via drm_crtc_add_crc_entry(), advancing
 * crc->frame and crc->entry_idx. Stops at the first zero entry, which is
 * treated as "not written yet" (see comment below).
 */
static void
nv50_crc_get_entries(struct nv50_head *head,
		     const struct nv50_crc_func *func,
		     enum nv50_crc_source source)
{
	struct drm_crtc *crtc = &head->base.base;
	struct nv50_crc *crc = &head->crc;
	u32 output_crc;

	while (crc->entry_idx < func->num_entries) {
		/*
		 * While Nvidia's documentation says CRCs are written on each
		 * subsequent vblank after being enabled, in practice they
		 * aren't written immediately.
		 */
		output_crc = func->get_entry(head, &crc->ctx[crc->ctx_idx],
					     source, crc->entry_idx);
		if (!output_crc)
			return;

		drm_crtc_add_crc_entry(crtc, true, crc->frame, &output_crc);
		crc->frame++;
		crc->entry_idx++;
	}
}
    151
/* Per-vblank CRC bookkeeping for @head.
 *
 * Reports completed CRC entries from the active notifier context, and once
 * a pending context flip has finished in hardware (ctx_changed set by
 * nv50_crc_ctx_flip_work()), switches to the other context, resets the old
 * one, and schedules the next flip. Uses trylock so the vblank path never
 * blocks; skipped vblanks are caught up on the next one.
 */
void nv50_crc_handle_vblank(struct nv50_head *head)
{
	struct drm_crtc *crtc = &head->base.base;
	struct nv50_crc *crc = &head->crc;
	const struct nv50_crc_func *func =
		nv50_disp(head->base.base.dev)->core->func->crc;
	struct nv50_crc_notifier_ctx *ctx;
	bool need_reschedule = false;

	if (!func)
		return;

	/*
	 * We don't lose events if we aren't able to report CRCs until the
	 * next vblank, so only report CRCs if the locks we need aren't
	 * contended to prevent missing an actual vblank event
	 */
	if (!spin_trylock(&crc->lock))
		return;

	if (!crc->src)
		goto out;

	ctx = &crc->ctx[crc->ctx_idx];
	if (crc->ctx_changed && func->ctx_finished(head, ctx)) {
		/* Drain what's left in the old context before switching */
		nv50_crc_get_entries(head, func, crc->src);

		crc->ctx_idx ^= 1;
		crc->entry_idx = 0;
		crc->ctx_changed = false;

		/*
		 * Unfortunately when notifier contexts are changed during CRC
		 * capture, we will inevitably lose the CRC entry for the
		 * frame where the hardware actually latched onto the first
		 * UPDATE. According to Nvidia's hardware engineers, there's
		 * no workaround for this.
		 *
		 * Now, we could try to be smart here and calculate the number
		 * of missed CRCs based on audit timestamps, but those were
		 * removed starting with volta. Since we always flush our
		 * updates back-to-back without waiting, we'll just be
		 * optimistic and assume we always miss exactly one frame.
		 */
		drm_dbg_kms(head->base.base.dev,
			    "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n",
			    head->base.index, crc->frame);
		crc->frame++;

		nv50_crc_reset_ctx(ctx);
		need_reschedule = true;
	}

	nv50_crc_get_entries(head, func, crc->src);

	/* Flip again before the current context's entry space runs out */
	if (need_reschedule)
		drm_vblank_work_schedule(&crc->flip_work,
					 drm_crtc_vblank_count(crtc)
					 + crc->flip_threshold
					 - crc->entry_idx,
					 true);

out:
	spin_unlock(&crc->lock);
}
    217
/* Poll (via nvif_msec) for up to 50ms until the hardware marks @ctx as
 * finished. Logs an error on timeout; otherwise logs how long the wait
 * took. Purely diagnostic — the caller proceeds either way.
 */
static void nv50_crc_wait_ctx_finished(struct nv50_head *head,
				       const struct nv50_crc_func *func,
				       struct nv50_crc_notifier_ctx *ctx)
{
	struct drm_device *dev = head->base.base.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	s64 ret;

	ret = nvif_msec(&drm->client.device, 50,
			if (func->ctx_finished(head, ctx)) break;);
	if (ret == -ETIMEDOUT)
		NV_ERROR(drm,
			 "CRC notifier ctx for head %d not finished after 50ms\n",
			 head->base.index);
	else if (ret)
		NV_ATOMIC(drm,
			  "CRC notifier ctx for head-%d finished after %lldns\n",
			  head->base.index, ret);
}
    237
/* Atomic-commit helper: stop vblank-driven CRC reporting on every head
 * whose new state clears CRC capture (asyh->clr.crc). Clears crc->src
 * under the lock, drops the vblank reference taken when reporting was
 * started, and cancels any pending context-flip work.
 */
void nv50_crc_atomic_stop_reporting(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct nv50_head *head = nv50_head(crtc);
		struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
		struct nv50_crc *crc = &head->crc;

		if (!asyh->clr.crc)
			continue;

		spin_lock_irq(&crc->lock);
		crc->src = NV50_CRC_SOURCE_NONE;
		spin_unlock_irq(&crc->lock);

		drm_crtc_vblank_put(crtc);
		drm_vblank_work_cancel_sync(&crc->flip_work);

		NV_ATOMIC(nouveau_drm(crtc->dev),
			  "CRC reporting on vblank for head-%d disabled\n",
			  head->base.index);

		/* CRC generation is still enabled in hw, we'll just report
		 * any remaining CRC entries ourselves after it gets disabled
		 * in hardware
		 */
	}
}
    269
    270void nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *state)
    271{
    272	struct drm_crtc_state *new_crtc_state;
    273	struct drm_crtc *crtc;
    274	int i;
    275
    276	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
    277		struct nv50_head *head = nv50_head(crtc);
    278		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
    279		struct nv50_crc *crc = &head->crc;
    280		int i;
    281
    282		if (!asyh->set.crc)
    283			continue;
    284
    285		crc->entry_idx = 0;
    286		crc->ctx_changed = false;
    287		for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
    288			nv50_crc_reset_ctx(&crc->ctx[i]);
    289	}
    290}
    291
/* Atomic-commit helper: after CRC capture is disabled in hardware, wait
 * for the outstanding notifier context(s) to be marked finished so their
 * memory can be safely reused or freed. If a context flip was still
 * pending (ctx_changed), both the old and the new context are waited on.
 */
void nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *state)
{
	const struct nv50_crc_func *func =
		nv50_disp(state->dev)->core->func->crc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct nv50_head *head = nv50_head(crtc);
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_crc *crc = &head->crc;
		struct nv50_crc_notifier_ctx *ctx = &crc->ctx[crc->ctx_idx];

		if (!asyh->clr.crc)
			continue;

		if (crc->ctx_changed) {
			nv50_crc_wait_ctx_finished(head, func, ctx);
			ctx = &crc->ctx[crc->ctx_idx ^ 1];
		}
		nv50_crc_wait_ctx_finished(head, func, ctx);
	}
}
    316
/* Atomic-commit helper: start vblank-driven CRC reporting on every head
 * whose new state enables CRC capture (asyh->set.crc). Takes a vblank
 * reference (released in nv50_crc_atomic_stop_reporting()), seeds
 * crc->frame from the current vblank count, publishes the source, and
 * schedules the first notifier-context flip.
 */
void nv50_crc_atomic_start_reporting(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct nv50_head *head = nv50_head(crtc);
		struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
		struct nv50_crc *crc = &head->crc;
		u64 vbl_count;

		if (!asyh->set.crc)
			continue;

		/* NOTE(review): return value ignored — presumably vblanks
		 * are already enabled on an active CRTC here; confirm. */
		drm_crtc_vblank_get(crtc);

		spin_lock_irq(&crc->lock);
		vbl_count = drm_crtc_vblank_count(crtc);
		crc->frame = vbl_count;
		crc->src = asyh->crc.src;
		drm_vblank_work_schedule(&crc->flip_work,
					 vbl_count + crc->flip_threshold,
					 true);
		spin_unlock_irq(&crc->lock);

		NV_ATOMIC(nouveau_drm(crtc->dev),
			  "CRC reporting on vblank for head-%d enabled\n",
			  head->base.index);
	}
}
    348
/* Atomic-check helper: derive set.crc/clr.crc for @asyh by comparing the
 * old (@armh) and new (@asyh) CRC sources. A source change or modeset
 * determines whether CRC capture must be disabled and/or re-enabled; when
 * both happen in one commit, the whole flush must be split (flush_disable)
 * so ORs aren't reprogrammed in the same flush that disables CRCs.
 * Always returns 0.
 */
int nv50_crc_atomic_check_head(struct nv50_head *head,
			       struct nv50_head_atom *asyh,
			       struct nv50_head_atom *armh)
{
	struct nv50_atom *atom = nv50_atom(asyh->state.state);
	bool changed = armh->crc.src != asyh->crc.src;

	if (!armh->crc.src && !asyh->crc.src) {
		asyh->set.crc = false;
		asyh->clr.crc = false;
		return 0;
	}

	if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed) {
		asyh->clr.crc = armh->crc.src && armh->state.active;
		asyh->set.crc = asyh->crc.src && asyh->state.active;
		if (changed)
			asyh->set.or |= armh->or.crc_raster !=
					asyh->or.crc_raster;

		if (asyh->clr.crc && asyh->set.crc)
			atom->flush_disable = true;
	} else {
		asyh->set.crc = false;
		asyh->clr.crc = false;
	}

	return 0;
}
    378
/* Atomic-check helper: if any CRTC that is disabling CRCs (clr.crc) is
 * attached to an output whose OR state is also being set in this commit,
 * force atom->flush_disable so the OR reprogramming happens in a separate
 * flush from the CRC disable. Early-out if flush_disable is already set.
 */
void nv50_crc_atomic_check_outp(struct nv50_atom *atom)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	if (atom->flush_disable)
		return;

	for_each_oldnew_crtc_in_state(&atom->state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_outp_atom *outp_atom;
		struct nouveau_encoder *outp;
		struct drm_encoder *encoder, *enc;

		/* Resolve the encoder the head was driving in the old state */
		enc = nv50_head_atom_get_encoder(armh);
		if (!enc)
			continue;

		outp = nv50_real_outp(enc);
		if (!outp)
			continue;

		encoder = &outp->base.base;

		if (!asyh->clr.crc)
			continue;

		/*
		 * Re-programming ORs can't be done in the same flush as
		 * disabling CRCs
		 */
		list_for_each_entry(outp_atom, &atom->outp, head) {
			if (outp_atom->encoder == encoder) {
				if (outp_atom->set.mask) {
					atom->flush_disable = true;
					return;
				} else {
					break;
				}
			}
		}
	}
}
    425
    426static enum nv50_crc_source_type
    427nv50_crc_source_type(struct nouveau_encoder *outp,
    428		     enum nv50_crc_source source)
    429{
    430	struct dcb_output *dcbe = outp->dcb;
    431
    432	switch (source) {
    433	case NV50_CRC_SOURCE_NONE: return NV50_CRC_SOURCE_TYPE_NONE;
    434	case NV50_CRC_SOURCE_RG:   return NV50_CRC_SOURCE_TYPE_RG;
    435	default:		   break;
    436	}
    437
    438	if (dcbe->location != DCB_LOC_ON_CHIP)
    439		return NV50_CRC_SOURCE_TYPE_PIOR;
    440
    441	switch (dcbe->type) {
    442	case DCB_OUTPUT_DP:	return NV50_CRC_SOURCE_TYPE_SF;
    443	case DCB_OUTPUT_ANALOG:	return NV50_CRC_SOURCE_TYPE_DAC;
    444	default:		return NV50_CRC_SOURCE_TYPE_SOR;
    445	}
    446}
    447
/* Atomic-commit helper: program the head's CRC source from the new state,
 * attaching the currently active notifier context. Bails silently if the
 * head has no resolvable real output encoder.
 */
void nv50_crc_atomic_set(struct nv50_head *head,
			 struct nv50_head_atom *asyh)
{
	struct drm_crtc *crtc = &head->base.base;
	struct drm_device *dev = crtc->dev;
	struct nv50_crc *crc = &head->crc;
	const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
	struct nouveau_encoder *outp;
	struct drm_encoder *encoder;

	encoder = nv50_head_atom_get_encoder(asyh);
	if (!encoder)
		return;

	outp = nv50_real_outp(encoder);
	if (!outp)
		return;

	func->set_src(head, outp->or, nv50_crc_source_type(outp, asyh->crc.src),
		      &crc->ctx[crc->ctx_idx]);
}
    469
/* Atomic-commit helper: disable CRC generation on @head by selecting the
 * NONE source type with no notifier context.
 */
void nv50_crc_atomic_clr(struct nv50_head *head)
{
	const struct nv50_crc_func *func =
		nv50_disp(head->base.base.dev)->core->func->crc;

	func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL);
}
    477
    478static inline int
    479nv50_crc_raster_type(enum nv50_crc_source source)
    480{
    481	switch (source) {
    482	case NV50_CRC_SOURCE_NONE:
    483	case NV50_CRC_SOURCE_AUTO:
    484	case NV50_CRC_SOURCE_RG:
    485	case NV50_CRC_SOURCE_OUTP_ACTIVE:
    486		return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER;
    487	case NV50_CRC_SOURCE_OUTP_COMPLETE:
    488		return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER;
    489	case NV50_CRC_SOURCE_OUTP_INACTIVE:
    490		return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER;
    491	}
    492
    493	return 0;
    494}
    495
/* We handle mapping the memory for CRC notifiers ourselves, since each
 * notifier needs its own handle
 */
/* Allocate and map @len bytes of VRAM for one CRC notifier context and
 * create the DMA object handle the core channel uses to reference it.
 * On DMA-object failure the memory is torn down again. Returns 0 or a
 * negative errno.
 */
static inline int
nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
		  struct nv50_crc_notifier_ctx *ctx, size_t len, int idx)
{
	struct nv50_core *core = nv50_disp(head->base.base.dev)->core;
	int ret;

	ret = nvif_mem_ctor_map(mmu, "kmsCrcNtfy", NVIF_MEM_VRAM, len, &ctx->mem);
	if (ret)
		return ret;

	ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
			       NV50_DISP_HANDLE_CRC_CTX(head, idx),
			       NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = ctx->mem.addr,
					.limit =  ctx->mem.addr
						+ ctx->mem.size - 1,
			       }, sizeof(struct nv_dma_v0),
			       &ctx->ntfy);
	if (ret)
		goto fail_fini;

	return 0;

fail_fini:
	nvif_mem_dtor(&ctx->mem);
	return ret;
}
    530
/* Tear down a notifier context: destroy the DMA object handle, then free
 * the backing VRAM allocation.
 */
static inline void
nv50_crc_ctx_fini(struct nv50_crc_notifier_ctx *ctx)
{
	nvif_object_dtor(&ctx->ntfy);
	nvif_mem_dtor(&ctx->mem);
}
    537
/* Enable or disable CRC capture on @crtc from a userspace source string.
 *
 * When enabling, both double-buffered notifier contexts are allocated
 * first, then an atomic commit applies the new source; when disabling
 * (source "none" / NULL), the commit clears the source and the contexts
 * are freed afterwards. Contexts are also freed when enabling fails.
 * Returns 0 or a negative errno.
 */
int nv50_crc_set_source(struct drm_crtc *crtc, const char *source_str)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_crc *crc = &head->crc;
	const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
	struct nvif_mmu *mmu = &nouveau_drm(dev)->client.mmu;
	struct nv50_head_atom *asyh;
	struct drm_crtc_state *crtc_state;
	enum nv50_crc_source source;
	int ret = 0, ctx_flags = 0, i;

	ret = nv50_crc_parse_source(source_str, &source);
	if (ret)
		return ret;

	/*
	 * We don't want the user to accidentally interrupt us while we're
	 * disabling CRCs, so only allow interruptible lock acquisition when
	 * a source is being enabled
	 */
	if (source)
		ctx_flags |= DRM_MODESET_ACQUIRE_INTERRUPTIBLE;
	drm_modeset_acquire_init(&ctx, ctx_flags);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_acquire_fini;
	}
	state->acquire_ctx = &ctx;

	if (source) {
		for (i = 0; i < ARRAY_SIZE(head->crc.ctx); i++) {
			ret = nv50_crc_ctx_init(head, mmu, &crc->ctx[i],
						func->notifier_len, i);
			if (ret)
				goto out_ctx_fini;
		}
	}

retry:
	crtc_state = drm_atomic_get_crtc_state(state, &head->base.base);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		if (ret == -EDEADLK)
			goto deadlock;
		else if (ret)
			goto out_drop_locks;
	}
	asyh = nv50_head_atom(crtc_state);
	asyh->crc.src = source;
	asyh->or.crc_raster = nv50_crc_raster_type(source);

	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK)
		goto deadlock;
	else if (ret)
		goto out_drop_locks;

	if (!source) {
		/*
		 * If the user specified a custom flip threshold through
		 * debugfs, reset it
		 */
		crc->flip_threshold = func->flip_threshold;
	}

out_drop_locks:
	drm_modeset_drop_locks(&ctx);
out_ctx_fini:
	/* Contexts are only kept when a source was successfully enabled */
	if (!source || ret) {
		for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
			nv50_crc_ctx_fini(&crc->ctx[i]);
	}
	drm_atomic_state_put(state);
out_acquire_fini:
	drm_modeset_acquire_fini(&ctx);
	return ret;

deadlock:
	drm_atomic_state_clear(state);
	drm_modeset_backoff(&ctx);
	goto retry;
}
    624
/* debugfs read: print the head's current context-flip threshold, holding
 * the CRTC lock to read it consistently.
 */
static int
nv50_crc_debugfs_flip_threshold_get(struct seq_file *m, void *data)
{
	struct nv50_head *head = m->private;
	struct drm_crtc *crtc = &head->base.base;
	struct nv50_crc *crc = &head->crc;
	int ret;

	ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
	if (ret)
		return ret;

	seq_printf(m, "%d\n", crc->flip_threshold);

	drm_modeset_unlock(&crtc->mutex);
	return ret;
}
    642
/* debugfs open: bind the seq_file reader to this head (inode private data). */
static int
nv50_crc_debugfs_flip_threshold_open(struct inode *inode, struct file *file)
{
	return single_open(file, nv50_crc_debugfs_flip_threshold_get,
			   inode->i_private);
}
    649
/* debugfs write: set the head's context-flip threshold for the next
 * capture. Accepts 0..func->flip_threshold, or -1 to restore the
 * hardware default; rejects values while a capture is active (-EBUSY).
 * Returns @len on success or a negative errno.
 */
static ssize_t
nv50_crc_debugfs_flip_threshold_set(struct file *file,
				    const char __user *ubuf, size_t len,
				    loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct nv50_head *head = m->private;
	struct nv50_head_atom *armh;
	struct drm_crtc *crtc = &head->base.base;
	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
	struct nv50_crc *crc = &head->crc;
	const struct nv50_crc_func *func =
		nv50_disp(crtc->dev)->core->func->crc;
	int value, ret;

	ret = kstrtoint_from_user(ubuf, len, 10, &value);
	if (ret)
		return ret;

	/* Clamp to [-1, func->flip_threshold]; -1 means "use the default" */
	if (value > func->flip_threshold)
		return -EINVAL;
	else if (value == -1)
		value = func->flip_threshold;
	else if (value < -1)
		return -EINVAL;

	ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
	if (ret)
		return ret;

	/* Can't change the threshold while CRCs are being captured */
	armh = nv50_head_atom(crtc->state);
	if (armh->crc.src) {
		ret = -EBUSY;
		goto out;
	}

	NV_DEBUG(drm,
		 "Changing CRC flip threshold for next capture on head-%d to %d\n",
		 head->base.index, value);
	crc->flip_threshold = value;
	ret = len;

out:
	drm_modeset_unlock(&crtc->mutex);
	return ret;
}
    696
/* File operations for the per-head "flip_threshold" debugfs entry. */
static const struct file_operations nv50_crc_flip_threshold_fops = {
	.owner = THIS_MODULE,
	.open = nv50_crc_debugfs_flip_threshold_open,
	.read = seq_read,
	.write = nv50_crc_debugfs_flip_threshold_set,
	.release = single_release,
};
    704
/* Create the per-head "nv_crc/flip_threshold" debugfs entry. No-op (and
 * success) when the display core lacks CRC support or the CRTC has no
 * debugfs directory. Always returns 0.
 */
int nv50_head_crc_late_register(struct nv50_head *head)
{
	struct drm_crtc *crtc = &head->base.base;
	const struct nv50_crc_func *func =
		nv50_disp(crtc->dev)->core->func->crc;
	struct dentry *root;

	if (!func || !crtc->debugfs_entry)
		return 0;

	root = debugfs_create_dir("nv_crc", crtc->debugfs_entry);
	debugfs_create_file("flip_threshold", 0644, root, head,
			    &nv50_crc_flip_threshold_fops);

	return 0;
}
    721
/* One-time CRC state setup for a head: default flip threshold, spinlock,
 * and the vblank work used to swap notifier contexts.
 */
static inline void
nv50_crc_init_head(struct nv50_disp *disp, const struct nv50_crc_func *func,
		   struct nv50_head *head)
{
	struct nv50_crc *crc = &head->crc;

	crc->flip_threshold = func->flip_threshold;
	spin_lock_init(&crc->lock);
	drm_vblank_work_init(&crc->flip_work, &head->base.base,
			     nv50_crc_ctx_flip_work);
}
    733
/* Driver-load entry point: initialize CRC state on every CRTC, or do
 * nothing when the display core has no CRC support.
 */
void nv50_crc_init(struct drm_device *dev)
{
	struct nv50_disp *disp = nv50_disp(dev);
	struct drm_crtc *crtc;
	const struct nv50_crc_func *func = disp->core->func->crc;

	if (!func)
		return;

	drm_for_each_crtc(crtc, dev)
		nv50_crc_init_head(disp, func, nv50_head(crtc));
}