cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

panfrost_drv.c (18551B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"

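/*
 * Opt-in gate checked by panfrost_unstable_ioctl_check() below; the perfcnt
 * ioctls rely on it.  The flag can be flipped at runtime by root, e.g. via
 * /sys/module/panfrost/parameters/unstable_ioctls (module_param_unsafe()
 * taints the kernel once the value is changed).
 */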
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);

static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
	struct drm_panfrost_get_param *param = data;
	struct panfrost_device *pfdev = ddev->dev_private;

	if (param->pad != 0)
		return -EINVAL;

#define PANFROST_FEATURE(name, member)			\
	case DRM_PANFROST_PARAM_ ## name:		\
		param->value = pfdev->features.member;	\
		break
#define PANFROST_FEATURE_ARRAY(name, member, max)			\
	case DRM_PANFROST_PARAM_ ## name ## 0 ...			\
		DRM_PANFROST_PARAM_ ## name ## max:			\
		param->value = pfdev->features.member[param->param -	\
			DRM_PANFROST_PARAM_ ## name ## 0];		\
		break

	switch (param->param) {
		PANFROST_FEATURE(GPU_PROD_ID, id);
		PANFROST_FEATURE(GPU_REVISION, revision);
		PANFROST_FEATURE(SHADER_PRESENT, shader_present);
		PANFROST_FEATURE(TILER_PRESENT, tiler_present);
		PANFROST_FEATURE(L2_PRESENT, l2_present);
		PANFROST_FEATURE(STACK_PRESENT, stack_present);
		PANFROST_FEATURE(AS_PRESENT, as_present);
		PANFROST_FEATURE(JS_PRESENT, js_present);
		PANFROST_FEATURE(L2_FEATURES, l2_features);
		PANFROST_FEATURE(CORE_FEATURES, core_features);
		PANFROST_FEATURE(TILER_FEATURES, tiler_features);
		PANFROST_FEATURE(MEM_FEATURES, mem_features);
		PANFROST_FEATURE(MMU_FEATURES, mmu_features);
		PANFROST_FEATURE(THREAD_FEATURES, thread_features);
		PANFROST_FEATURE(MAX_THREADS, max_threads);
		PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
				thread_max_workgroup_sz);
		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
				thread_max_barrier_sz);
		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
		PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
		PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
	default:
		return -EINVAL;
	}

	return 0;
}
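/*
 * Illustrative userspace sketch (not part of this driver), assuming a render
 * node fd and libdrm's drmIoctl(): querying one feature word through the
 * GET_PARAM ioctl from <drm/panfrost_drm.h>.
 *
 *	struct drm_panfrost_get_param get = {
 *		.param = DRM_PANFROST_PARAM_GPU_PROD_ID,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get) == 0)
 *		printf("GPU product id: 0x%llx\n", (unsigned long long)get.value);
 */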

static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_file_priv *priv = file->driver_priv;
	struct panfrost_gem_object *bo;
	struct drm_panfrost_create_bo *args = data;
	struct panfrost_gem_mapping *mapping;

	if (!args->size || args->pad ||
	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
		return -EINVAL;

	/* Heaps should never be executable */
	if ((args->flags & PANFROST_BO_HEAP) &&
	    !(args->flags & PANFROST_BO_NOEXEC))
		return -EINVAL;

	bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
					     &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	mapping = panfrost_gem_mapping_get(bo, priv);
	if (!mapping) {
		drm_gem_object_put(&bo->base.base);
		return -EINVAL;
	}

	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);

	return 0;
}
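/*
 * Illustrative userspace sketch (not part of this driver), continuing the
 * drmIoctl() example above: creating a non-executable BO and reading back the
 * GPU virtual address chosen by panfrost_gem_create_with_handle().
 *
 *	struct drm_panfrost_create_bo create = {
 *		.size  = 4096,
 *		.flags = PANFROST_BO_NOEXEC,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &create) == 0)
 *		printf("handle %u mapped at GPU VA 0x%llx\n",
 *		       create.handle, (unsigned long long)create.offset);
 */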

/**
 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_panfrost_submit *args,
		  struct panfrost_job *job)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo;
	unsigned int i;
	int ret;

	job->bo_count = args->bo_handle_count;

	if (!job->bo_count)
		return 0;

	ret = drm_gem_objects_lookup(file_priv,
				     (void __user *)(uintptr_t)args->bo_handles,
				     job->bo_count, &job->bos);
	if (ret)
		return ret;

	job->mappings = kvmalloc_array(job->bo_count,
				       sizeof(struct panfrost_gem_mapping *),
				       GFP_KERNEL | __GFP_ZERO);
	if (!job->mappings)
		return -ENOMEM;

	for (i = 0; i < job->bo_count; i++) {
		struct panfrost_gem_mapping *mapping;

		bo = to_panfrost_bo(job->bos[i]);
		mapping = panfrost_gem_mapping_get(bo, priv);
		if (!mapping) {
			ret = -EINVAL;
			break;
		}

		atomic_inc(&bo->gpu_usecount);
		job->mappings[i] = mapping;
	}

	return ret;
}

/**
 * panfrost_copy_in_sync() - Sets up job->deps with the sync objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_copy_in_sync(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_panfrost_submit *args,
		  struct panfrost_job *job)
{
	u32 *handles;
	int ret = 0;
	int i, in_fence_count;

	in_fence_count = args->in_sync_count;

	if (!in_fence_count)
		return 0;

	handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->in_syncs,
			   in_fence_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in syncobj handles\n");
		goto fail;
	}

	for (i = 0; i < in_fence_count; i++) {
		struct dma_fence *fence;

		ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
					     &fence);
		if (ret)
			goto fail;

		ret = drm_sched_job_add_dependency(&job->base, fence);

		if (ret)
			goto fail;
	}

fail:
	kvfree(handles);
	return ret;
}

static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_file_priv *file_priv = file->driver_priv;
	struct drm_panfrost_submit *args = data;
	struct drm_syncobj *sync_out = NULL;
	struct panfrost_job *job;
	int ret = 0, slot;

	if (!args->jc)
		return -EINVAL;

	if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
		return -EINVAL;

	if (args->out_sync > 0) {
		sync_out = drm_syncobj_find(file, args->out_sync);
		if (!sync_out)
			return -ENODEV;
	}

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job) {
		ret = -ENOMEM;
		goto out_put_syncout;
	}

	kref_init(&job->refcount);

	job->pfdev = pfdev;
	job->jc = args->jc;
	job->requirements = args->requirements;
	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
	job->mmu = file_priv->mmu;

	slot = panfrost_job_get_slot(job);

	ret = drm_sched_job_init(&job->base,
				 &file_priv->sched_entity[slot],
				 NULL);
	if (ret)
		goto out_put_job;

	ret = panfrost_copy_in_sync(dev, file, args, job);
	if (ret)
		goto out_cleanup_job;

	ret = panfrost_lookup_bos(dev, file, args, job);
	if (ret)
		goto out_cleanup_job;

	ret = panfrost_job_push(job);
	if (ret)
		goto out_cleanup_job;

	/* Update the return sync object for the job */
	if (sync_out)
		drm_syncobj_replace_fence(sync_out, job->render_done_fence);

out_cleanup_job:
	if (ret)
		drm_sched_job_cleanup(&job->base);
out_put_job:
	panfrost_job_put(job);
out_put_syncout:
	if (sync_out)
		drm_syncobj_put(sync_out);

	return ret;
}
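/*
 * Illustrative userspace sketch (not part of this driver): submitting a job
 * chain.  jc_gpu_va and out_syncobj are placeholders, and create.handle comes
 * from the CREATE_BO sketch above; a real job descriptor must already have
 * been written into a mapped BO at jc_gpu_va.
 *
 *	__u32 bos[] = { create.handle };
 *	struct drm_panfrost_submit submit = {
 *		.jc              = jc_gpu_va,
 *		.bo_handles      = (__u64)(uintptr_t)bos,
 *		.bo_handle_count = 1,
 *		.out_sync        = out_syncobj,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
 */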

static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	long ret;
	struct drm_panfrost_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	if (args->pad)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj)
		return -ENOENT;

	ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ,
				    true, timeout);
	if (!ret)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	drm_gem_object_put(gem_obj);

	return ret;
}
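/*
 * Illustrative userspace sketch (not part of this driver): waiting up to one
 * second for the GPU to be done with a BO.  timeout_ns is an absolute
 * CLOCK_MONOTONIC deadline, as converted by drm_timeout_abs_to_jiffies()
 * above; create.handle again refers to the CREATE_BO sketch.
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	struct drm_panfrost_wait_bo wait = {
 *		.handle     = create.handle,
 *		.timeout_ns = (ts.tv_sec + 1) * 1000000000ll + ts.tv_nsec,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_PANFROST_WAIT_BO, &wait);
 */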

static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_panfrost_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	/* Don't allow mmapping of heap objects as pages are not pinned. */
	if (to_panfrost_bo(gem_obj)->is_heap) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret == 0)
		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

out:
	drm_gem_object_put(gem_obj);
	return ret;
}

static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_get_bo_offset *args = data;
	struct panfrost_gem_mapping *mapping;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_panfrost_bo(gem_obj);

	mapping = panfrost_gem_mapping_get(bo, priv);
	drm_gem_object_put(gem_obj);

	if (!mapping)
		return -EINVAL;

	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);
	return 0;
}

static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_madvise *args = data;
	struct panfrost_device *pfdev = dev->dev_private;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;
	int ret = 0;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_panfrost_bo(gem_obj);

	mutex_lock(&pfdev->shrinker_lock);
	mutex_lock(&bo->mappings.lock);
	if (args->madv == PANFROST_MADV_DONTNEED) {
		struct panfrost_gem_mapping *first;

		first = list_first_entry(&bo->mappings.list,
					 struct panfrost_gem_mapping,
					 node);

		/*
		 * If we want to mark the BO purgeable, there must be only one
		 * user: the caller FD.
		 * We could do something smarter and mark the BO purgeable only
		 * when all its users have marked it purgeable, but globally
		 * visible/shared BOs are likely to never be marked purgeable
		 * anyway, so let's not bother.
		 */
		if (!list_is_singular(&bo->mappings.list) ||
		    WARN_ON_ONCE(first->mmu != priv->mmu)) {
			ret = -EINVAL;
			goto out_unlock_mappings;
		}
	}

	args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);

	if (args->retained) {
		if (args->madv == PANFROST_MADV_DONTNEED)
			list_add_tail(&bo->base.madv_list,
				      &pfdev->shrinker_list);
		else if (args->madv == PANFROST_MADV_WILLNEED)
			list_del_init(&bo->base.madv_list);
	}

out_unlock_mappings:
	mutex_unlock(&bo->mappings.lock);
	mutex_unlock(&pfdev->shrinker_lock);

	drm_gem_object_put(gem_obj);
	return ret;
}
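/*
 * Illustrative userspace sketch (not part of this driver): marking an idle BO
 * purgeable so the shrinker may reclaim it under memory pressure, then
 * claiming it back.  If retained comes back 0 on the WILLNEED call, the
 * backing pages were already reclaimed and the contents must be re-created.
 *
 *	struct drm_panfrost_madvise madv = {
 *		.handle = create.handle,
 *		.madv   = PANFROST_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_PANFROST_MADVISE, &madv);
 *
 *	madv.madv = PANFROST_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_PANFROST_MADVISE, &madv);
 */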

int panfrost_unstable_ioctl_check(void)
{
	if (!unstable_ioctls)
		return -ENOSYS;

	return 0;
}

static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_file_priv *panfrost_priv;

	panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
	if (!panfrost_priv)
		return -ENOMEM;

	panfrost_priv->pfdev = pfdev;
	file->driver_priv = panfrost_priv;

	panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev);
	if (IS_ERR(panfrost_priv->mmu)) {
		ret = PTR_ERR(panfrost_priv->mmu);
		goto err_free;
	}

	ret = panfrost_job_open(panfrost_priv);
	if (ret)
		goto err_job;

	return 0;

err_job:
	panfrost_mmu_ctx_put(panfrost_priv->mmu);
err_free:
	kfree(panfrost_priv);
	return ret;
}

static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct panfrost_file_priv *panfrost_priv = file->driver_priv;

	panfrost_perfcnt_close(file);
	panfrost_job_close(panfrost_priv);

	panfrost_mmu_ctx_put(panfrost_priv->mmu);
	kfree(panfrost_priv);
}

static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

	PANFROST_IOCTL(SUBMIT,		submit,		DRM_RENDER_ALLOW),
	PANFROST_IOCTL(WAIT_BO,		wait_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(CREATE_BO,	create_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MMAP_BO,		mmap_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_PARAM,	get_param,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_BO_OFFSET,	get_bo_offset,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_ENABLE,	perfcnt_enable,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_DUMP,	perfcnt_dump,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MADVISE,		madvise,	DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);

/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 * - 1.2 - adds AFBC_FEATURES query
 */
static const struct drm_driver panfrost_drm_driver = {
	.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
	.open			= panfrost_open,
	.postclose		= panfrost_postclose,
	.ioctls			= panfrost_drm_driver_ioctls,
	.num_ioctls		= ARRAY_SIZE(panfrost_drm_driver_ioctls),
	.fops			= &panfrost_drm_driver_fops,
	.name			= "panfrost",
	.desc			= "panfrost DRM",
	.date			= "20180908",
	.major			= 1,
	.minor			= 2,

	.gem_create_object	= panfrost_gem_create_object,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
	.gem_prime_mmap		= drm_gem_prime_mmap,
};

static int panfrost_probe(struct platform_device *pdev)
{
	struct panfrost_device *pfdev;
	struct drm_device *ddev;
	int err;

	pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
	if (!pfdev)
		return -ENOMEM;

	pfdev->pdev = pdev;
	pfdev->dev = &pdev->dev;

	platform_set_drvdata(pdev, pfdev);

	pfdev->comp = of_device_get_match_data(&pdev->dev);
	if (!pfdev->comp)
		return -ENODEV;

	pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;

	/* Allocate and initialize the DRM device. */
	ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ddev->dev_private = pfdev;
	pfdev->ddev = ddev;

	mutex_init(&pfdev->shrinker_lock);
	INIT_LIST_HEAD(&pfdev->shrinker_list);

	err = panfrost_device_init(pfdev);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Fatal error during GPU init\n");
		goto err_out0;
	}

	pm_runtime_set_active(pfdev->dev);
	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_enable(pfdev->dev);
	pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
	pm_runtime_use_autosuspend(pfdev->dev);

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs
	 */
	err = drm_dev_register(ddev, 0);
	if (err < 0)
		goto err_out1;

	panfrost_gem_shrinker_init(ddev);

	return 0;

err_out1:
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);
err_out0:
	drm_dev_put(ddev);
	return err;
}

static int panfrost_remove(struct platform_device *pdev)
{
	struct panfrost_device *pfdev = platform_get_drvdata(pdev);
	struct drm_device *ddev = pfdev->ddev;

	drm_dev_unregister(ddev);
	panfrost_gem_shrinker_cleanup(ddev);

	pm_runtime_get_sync(pfdev->dev);
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);

	drm_dev_put(ddev);
	return 0;
}

static const char * const default_supplies[] = { "mali" };
static const struct panfrost_compatible default_data = {
	.num_supplies = ARRAY_SIZE(default_supplies),
	.supply_names = default_supplies,
	.num_pm_domains = 1, /* optional */
	.pm_domain_names = NULL,
};

static const struct panfrost_compatible amlogic_data = {
	.num_supplies = ARRAY_SIZE(default_supplies),
	.supply_names = default_supplies,
	.vendor_quirk = panfrost_gpu_amlogic_quirk,
};

static const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
	.num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies),
	.supply_names = mediatek_mt8183_supplies,
	.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
	.pm_domain_names = mediatek_mt8183_pm_domains,
};

static const struct of_device_id dt_match[] = {
	/* Set first to probe before the generic compatibles */
	{ .compatible = "amlogic,meson-gxm-mali",
	  .data = &amlogic_data, },
	{ .compatible = "amlogic,meson-g12a-mali",
	  .data = &amlogic_data, },
	{ .compatible = "arm,mali-t604", .data = &default_data, },
	{ .compatible = "arm,mali-t624", .data = &default_data, },
	{ .compatible = "arm,mali-t628", .data = &default_data, },
	{ .compatible = "arm,mali-t720", .data = &default_data, },
	{ .compatible = "arm,mali-t760", .data = &default_data, },
	{ .compatible = "arm,mali-t820", .data = &default_data, },
	{ .compatible = "arm,mali-t830", .data = &default_data, },
	{ .compatible = "arm,mali-t860", .data = &default_data, },
	{ .compatible = "arm,mali-t880", .data = &default_data, },
	{ .compatible = "arm,mali-bifrost", .data = &default_data, },
	{ .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static const struct dev_pm_ops panfrost_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
};

static struct platform_driver panfrost_driver = {
	.probe		= panfrost_probe,
	.remove		= panfrost_remove,
	.driver		= {
		.name	= "panfrost",
		.pm	= &panfrost_pm_ops,
		.of_match_table = dt_match,
	},
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");