cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kfd_priv.h (42480B)


      1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
      2/*
      3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
      4 *
      5 * Permission is hereby granted, free of charge, to any person obtaining a
      6 * copy of this software and associated documentation files (the "Software"),
      7 * to deal in the Software without restriction, including without limitation
      8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      9 * and/or sell copies of the Software, and to permit persons to whom the
     10 * Software is furnished to do so, subject to the following conditions:
     11 *
     12 * The above copyright notice and this permission notice shall be included in
     13 * all copies or substantial portions of the Software.
     14 *
     15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     21 * OTHER DEALINGS IN THE SOFTWARE.
     22 */
     23
     24#ifndef KFD_PRIV_H_INCLUDED
     25#define KFD_PRIV_H_INCLUDED
     26
     27#include <linux/hashtable.h>
     28#include <linux/mmu_notifier.h>
     29#include <linux/memremap.h>
     30#include <linux/mutex.h>
     31#include <linux/types.h>
     32#include <linux/atomic.h>
     33#include <linux/workqueue.h>
     34#include <linux/spinlock.h>
     35#include <linux/kfd_ioctl.h>
     36#include <linux/idr.h>
     37#include <linux/kfifo.h>
     38#include <linux/seq_file.h>
     39#include <linux/kref.h>
     40#include <linux/sysfs.h>
     41#include <linux/device_cgroup.h>
     42#include <drm/drm_file.h>
     43#include <drm/drm_drv.h>
     44#include <drm/drm_device.h>
     45#include <drm/drm_ioctl.h>
     46#include <kgd_kfd_interface.h>
     47#include <linux/swap.h>
     48
     49#include "amd_shared.h"
     50#include "amdgpu.h"
     51
     52#define KFD_MAX_RING_ENTRY_SIZE	8
     53
     54#define KFD_SYSFS_FILE_MODE 0444
     55
     56/* GPU ID hash width in bits */
     57#define KFD_GPU_ID_HASH_WIDTH 16
     58
     59/* Use upper bits of mmap offset to store KFD driver specific information.
     60 * BITS[63:62] - Encode MMAP type
      61 * BITS[61:46] - Encode gpu_id. Identifies the GPU to which the offset belongs
     62 * BITS[45:0]  - MMAP offset value
     63 *
     64 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
      65 *  defines are w.r.t. PAGE_SIZE
     66 */
     67#define KFD_MMAP_TYPE_SHIFT	62
     68#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
     69#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
     70#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
     71#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
     72#define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)
     73
     74#define KFD_MMAP_GPU_ID_SHIFT 46
     75#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
     76				<< KFD_MMAP_GPU_ID_SHIFT)
     77#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
     78				& KFD_MMAP_GPU_ID_MASK)
     79#define KFD_MMAP_GET_GPU_ID(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
     80				>> KFD_MMAP_GPU_ID_SHIFT)
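/*
 * Illustrative example (not part of the original header): how the mmap offset
 * encoding above composes and decomposes, assuming a hypothetical gpu_id of
 * 0x1234.
 *
 *	offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(0x1234);
 *	       = (0x3ULL << 62) | ((uint64_t)0x1234 << 46)
 *
 *	offset & KFD_MMAP_TYPE_MASK	yields KFD_MMAP_TYPE_DOORBELL
 *	KFD_MMAP_GET_GPU_ID(offset)	yields 0x1234
 *
 * As the note above says, vm_pgoff is in units of pages, so these are page
 * offsets rather than byte offsets.
 */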
     81
     82/*
      83 * When working with the cp scheduler we should assign the HIQ manually or
      84 * via the amdgpu driver to a fixed hqd slot. Here are the fixed HIQ hqd
      85 * slot definitions for Kaveri. In Kaveri only the first ME's queues
      86 * participate in cp scheduling; with that in mind we set the HIQ slot in
      87 * the second ME.
     88 */
     89#define KFD_CIK_HIQ_PIPE 4
     90#define KFD_CIK_HIQ_QUEUE 0
     91
     92/* Macro for allocating structures */
     93#define kfd_alloc_struct(ptr_to_struct)	\
     94	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
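/*
 * Illustrative usage (not part of the original header): kfd_alloc_struct()
 * derives the allocation size from the pointer's type, so the struct name is
 * never repeated at the call site.
 *
 *	struct kfd_process_device *pdd;
 *
 *	pdd = kfd_alloc_struct(pdd);
 *	if (!pdd)
 *		return -ENOMEM;
 */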
     95
     96#define KFD_MAX_NUM_OF_PROCESSES 512
     97#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
     98
     99/*
    100 * Size of the per-process TBA+TMA buffer: 2 pages
    101 *
    102 * The first page is the TBA used for the CWSR ISA code. The second
    103 * page is used as TMA for user-mode trap handler setup in daisy-chain mode.
    104 */
    105#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
    106#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
    107
    108#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
    109	(KFD_MAX_NUM_OF_PROCESSES *			\
    110			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
    111
    112#define KFD_KERNEL_QUEUE_SIZE 2048
    113
    114#define KFD_UNMAP_LATENCY_MS	(4000)
    115
    116/*
    117 * 512 = 0x200
    118 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
    119 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
     120 * A distance of 512 8-byte doorbells (i.e. one page away) ensures that SDMA RLC
    121 * (2*i+1) doorbells (in terms of the lower 12 bit address) lie exactly in
    122 * the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE.
    123 */
    124#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
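/*
 * Illustrative arithmetic (not part of the original header):
 * 512 doorbells * 8 bytes per doorbell = 4096 bytes = one 4 KiB page, so the
 * SDMA RLC (2*i+1) doorbell sits exactly one page after its (2*i) partner and
 * keeps the same lower 12 address bits.
 */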
    125
    126/**
    127 * enum kfd_ioctl_flags - KFD ioctl flags
    128 * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
    129 * userspace can use a given ioctl.
    130 */
    131enum kfd_ioctl_flags {
    132	/*
    133	 * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
    134	 * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
    135	 * perform privileged operations and load arbitrary data into MQDs and
    136	 * eventually HQD registers when the queue is mapped by HWS. In order to
    137	 * prevent this we should perform additional security checks.
    138	 *
    139	 * This is equivalent to callers with the CHECKPOINT_RESTORE capability.
    140	 *
    141	 * Note: Since earlier versions of docker do not support CHECKPOINT_RESTORE,
    142	 * we also allow ioctls with SYS_ADMIN capability.
    143	 */
    144	KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
    145};
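/*
 * Minimal sketch (not part of the original header) of how a flagged ioctl
 * could be gated, assuming capable() from <linux/capability.h>;
 * kfd_ioctl_priv_allowed() is a hypothetical helper name, not something this
 * header defines.
 *
 *	static bool kfd_ioctl_priv_allowed(unsigned int flags)
 *	{
 *		if (!(flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE))
 *			return true;
 *		return capable(CAP_CHECKPOINT_RESTORE) ||
 *		       capable(CAP_SYS_ADMIN);
 *	}
 */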
    146/*
    147 * Kernel module parameter to specify maximum number of supported queues per
    148 * device
    149 */
    150extern int max_num_of_queues_per_device;
    151
    152
    153/* Kernel module parameter to specify the scheduling policy */
    154extern int sched_policy;
    155
    156/*
    157 * Kernel module parameter to specify the maximum process
    158 * number per HW scheduler
    159 */
    160extern int hws_max_conc_proc;
    161
    162extern int cwsr_enable;
    163
    164/*
    165 * Kernel module parameter to specify whether to send sigterm to HSA process on
    166 * unhandled exception
    167 */
    168extern int send_sigterm;
    169
    170/*
     171 * Kernel module parameter used to simulate a large-BAR machine on machines
     172 * that do not have large BAR enabled.
    173 */
    174extern int debug_largebar;
    175
    176/*
    177 * Ignore CRAT table during KFD initialization, can be used to work around
    178 * broken CRAT tables on some AMD systems
    179 */
    180extern int ignore_crat;
    181
    182/* Set sh_mem_config.retry_disable on GFX v9 */
    183extern int amdgpu_noretry;
    184
    185/* Halt if HWS hang is detected */
    186extern int halt_if_hws_hang;
    187
     188/* Whether MEC FW supports GWS barriers */
    189extern bool hws_gws_support;
    190
    191/* Queue preemption timeout in ms */
    192extern int queue_preemption_timeout_ms;
    193
    194/*
    195 * Don't evict process queues on vm fault
    196 */
    197extern int amdgpu_no_queue_eviction_on_vm_fault;
    198
    199/* Enable eviction debug messages */
    200extern bool debug_evictions;
    201
    202enum cache_policy {
    203	cache_policy_coherent,
    204	cache_policy_noncoherent
    205};
    206
    207#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
    208#define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
    209
    210struct kfd_event_interrupt_class {
    211	bool (*interrupt_isr)(struct kfd_dev *dev,
    212			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
    213			bool *patched_flag);
    214	void (*interrupt_wq)(struct kfd_dev *dev,
    215			const uint32_t *ih_ring_entry);
    216};
    217
    218struct kfd_device_info {
    219	uint32_t gfx_target_version;
    220	const struct kfd_event_interrupt_class *event_interrupt_class;
    221	unsigned int max_pasid_bits;
    222	unsigned int max_no_of_hqd;
    223	unsigned int doorbell_size;
    224	size_t ih_ring_entry_size;
    225	uint8_t num_of_watch_points;
    226	uint16_t mqd_size_aligned;
    227	bool supports_cwsr;
    228	bool needs_iommu_device;
    229	bool needs_pci_atomics;
    230	uint32_t no_atomic_fw_version;
    231	unsigned int num_sdma_queues_per_engine;
    232	unsigned int num_reserved_sdma_queues_per_engine;
    233	uint64_t reserved_sdma_queues_bitmap;
    234};
    235
    236unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev);
    237unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev);
    238
    239struct kfd_mem_obj {
    240	uint32_t range_start;
    241	uint32_t range_end;
    242	uint64_t gpu_addr;
    243	uint32_t *cpu_ptr;
    244	void *gtt_mem;
    245};
    246
    247struct kfd_vmid_info {
    248	uint32_t first_vmid_kfd;
    249	uint32_t last_vmid_kfd;
    250	uint32_t vmid_num_kfd;
    251};
    252
    253struct kfd_dev {
    254	struct amdgpu_device *adev;
    255
    256	struct kfd_device_info device_info;
    257	struct pci_dev *pdev;
    258	struct drm_device *ddev;
    259
    260	unsigned int id;		/* topology stub index */
    261
    262	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
    263					 * KFD. It is aligned for mapping
    264					 * into user mode
    265					 */
    266	size_t doorbell_base_dw_offset;	/* Offset from the start of the PCI
    267					 * doorbell BAR to the first KFD
    268					 * doorbell in dwords. GFX reserves
    269					 * the segment before this offset.
    270					 */
    271	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
    272					   * page used by kernel queue
    273					   */
    274
    275	struct kgd2kfd_shared_resources shared_resources;
    276	struct kfd_vmid_info vm_info;
    277	struct kfd_local_mem_info local_mem_info;
    278
    279	const struct kfd2kgd_calls *kfd2kgd;
    280	struct mutex doorbell_mutex;
    281	DECLARE_BITMAP(doorbell_available_index,
    282			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
    283
    284	void *gtt_mem;
    285	uint64_t gtt_start_gpu_addr;
    286	void *gtt_start_cpu_ptr;
    287	void *gtt_sa_bitmap;
    288	struct mutex gtt_sa_lock;
    289	unsigned int gtt_sa_chunk_size;
    290	unsigned int gtt_sa_num_of_chunks;
    291
    292	/* Interrupts */
    293	struct kfifo ih_fifo;
    294	struct workqueue_struct *ih_wq;
    295	struct work_struct interrupt_work;
    296	spinlock_t interrupt_lock;
    297
    298	/* QCM Device instance */
    299	struct device_queue_manager *dqm;
    300
    301	bool init_complete;
    302	/*
    303	 * Interrupts of interest to KFD are copied
    304	 * from the HW ring into a SW ring.
    305	 */
    306	bool interrupts_active;
    307
    308	/* Firmware versions */
    309	uint16_t mec_fw_version;
    310	uint16_t mec2_fw_version;
    311	uint16_t sdma_fw_version;
    312
    313	/* Maximum process number mapped to HW scheduler */
    314	unsigned int max_proc_per_quantum;
    315
    316	/* CWSR */
    317	bool cwsr_enabled;
    318	const void *cwsr_isa;
    319	unsigned int cwsr_isa_size;
    320
    321	/* xGMI */
    322	uint64_t hive_id;
    323
    324	bool pci_atomic_requested;
    325
    326	/* Use IOMMU v2 flag */
    327	bool use_iommu_v2;
    328
    329	/* SRAM ECC flag */
    330	atomic_t sram_ecc_flag;
    331
    332	/* Compute Profile ref. count */
    333	atomic_t compute_profile;
    334
    335	/* Global GWS resource shared between processes */
    336	void *gws;
    337
    338	/* Clients watching SMI events */
    339	struct list_head smi_clients;
    340	spinlock_t smi_lock;
    341
    342	uint32_t reset_seq_num;
    343
    344	struct ida doorbell_ida;
    345	unsigned int max_doorbell_slices;
    346
    347	int noretry;
    348
    349	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
    350	struct dev_pagemap pgmap;
    351};
    352
    353enum kfd_mempool {
    354	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
    355	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
    356	KFD_MEMPOOL_FRAMEBUFFER = 3,
    357};
    358
    359/* Character device interface */
    360int kfd_chardev_init(void);
    361void kfd_chardev_exit(void);
    362
    363/**
    364 * enum kfd_unmap_queues_filter - Enum for queue filters.
    365 *
    366 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
    367 *						running queues list.
    368 *
    369 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
    370 *						in the run list.
    371 *
     372 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
    373 *						specific process.
    374 *
    375 */
    376enum kfd_unmap_queues_filter {
    377	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES = 1,
    378	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES = 2,
    379	KFD_UNMAP_QUEUES_FILTER_BY_PASID = 3
    380};
    381
    382/**
    383 * enum kfd_queue_type - Enum for various queue types.
    384 *
    385 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
    386 *
    387 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
    388 *
    389 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
    390 *
    391 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
    392 *
    393 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
    394 */
    395enum kfd_queue_type  {
    396	KFD_QUEUE_TYPE_COMPUTE,
    397	KFD_QUEUE_TYPE_SDMA,
    398	KFD_QUEUE_TYPE_HIQ,
    399	KFD_QUEUE_TYPE_DIQ,
    400	KFD_QUEUE_TYPE_SDMA_XGMI
    401};
    402
    403enum kfd_queue_format {
    404	KFD_QUEUE_FORMAT_PM4,
    405	KFD_QUEUE_FORMAT_AQL
    406};
    407
    408enum KFD_QUEUE_PRIORITY {
    409	KFD_QUEUE_PRIORITY_MINIMUM = 0,
    410	KFD_QUEUE_PRIORITY_MAXIMUM = 15
    411};
    412
    413/**
    414 * struct queue_properties
    415 *
    416 * @type: The queue type.
    417 *
    418 * @queue_id: Queue identifier.
    419 *
    420 * @queue_address: Queue ring buffer address.
    421 *
    422 * @queue_size: Queue ring buffer size.
    423 *
    424 * @priority: Defines the queue priority relative to other queues in the
    425 * process.
    426 * This is just an indication and HW scheduling may override the priority as
    427 * necessary while keeping the relative prioritization.
     428 * The priority range is from 0 to 15, where 15 is the highest priority.
     429 * Currently all queues are initialized with the highest priority.
    430 *
    431 * @queue_percent: This field is partially implemented and currently a zero in
     432 * this field means that the queue is not active.
    433 *
    434 * @read_ptr: User space address which points to the number of dwords the
     435 * cp has read from the ring buffer. The H/W updates this field automatically.
    436 *
    437 * @write_ptr: Defines the number of dwords written to the ring buffer.
    438 *
     439 * @doorbell_ptr: Notifies the H/W of a new packet written to the queue ring
    440 * buffer. This field should be similar to write_ptr and the user should
    441 * update this field after updating the write_ptr.
    442 *
    443 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
    444 *
     445 * @is_interop: Defines if this is an interop queue. An interop queue means that
    446 * the queue can access both graphics and compute resources.
    447 *
    448 * @is_evicted: Defines if the queue is evicted. Only active queues
    449 * are evicted, rendering them inactive.
    450 *
    451 * @is_active: Defines if the queue is active or not. @is_active and
    452 * @is_evicted are protected by the DQM lock.
    453 *
    454 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
     455 * @is_gws should be protected by the DQM lock, since changing it affects the
     456 * DQM's accounting of the number of GWS queues.
    457 *
     458 * @vmid: If the scheduling mode is no-cp-scheduling, this field defines the
     459 * vmid of the queue.
    460 *
     461 * This structure represents the queue properties for each queue, no matter
     462 * whether it is a user-mode or a kernel-mode queue.
    463 *
    464 */
    465
    466struct queue_properties {
    467	enum kfd_queue_type type;
    468	enum kfd_queue_format format;
    469	unsigned int queue_id;
    470	uint64_t queue_address;
    471	uint64_t  queue_size;
    472	uint32_t priority;
    473	uint32_t queue_percent;
    474	uint32_t *read_ptr;
    475	uint32_t *write_ptr;
    476	void __iomem *doorbell_ptr;
    477	uint32_t doorbell_off;
    478	bool is_interop;
    479	bool is_evicted;
    480	bool is_active;
    481	bool is_gws;
    482	/* Not relevant for user mode queues in cp scheduling */
    483	unsigned int vmid;
     484	/* Relevant only for sdma queues */
    485	uint32_t sdma_engine_id;
    486	uint32_t sdma_queue_id;
    487	uint32_t sdma_vm_addr;
    488	/* Relevant only for VI */
    489	uint64_t eop_ring_buffer_address;
    490	uint32_t eop_ring_buffer_size;
    491	uint64_t ctx_save_restore_area_address;
    492	uint32_t ctx_save_restore_area_size;
    493	uint32_t ctl_stack_size;
    494	uint64_t tba_addr;
    495	uint64_t tma_addr;
    496};
    497
    498#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
    499			    (q).queue_address != 0 &&	\
    500			    (q).queue_percent > 0 &&	\
    501			    !(q).is_evicted)
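/*
 * Illustrative usage (not part of the original header): QUEUE_IS_ACTIVE()
 * operates on a struct queue_properties value rather than a pointer, so a
 * typical check looks like
 *
 *	bool active = QUEUE_IS_ACTIVE(q->properties);
 *
 * i.e. a queue counts as active only while it has a ring buffer, a nonzero
 * queue_percent, and has not been evicted.
 */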
    502
    503enum mqd_update_flag {
    504	UPDATE_FLAG_CU_MASK = 0,
    505};
    506
    507struct mqd_update_info {
    508	union {
    509		struct {
    510			uint32_t count; /* Must be a multiple of 32 */
    511			uint32_t *ptr;
    512		} cu_mask;
    513	};
    514	enum mqd_update_flag update_flag;
    515};
    516
    517/**
    518 * struct queue
    519 *
    520 * @list: Queue linked list.
    521 *
    522 * @mqd: The queue MQD (memory queue descriptor).
    523 *
    524 * @mqd_mem_obj: The MQD local gpu memory object.
    525 *
    526 * @gart_mqd_addr: The MQD gart mc address.
    527 *
    528 * @properties: The queue properties.
    529 *
     530 * @mec: Used only in no cp scheduling mode and identifies the micro engine id
    531 *	 that the queue should be executed on.
    532 *
    533 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
    534 *	  id.
    535 *
     536 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
    537 *
    538 * @process: The kfd process that created this queue.
    539 *
    540 * @device: The kfd device that created this queue.
    541 *
     542 * @gws: Points to the gws kgd_mem if this is a gws control queue; NULL
    543 * otherwise.
    544 *
    545 * This structure represents user mode compute queues.
    546 * It contains all the necessary data to handle such queues.
    547 *
    548 */
    549
    550struct queue {
    551	struct list_head list;
    552	void *mqd;
    553	struct kfd_mem_obj *mqd_mem_obj;
    554	uint64_t gart_mqd_addr;
    555	struct queue_properties properties;
    556
    557	uint32_t mec;
    558	uint32_t pipe;
    559	uint32_t queue;
    560
    561	unsigned int sdma_id;
    562	unsigned int doorbell_id;
    563
    564	struct kfd_process	*process;
    565	struct kfd_dev		*device;
    566	void *gws;
    567
    568	/* procfs */
    569	struct kobject kobj;
    570
    571	void *gang_ctx_bo;
    572	uint64_t gang_ctx_gpu_addr;
    573	void *gang_ctx_cpu_ptr;
    574};
    575
    576enum KFD_MQD_TYPE {
    577	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
    578	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
    579	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
    580	KFD_MQD_TYPE_DIQ,		/* for diq */
    581	KFD_MQD_TYPE_MAX
    582};
    583
    584enum KFD_PIPE_PRIORITY {
    585	KFD_PIPE_PRIORITY_CS_LOW = 0,
    586	KFD_PIPE_PRIORITY_CS_MEDIUM,
    587	KFD_PIPE_PRIORITY_CS_HIGH
    588};
    589
    590struct scheduling_resources {
    591	unsigned int vmid_mask;
    592	enum kfd_queue_type type;
    593	uint64_t queue_mask;
    594	uint64_t gws_mask;
    595	uint32_t oac_mask;
    596	uint32_t gds_heap_base;
    597	uint32_t gds_heap_size;
    598};
    599
    600struct process_queue_manager {
    601	/* data */
    602	struct kfd_process	*process;
    603	struct list_head	queues;
    604	unsigned long		*queue_slot_bitmap;
    605};
    606
    607struct qcm_process_device {
    608	/* The Device Queue Manager that owns this data */
    609	struct device_queue_manager *dqm;
    610	struct process_queue_manager *pqm;
    611	/* Queues list */
    612	struct list_head queues_list;
    613	struct list_head priv_queue_list;
    614
    615	unsigned int queue_count;
    616	unsigned int vmid;
    617	bool is_debug;
    618	unsigned int evicted; /* eviction counter, 0=active */
    619
    620	/* This flag tells if we should reset all wavefronts on
    621	 * process termination
    622	 */
    623	bool reset_wavefronts;
    624
    625	/* This flag tells us if this process has a GWS-capable
    626	 * queue that will be mapped into the runlist. It's
    627	 * possible to request a GWS BO, but not have the queue
    628	 * currently mapped, and this changes how the MAP_PROCESS
    629	 * PM4 packet is configured.
    630	 */
    631	bool mapped_gws_queue;
    632
    633	/* All the memory management data should be here too */
    634	uint64_t gds_context_area;
    635	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
    636	uint64_t page_table_base;
    637	uint32_t sh_mem_config;
    638	uint32_t sh_mem_bases;
    639	uint32_t sh_mem_ape1_base;
    640	uint32_t sh_mem_ape1_limit;
    641	uint32_t gds_size;
    642	uint32_t num_gws;
    643	uint32_t num_oac;
    644	uint32_t sh_hidden_private_base;
    645
    646	/* CWSR memory */
    647	struct kgd_mem *cwsr_mem;
    648	void *cwsr_kaddr;
    649	uint64_t cwsr_base;
    650	uint64_t tba_addr;
    651	uint64_t tma_addr;
    652
    653	/* IB memory */
    654	struct kgd_mem *ib_mem;
    655	uint64_t ib_base;
    656	void *ib_kaddr;
    657
    658	/* doorbell resources per process per device */
    659	unsigned long *doorbell_bitmap;
    660};
    661
    662/* KFD Memory Eviction */
    663
    664/* Approx. wait time before attempting to restore evicted BOs */
    665#define PROCESS_RESTORE_TIME_MS 100
    666/* Approx. back off time if restore fails due to lack of memory */
    667#define PROCESS_BACK_OFF_TIME_MS 100
    668/* Approx. time before evicting the process again */
    669#define PROCESS_ACTIVE_TIME_MS 10
    670
    671/* 8 byte handle containing GPU ID in the most significant 4 bytes and
    672 * idr_handle in the least significant 4 bytes
    673 */
    674#define MAKE_HANDLE(gpu_id, idr_handle) \
    675	(((uint64_t)(gpu_id) << 32) + idr_handle)
    676#define GET_GPU_ID(handle) (handle >> 32)
    677#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
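/*
 * Worked example (not part of the original header), using hypothetical values:
 *
 *	MAKE_HANDLE(0x0000abcd, 7)		yields 0x0000abcd00000007
 *	GET_GPU_ID(0x0000abcd00000007)		yields 0x0000abcd
 *	GET_IDR_HANDLE(0x0000abcd00000007)	yields 0x00000007
 */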
    678
    679enum kfd_pdd_bound {
    680	PDD_UNBOUND = 0,
    681	PDD_BOUND,
    682	PDD_BOUND_SUSPENDED,
    683};
    684
    685#define MAX_SYSFS_FILENAME_LEN 15
    686
    687/*
    688 * SDMA counter runs at 100MHz frequency.
    689 * We display SDMA activity in microsecond granularity in sysfs.
    690 * As a result, the divisor is 100.
    691 */
    692#define SDMA_ACTIVITY_DIVISOR  100
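/*
 * Illustrative arithmetic (not part of the original header): at 100 MHz one
 * counter tick is 10 ns, so 100 ticks make 1 us. Dividing the raw counter by
 * SDMA_ACTIVITY_DIVISOR therefore yields microseconds, e.g. a raw value of
 * 250000 is reported as 2500 us (2.5 ms) of SDMA activity.
 */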
    693
    694/* Data that is per-process-per device. */
    695struct kfd_process_device {
    696	/* The device that owns this data. */
    697	struct kfd_dev *dev;
    698
    699	/* The process that owns this kfd_process_device. */
    700	struct kfd_process *process;
    701
    702	/* per-process-per device QCM data structure */
    703	struct qcm_process_device qpd;
    704
    705	/*Apertures*/
    706	uint64_t lds_base;
    707	uint64_t lds_limit;
    708	uint64_t gpuvm_base;
    709	uint64_t gpuvm_limit;
    710	uint64_t scratch_base;
    711	uint64_t scratch_limit;
    712
    713	/* VM context for GPUVM allocations */
    714	struct file *drm_file;
    715	void *drm_priv;
    716	atomic64_t tlb_seq;
    717
    718	/* GPUVM allocations storage */
    719	struct idr alloc_idr;
    720
     721	/* Flag used to tell whether the pdd has been dequeued from the dqm.
    722	 * This is used to prevent dev->dqm->ops.process_termination() from
    723	 * being called twice when it is already called in IOMMU callback
    724	 * function.
    725	 */
    726	bool already_dequeued;
    727	bool runtime_inuse;
    728
    729	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
    730	enum kfd_pdd_bound bound;
    731
    732	/* VRAM usage */
    733	uint64_t vram_usage;
    734	struct attribute attr_vram;
    735	char vram_filename[MAX_SYSFS_FILENAME_LEN];
    736
    737	/* SDMA activity tracking */
    738	uint64_t sdma_past_activity_counter;
    739	struct attribute attr_sdma;
    740	char sdma_filename[MAX_SYSFS_FILENAME_LEN];
    741
    742	/* Eviction activity tracking */
    743	uint64_t last_evict_timestamp;
    744	atomic64_t evict_duration_counter;
    745	struct attribute attr_evict;
    746
    747	struct kobject *kobj_stats;
    748	unsigned int doorbell_index;
    749
    750	/*
    751	 * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
    752	 * that is associated with device encoded by "this" struct instance. The
    753	 * value reflects CU usage by all of the waves launched by this process
     754	 * on this device. An important property of the occupancy parameter is
    755	 * that its value is a snapshot of current use.
    756	 *
     757	 * The following should be noted regarding how this parameter is reported:
    758	 *
     759	 *  The number of waves that a CU can launch is limited by a couple of
    760	 *  parameters. These are encoded by struct amdgpu_cu_info instance
    761	 *  that is part of every device definition. For GFX9 devices this
    762	 *  translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
    763	 *  do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
    764	 *  when they do use scratch memory. This could change for future
    765	 *  devices and therefore this example should be considered as a guide.
    766	 *
     767	 *  All CUs of a device are available to the process. This may not be true
    768	 *  under certain conditions - e.g. CU masking.
    769	 *
     770	 *  Finally, the number of CUs occupied by a process is affected both by the
     771	 *  number of CUs the device has and by the number of other competing processes.
    772	 */
    773	struct attribute attr_cu_occupancy;
    774
    775	/* sysfs counters for GPU retry fault and page migration tracking */
    776	struct kobject *kobj_counters;
    777	struct attribute attr_faults;
    778	struct attribute attr_page_in;
    779	struct attribute attr_page_out;
    780	uint64_t faults;
    781	uint64_t page_in;
    782	uint64_t page_out;
    783	/*
    784	 * If this process has been checkpointed before, then the user
    785	 * application will use the original gpu_id on the
    786	 * checkpointed node to refer to this device.
    787	 */
    788	uint32_t user_gpu_id;
    789
    790	void *proc_ctx_bo;
    791	uint64_t proc_ctx_gpu_addr;
    792	void *proc_ctx_cpu_ptr;
    793};
    794
    795#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
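/*
 * Illustrative usage (not part of the original header): qpd_to_pdd() is the
 * usual container_of() pattern; given a pointer to the embedded
 * qcm_process_device it recovers the enclosing per-process-per-device data.
 *
 *	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
 *	struct kfd_process *p = pdd->process;
 */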
    796
    797struct svm_range_list {
    798	struct mutex			lock;
    799	struct rb_root_cached		objects;
    800	struct list_head		list;
    801	struct work_struct		deferred_list_work;
    802	struct list_head		deferred_range_list;
    803	struct list_head                criu_svm_metadata_list;
    804	spinlock_t			deferred_list_lock;
    805	atomic_t			evicted_ranges;
    806	atomic_t			drain_pagefaults;
    807	struct delayed_work		restore_work;
    808	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
    809	struct task_struct		*faulting_task;
    810};
    811
    812/* Process data */
    813struct kfd_process {
    814	/*
    815	 * kfd_process are stored in an mm_struct*->kfd_process*
    816	 * hash table (kfd_processes in kfd_process.c)
    817	 */
    818	struct hlist_node kfd_processes;
    819
    820	/*
    821	 * Opaque pointer to mm_struct. We don't hold a reference to
    822	 * it so it should never be dereferenced from here. This is
    823	 * only used for looking up processes by their mm.
    824	 */
    825	void *mm;
    826
    827	struct kref ref;
    828	struct work_struct release_work;
    829
    830	struct mutex mutex;
    831
    832	/*
    833	 * In any process, the thread that started main() is the lead
    834	 * thread and outlives the rest.
    835	 * It is here because amd_iommu_bind_pasid wants a task_struct.
    836	 * It can also be used for safely getting a reference to the
    837	 * mm_struct of the process.
    838	 */
    839	struct task_struct *lead_thread;
    840
    841	/* We want to receive a notification when the mm_struct is destroyed */
    842	struct mmu_notifier mmu_notifier;
    843
    844	u32 pasid;
    845
    846	/*
    847	 * Array of kfd_process_device pointers,
    848	 * one for each device the process is using.
    849	 */
    850	struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
    851	uint32_t n_pdds;
    852
    853	struct process_queue_manager pqm;
    854
    855	/*Is the user space process 32 bit?*/
    856	bool is_32bit_user_mode;
    857
    858	/* Event-related data */
    859	struct mutex event_mutex;
    860	/* Event ID allocator and lookup */
    861	struct idr event_idr;
    862	/* Event page */
    863	u64 signal_handle;
    864	struct kfd_signal_page *signal_page;
    865	size_t signal_mapped_size;
    866	size_t signal_event_count;
    867	bool signal_event_limit_reached;
    868
    869	/* Information used for memory eviction */
    870	void *kgd_process_info;
    871	/* Eviction fence that is attached to all the BOs of this process. The
     872	 * fence will be triggered during eviction and a new one will be created
    873	 * during restore
    874	 */
    875	struct dma_fence *ef;
    876
    877	/* Work items for evicting and restoring BOs */
    878	struct delayed_work eviction_work;
    879	struct delayed_work restore_work;
    880	/* seqno of the last scheduled eviction */
    881	unsigned int last_eviction_seqno;
    882	/* Approx. the last timestamp (in jiffies) when the process was
    883	 * restored after an eviction
    884	 */
    885	unsigned long last_restore_timestamp;
    886
    887	/* Kobj for our procfs */
    888	struct kobject *kobj;
    889	struct kobject *kobj_queues;
    890	struct attribute attr_pasid;
    891
    892	/* shared virtual memory registered by this process */
    893	struct svm_range_list svms;
    894
    895	bool xnack_enabled;
    896
    897	atomic_t poison;
     898	/* Queues are in a paused state because we are in the process of doing a CRIU checkpoint */
    899	bool queues_paused;
    900};
    901
    902#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
    903extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
    904extern struct srcu_struct kfd_processes_srcu;
    905
    906/**
    907 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
    908 *
    909 * @filep: pointer to file structure.
    910 * @p: amdkfd process pointer.
    911 * @data: pointer to arg that was copied from user.
    912 *
    913 * Return: returns ioctl completion code.
    914 */
    915typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
    916				void *data);
    917
    918struct amdkfd_ioctl_desc {
    919	unsigned int cmd;
    920	int flags;
    921	amdkfd_ioctl_t *func;
    922	unsigned int cmd_drv;
    923	const char *name;
    924};
    925bool kfd_dev_is_large_bar(struct kfd_dev *dev);
    926
    927int kfd_process_create_wq(void);
    928void kfd_process_destroy_wq(void);
    929struct kfd_process *kfd_create_process(struct file *filep);
    930struct kfd_process *kfd_get_process(const struct task_struct *task);
    931struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
    932struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
    933
    934int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
    935int kfd_process_gpuid_from_adev(struct kfd_process *p,
    936			       struct amdgpu_device *adev, uint32_t *gpuid,
    937			       uint32_t *gpuidx);
    938static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
    939				uint32_t gpuidx, uint32_t *gpuid) {
    940	return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
    941}
    942static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
    943				struct kfd_process *p, uint32_t gpuidx) {
    944	return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
    945}
    946
    947void kfd_unref_process(struct kfd_process *p);
    948int kfd_process_evict_queues(struct kfd_process *p);
    949int kfd_process_restore_queues(struct kfd_process *p);
    950void kfd_suspend_all_processes(void);
    951int kfd_resume_all_processes(void);
    952
    953struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
    954							 uint32_t gpu_id);
    955
    956int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);
    957
    958int kfd_process_device_init_vm(struct kfd_process_device *pdd,
    959			       struct file *drm_file);
    960struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
    961						struct kfd_process *p);
    962struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
    963							struct kfd_process *p);
    964struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
    965							struct kfd_process *p);
    966
    967bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);
    968
    969int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
    970			  struct vm_area_struct *vma);
    971
    972/* KFD process API for creating and translating handles */
    973int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
    974					void *mem);
    975void *kfd_process_device_translate_handle(struct kfd_process_device *p,
    976					int handle);
    977void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
    978					int handle);
    979struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);
    980
    981/* PASIDs */
    982int kfd_pasid_init(void);
    983void kfd_pasid_exit(void);
    984bool kfd_set_pasid_limit(unsigned int new_limit);
    985unsigned int kfd_get_pasid_limit(void);
    986u32 kfd_pasid_alloc(void);
    987void kfd_pasid_free(u32 pasid);
    988
    989/* Doorbells */
    990size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
    991int kfd_doorbell_init(struct kfd_dev *kfd);
    992void kfd_doorbell_fini(struct kfd_dev *kfd);
    993int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
    994		      struct vm_area_struct *vma);
    995void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
    996					unsigned int *doorbell_off);
    997void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
    998u32 read_kernel_doorbell(u32 __iomem *db);
    999void write_kernel_doorbell(void __iomem *db, u32 value);
   1000void write_kernel_doorbell64(void __iomem *db, u64 value);
   1001unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
   1002					struct kfd_process_device *pdd,
   1003					unsigned int doorbell_id);
   1004phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
   1005int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
   1006				unsigned int *doorbell_index);
   1007void kfd_free_process_doorbells(struct kfd_dev *kfd,
   1008				unsigned int doorbell_index);
   1009/* GTT Sub-Allocator */
   1010
   1011int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
   1012			struct kfd_mem_obj **mem_obj);
   1013
   1014int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
   1015
   1016extern struct device *kfd_device;
   1017
   1018/* KFD's procfs */
   1019void kfd_procfs_init(void);
   1020void kfd_procfs_shutdown(void);
   1021int kfd_procfs_add_queue(struct queue *q);
   1022void kfd_procfs_del_queue(struct queue *q);
   1023
   1024/* Topology */
   1025int kfd_topology_init(void);
   1026void kfd_topology_shutdown(void);
   1027int kfd_topology_add_device(struct kfd_dev *gpu);
   1028int kfd_topology_remove_device(struct kfd_dev *gpu);
   1029struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
   1030						uint32_t proximity_domain);
   1031struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
   1032						uint32_t proximity_domain);
   1033struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
   1034struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
   1035struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
   1036struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev);
   1037int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
   1038int kfd_numa_node_to_apic_id(int numa_node_id);
   1039void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);
   1040
   1041/* Interrupts */
   1042int kfd_interrupt_init(struct kfd_dev *dev);
   1043void kfd_interrupt_exit(struct kfd_dev *dev);
   1044bool enqueue_ih_ring_entry(struct kfd_dev *kfd,	const void *ih_ring_entry);
   1045bool interrupt_is_wanted(struct kfd_dev *dev,
   1046				const uint32_t *ih_ring_entry,
   1047				uint32_t *patched_ihre, bool *flag);
   1048
   1049/* amdkfd Apertures */
   1050int kfd_init_apertures(struct kfd_process *process);
   1051
   1052void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
   1053				  uint64_t tba_addr,
   1054				  uint64_t tma_addr);
   1055
   1056/* CRIU */
   1057/*
   1058 * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any of the CRIU private
   1059 * structures:
   1060 * kfd_criu_process_priv_data
   1061 * kfd_criu_device_priv_data
   1062 * kfd_criu_bo_priv_data
   1063 * kfd_criu_queue_priv_data
   1064 * kfd_criu_event_priv_data
   1065 * kfd_criu_svm_range_priv_data
   1066 */
   1067
   1068#define KFD_CRIU_PRIV_VERSION 1
   1069
   1070struct kfd_criu_process_priv_data {
   1071	uint32_t version;
   1072	uint32_t xnack_mode;
   1073};
   1074
   1075struct kfd_criu_device_priv_data {
   1076	/* For future use */
   1077	uint64_t reserved;
   1078};
   1079
   1080struct kfd_criu_bo_priv_data {
   1081	uint64_t user_addr;
   1082	uint32_t idr_handle;
   1083	uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
   1084};
   1085
   1086/*
   1087 * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data,
    1088 * kfd_criu_svm_range_priv_data are the object type
   1089 */
   1090enum kfd_criu_object_type {
   1091	KFD_CRIU_OBJECT_TYPE_QUEUE,
   1092	KFD_CRIU_OBJECT_TYPE_EVENT,
   1093	KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
   1094};
   1095
   1096struct kfd_criu_svm_range_priv_data {
   1097	uint32_t object_type;
   1098	uint64_t start_addr;
   1099	uint64_t size;
   1100	/* Variable length array of attributes */
   1101	struct kfd_ioctl_svm_attribute attrs[];
   1102};
   1103
   1104struct kfd_criu_queue_priv_data {
   1105	uint32_t object_type;
   1106	uint64_t q_address;
   1107	uint64_t q_size;
   1108	uint64_t read_ptr_addr;
   1109	uint64_t write_ptr_addr;
   1110	uint64_t doorbell_off;
   1111	uint64_t eop_ring_buffer_address;
   1112	uint64_t ctx_save_restore_area_address;
   1113	uint32_t gpu_id;
   1114	uint32_t type;
   1115	uint32_t format;
   1116	uint32_t q_id;
   1117	uint32_t priority;
   1118	uint32_t q_percent;
   1119	uint32_t doorbell_id;
   1120	uint32_t gws;
   1121	uint32_t sdma_id;
   1122	uint32_t eop_ring_buffer_size;
   1123	uint32_t ctx_save_restore_area_size;
   1124	uint32_t ctl_stack_size;
   1125	uint32_t mqd_size;
   1126};
   1127
   1128struct kfd_criu_event_priv_data {
   1129	uint32_t object_type;
   1130	uint64_t user_handle;
   1131	uint32_t event_id;
   1132	uint32_t auto_reset;
   1133	uint32_t type;
   1134	uint32_t signaled;
   1135
   1136	union {
   1137		struct kfd_hsa_memory_exception_data memory_exception_data;
   1138		struct kfd_hsa_hw_exception_data hw_exception_data;
   1139	};
   1140};
   1141
   1142int kfd_process_get_queue_info(struct kfd_process *p,
   1143			       uint32_t *num_queues,
   1144			       uint64_t *priv_data_sizes);
   1145
   1146int kfd_criu_checkpoint_queues(struct kfd_process *p,
   1147			 uint8_t __user *user_priv_data,
   1148			 uint64_t *priv_data_offset);
   1149
   1150int kfd_criu_restore_queue(struct kfd_process *p,
   1151			   uint8_t __user *user_priv_data,
   1152			   uint64_t *priv_data_offset,
   1153			   uint64_t max_priv_data_size);
   1154
   1155int kfd_criu_checkpoint_events(struct kfd_process *p,
   1156			 uint8_t __user *user_priv_data,
   1157			 uint64_t *priv_data_offset);
   1158
   1159int kfd_criu_restore_event(struct file *devkfd,
   1160			   struct kfd_process *p,
   1161			   uint8_t __user *user_priv_data,
   1162			   uint64_t *priv_data_offset,
   1163			   uint64_t max_priv_data_size);
   1164/* CRIU - End */
   1165
   1166/* Queue Context Management */
   1167int init_queue(struct queue **q, const struct queue_properties *properties);
   1168void uninit_queue(struct queue *q);
   1169void print_queue_properties(struct queue_properties *q);
   1170void print_queue(struct queue *q);
   1171
   1172struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
   1173		struct kfd_dev *dev);
   1174struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
   1175		struct kfd_dev *dev);
   1176struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
   1177		struct kfd_dev *dev);
   1178struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
   1179		struct kfd_dev *dev);
   1180struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
   1181		struct kfd_dev *dev);
   1182struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
   1183		struct kfd_dev *dev);
   1184struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
   1185		struct kfd_dev *dev);
   1186struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
   1187void device_queue_manager_uninit(struct device_queue_manager *dqm);
   1188struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
   1189					enum kfd_queue_type type);
   1190void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
   1191int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
   1192
   1193/* Process Queue Manager */
   1194struct process_queue_node {
   1195	struct queue *q;
   1196	struct kernel_queue *kq;
   1197	struct list_head process_queue_list;
   1198};
   1199
   1200void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
   1201void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
   1202int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
   1203void pqm_uninit(struct process_queue_manager *pqm);
   1204int pqm_create_queue(struct process_queue_manager *pqm,
   1205			    struct kfd_dev *dev,
   1206			    struct file *f,
   1207			    struct queue_properties *properties,
   1208			    unsigned int *qid,
   1209			    const struct kfd_criu_queue_priv_data *q_data,
   1210			    const void *restore_mqd,
   1211			    const void *restore_ctl_stack,
   1212			    uint32_t *p_doorbell_offset_in_process);
   1213int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
   1214int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
   1215			struct queue_properties *p);
   1216int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
   1217			struct mqd_update_info *minfo);
   1218int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
   1219			void *gws);
   1220struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
   1221						unsigned int qid);
   1222struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
   1223						unsigned int qid);
   1224int pqm_get_wave_state(struct process_queue_manager *pqm,
   1225		       unsigned int qid,
   1226		       void __user *ctl_stack,
   1227		       u32 *ctl_stack_used_size,
   1228		       u32 *save_area_used_size);
   1229
   1230int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
   1231			      uint64_t fence_value,
   1232			      unsigned int timeout_ms);
   1233
   1234int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
   1235				  unsigned int qid,
   1236				  u32 *mqd_size,
   1237				  u32 *ctl_stack_size);
   1238/* Packet Manager */
   1239
   1240#define KFD_FENCE_COMPLETED (100)
   1241#define KFD_FENCE_INIT   (10)
   1242
   1243struct packet_manager {
   1244	struct device_queue_manager *dqm;
   1245	struct kernel_queue *priv_queue;
   1246	struct mutex lock;
   1247	bool allocated;
   1248	struct kfd_mem_obj *ib_buffer_obj;
   1249	unsigned int ib_size_bytes;
   1250	bool is_over_subscription;
   1251
   1252	const struct packet_manager_funcs *pmf;
   1253};
   1254
   1255struct packet_manager_funcs {
   1256	/* Support ASIC-specific packet formats for PM4 packets */
   1257	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
   1258			struct qcm_process_device *qpd);
   1259	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
   1260			uint64_t ib, size_t ib_size_in_dwords, bool chain);
   1261	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
   1262			struct scheduling_resources *res);
   1263	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
   1264			struct queue *q, bool is_static);
   1265	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
   1266			enum kfd_unmap_queues_filter mode,
   1267			uint32_t filter_param, bool reset);
   1268	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
   1269			uint64_t fence_address,	uint64_t fence_value);
   1270	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
   1271
   1272	/* Packet sizes */
   1273	int map_process_size;
   1274	int runlist_size;
   1275	int set_resources_size;
   1276	int map_queues_size;
   1277	int unmap_queues_size;
   1278	int query_status_size;
   1279	int release_mem_size;
   1280};
   1281
   1282extern const struct packet_manager_funcs kfd_vi_pm_funcs;
   1283extern const struct packet_manager_funcs kfd_v9_pm_funcs;
   1284extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;
   1285
   1286int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
   1287void pm_uninit(struct packet_manager *pm, bool hanging);
   1288int pm_send_set_resources(struct packet_manager *pm,
   1289				struct scheduling_resources *res);
   1290int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
   1291int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
   1292				uint64_t fence_value);
   1293
   1294int pm_send_unmap_queue(struct packet_manager *pm,
   1295			enum kfd_unmap_queues_filter mode,
   1296			uint32_t filter_param, bool reset);
   1297
   1298void pm_release_ib(struct packet_manager *pm);
   1299
   1300/* Following PM funcs can be shared among VI and AI */
   1301unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
   1302
   1303uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
   1304
   1305/* Events */
   1306extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
   1307extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
   1308extern const struct kfd_event_interrupt_class event_interrupt_class_v11;
   1309
   1310extern const struct kfd_device_global_init_class device_global_init_class_cik;
   1311
   1312int kfd_event_init_process(struct kfd_process *p);
   1313void kfd_event_free_process(struct kfd_process *p);
   1314int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
   1315int kfd_wait_on_events(struct kfd_process *p,
   1316		       uint32_t num_events, void __user *data,
   1317		       bool all, uint32_t user_timeout_ms,
   1318		       uint32_t *wait_result);
   1319void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
   1320				uint32_t valid_id_bits);
   1321void kfd_signal_iommu_event(struct kfd_dev *dev,
   1322			    u32 pasid, unsigned long address,
   1323			    bool is_write_requested, bool is_execute_requested);
   1324void kfd_signal_hw_exception_event(u32 pasid);
   1325int kfd_set_event(struct kfd_process *p, uint32_t event_id);
   1326int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
   1327int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);
   1328
   1329int kfd_event_create(struct file *devkfd, struct kfd_process *p,
   1330		     uint32_t event_type, bool auto_reset, uint32_t node_id,
   1331		     uint32_t *event_id, uint32_t *event_trigger_data,
   1332		     uint64_t *event_page_offset, uint32_t *event_slot_index);
   1333
   1334int kfd_get_num_events(struct kfd_process *p);
   1335int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
   1336
   1337void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
   1338				struct kfd_vm_fault_info *info);
   1339
   1340void kfd_signal_reset_event(struct kfd_dev *dev);
   1341
   1342void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid);
   1343
   1344void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
   1345
   1346static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
   1347{
   1348	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
   1349	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
   1350	       dev->adev->sdma.instance[0].fw_version >= 18) ||
   1351	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
   1352}
   1353
   1354bool kfd_is_locked(void);
   1355
   1356/* Compute profile */
   1357void kfd_inc_compute_active(struct kfd_dev *dev);
   1358void kfd_dec_compute_active(struct kfd_dev *dev);
   1359
   1360/* Cgroup Support */
   1361/* Check with device cgroup if @kfd device is accessible */
   1362static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
   1363{
   1364#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
   1365	struct drm_device *ddev = kfd->ddev;
   1366
   1367	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
   1368					  ddev->render->index,
   1369					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
   1370#else
   1371	return 0;
   1372#endif
   1373}
   1374
   1375/* Debugfs */
   1376#if defined(CONFIG_DEBUG_FS)
   1377
   1378void kfd_debugfs_init(void);
   1379void kfd_debugfs_fini(void);
   1380int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
   1381int pqm_debugfs_mqds(struct seq_file *m, void *data);
   1382int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
   1383int dqm_debugfs_hqds(struct seq_file *m, void *data);
   1384int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
   1385int pm_debugfs_runlist(struct seq_file *m, void *data);
   1386
   1387int kfd_debugfs_hang_hws(struct kfd_dev *dev);
   1388int pm_debugfs_hang_hws(struct packet_manager *pm);
   1389int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
   1390
   1391#else
   1392
   1393static inline void kfd_debugfs_init(void) {}
   1394static inline void kfd_debugfs_fini(void) {}
   1395
   1396#endif
   1397
   1398#endif