cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kfd_packet_manager_v9.c (12847B)


// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_ai.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "kfd_pm4_opcodes.h"
#include "gc/gc_10_1_0_sh_mask.h"

static int pm_map_process_v9(struct packet_manager *pm,
		uint32_t *buffer, struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process *packet;
	uint64_t vm_page_table_base_addr = qpd->page_table_base;

	packet = (struct pm4_mes_map_process *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_mes_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 10;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
	packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
	packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
	packet->bitfields14.num_oac = qpd->num_oac;
	packet->bitfields14.sdma_enable = 1;
	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	if (qpd->tba_addr) {
		packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
		/* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is
		 * not defined, so setting it won't do any harm.
		 */
		packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8)
				| 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;

		packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
		packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
	}

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	packet->vm_context_page_table_base_addr_lo32 =
			lower_32_bits(vm_page_table_base_addr);
	packet->vm_context_page_table_base_addr_hi32 =
			upper_32_bits(vm_page_table_base_addr);

	return 0;
}
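
/*
 * Worked example (editor's note, not from the driver): the 10-bit GDS size
 * is split across two bitfields above, the low 6 bits in gds_size and the
 * next 4 bits in gds_size_hi. Assuming qpd->gds_size == 0x2C7
 * (binary 10 1100 0111), the packet carries gds_size = 0x07 and
 * gds_size_hi = 0xB.
 */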

static int pm_map_process_aldebaran(struct packet_manager *pm,
		uint32_t *buffer, struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process_aldebaran *packet;
	uint64_t vm_page_table_base_addr = qpd->page_table_base;

	packet = (struct pm4_mes_map_process_aldebaran *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
			sizeof(struct pm4_mes_map_process_aldebaran));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 10;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
	packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
	packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
	packet->bitfields14.num_oac = qpd->num_oac;
	packet->bitfields14.sdma_enable = 1;
	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	if (qpd->tba_addr) {
		packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
		packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
		packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
	}

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	packet->vm_context_page_table_base_addr_lo32 =
			lower_32_bits(vm_page_table_base_addr);
	packet->vm_context_page_table_base_addr_hi32 =
			upper_32_bits(vm_page_table_base_addr);

	return 0;
}
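
/*
 * Illustration (editor's note, assumed values): lower_32_bits() and
 * upper_32_bits() split a 64-bit GPUVM page-table base across the two
 * 32-bit packet words. For vm_page_table_base_addr == 0x0000000123456000ULL
 * the packet carries ..._lo32 = 0x23456000 and ..._hi32 = 0x00000001.
 * Note that unlike pm_map_process_v9() above, this variant never writes
 * sq_shader_tba_hi, so no TRAP_EN bit is set here.
 */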

static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_mes_runlist *packet;

	int concurrent_proc_cnt = 0;
	struct kfd_dev *kfd = pm->dqm->dev;

	/* Determine the number of processes to map together to HW:
	 * it can not exceed the number of VMIDs available to the
	 * scheduler, and it is determined by the smaller of the number
	 * of processes in the runlist and kfd module parameter
	 * hws_max_conc_proc.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	concurrent_proc_cnt = min(pm->dqm->processes_count,
			kfd->max_proc_per_quantum);

	packet = (struct pm4_mes_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
						sizeof(struct pm4_mes_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.chained_runlist_idle_disable = chain ? 1 : 0;
	packet->bitfields4.valid = 1;
	packet->bitfields4.process_cnt = concurrent_proc_cnt;
	packet->ordinal2 = lower_32_bits(ib);
	packet->ib_base_hi = upper_32_bits(ib);

	return 0;
}
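
/*
 * Example (editor's note, hypothetical numbers): with 7 processes in the
 * runlist and kfd->max_proc_per_quantum already clamped to 4 by
 * kgd2kfd_device_init(), concurrent_proc_cnt = min(7, 4) = 4, so the
 * RUN_LIST packet asks HWS to map at most 4 processes concurrently.
 */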

static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
				struct scheduling_resources *res)
{
	struct pm4_mes_set_resources *packet;

	packet = (struct pm4_mes_set_resources *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));

	packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_mes_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	return 0;
}

static inline bool pm_use_ext_eng(struct kfd_dev *dev)
{
	return dev->adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 2, 0);
}
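
/*
 * Note (editor's note, based on the common amdgpu definition): IP_VERSION(mj,
 * mn, rv) packs major/minor/revision as ((mj) << 16) | ((mn) << 8) | (rv), so
 * the comparison above selects SDMA IP v5.2.0 and newer. Such parts can
 * expose more SDMA engines than the legacy engine_sel encoding covers, which
 * is why the map/unmap packets below switch to extended_engine_sel for them.
 */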

static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
		struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
					sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
	packet->bitfields2.extended_engine_sel =
		extended_engine_sel__mes_map_queues__legacy_engine_sel;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		use_static = false; /* no static queues under SDMA */
		if (q->properties.sdma_engine_id < 2 && !pm_use_ext_eng(q->device))
			packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
				engine_sel__mes_map_queues__sdma0_vi;
		else {
			packet->bitfields2.extended_engine_sel =
				extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
			packet->bitfields2.engine_sel = q->properties.sdma_engine_id;
		}
		break;
	default:
		WARN(1, "queue type %d", q->properties.type);
		return -EINVAL;
	}
	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}
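
/*
 * Worked example (editor's note, assumed IDs): for an SDMA queue with
 * sdma_engine_id == 1 on a pre-v5.2.0 SDMA part, the legacy path sets
 * engine_sel = engine_sel__mes_map_queues__sdma0_vi + 1; for
 * sdma_engine_id == 5 on a newer part, the extended path instead sets
 * extended_engine_sel = ...__sdma0_to_7_sel and engine_sel = 5.
 */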

static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset)
{
	struct pm4_mes_unmap_queues *packet;

	packet = (struct pm4_mes_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));

	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_mes_unmap_queues));

	packet->bitfields2.extended_engine_sel = pm_use_ext_eng(pm->dqm->dev) ?
		extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel :
		extended_engine_sel__mes_unmap_queues__legacy_engine_sel;

	packet->bitfields2.engine_sel =
		engine_sel__mes_unmap_queues__compute;

	if (reset)
		packet->bitfields2.action =
			action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
			action__mes_unmap_queues__preempt_queues;

	switch (filter) {
	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_queues;
		break;
	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
		break;
	default:
		WARN(1, "filter %d", filter);
		return -EINVAL;
	}

	return 0;
}
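
/*
 * Usage sketch (editor's note, hypothetical call matching the signature
 * above): to preempt every queue belonging to PASID 0x42 without resetting,
 *
 *	pm_unmap_queues_v9(pm, buffer, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
 *			   0x42, false);
 *
 * which selects perform_request_on_pasid_queues with the preempt action.
 */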

static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint64_t fence_value)
{
	struct pm4_mes_query_status *packet;

	packet = (struct pm4_mes_query_status *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_query_status));

	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_mes_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	return 0;
}
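
/*
 * Semantics note (editor's note, hedged summary of the fields above): the
 * QUERY_STATUS packet asks HWS to write the 64-bit fence_value to
 * fence_address once preceding packets have been processed, so the driver
 * can poll that location to detect runlist completion.
 */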

const struct packet_manager_funcs kfd_v9_pm_funcs = {
	.map_process		= pm_map_process_v9,
	.runlist		= pm_runlist_v9,
	.set_resources		= pm_set_resources_v9,
	.map_queues		= pm_map_queues_v9,
	.unmap_queues		= pm_unmap_queues_v9,
	.query_status		= pm_query_status_v9,
	.release_mem		= NULL,
	.map_process_size	= sizeof(struct pm4_mes_map_process),
	.runlist_size		= sizeof(struct pm4_mes_runlist),
	.set_resources_size	= sizeof(struct pm4_mes_set_resources),
	.map_queues_size	= sizeof(struct pm4_mes_map_queues),
	.unmap_queues_size	= sizeof(struct pm4_mes_unmap_queues),
	.query_status_size	= sizeof(struct pm4_mes_query_status),
	.release_mem_size	= 0,
};
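
/*
 * Usage sketch (editor's note; the caller lives in kfd_packet_manager.c,
 * not in this file): the packet manager dispatches through whichever table
 * was selected for the ASIC, e.g.
 *
 *	retval = pm->pmf->map_queues(pm, buffer, q, is_static);
 *
 * with buffer sized via pm->pmf->map_queues_size beforehand.
 */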

const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
	.map_process		= pm_map_process_aldebaran,
	.runlist		= pm_runlist_v9,
	.set_resources		= pm_set_resources_v9,
	.map_queues		= pm_map_queues_v9,
	.unmap_queues		= pm_unmap_queues_v9,
	.query_status		= pm_query_status_v9,
	.release_mem		= NULL,
	.map_process_size	= sizeof(struct pm4_mes_map_process_aldebaran),
	.runlist_size		= sizeof(struct pm4_mes_runlist),
	.set_resources_size	= sizeof(struct pm4_mes_set_resources),
	.map_queues_size	= sizeof(struct pm4_mes_map_queues),
	.unmap_queues_size	= sizeof(struct pm4_mes_unmap_queues),
	.query_status_size	= sizeof(struct pm4_mes_query_status),
	.release_mem_size	= 0,
};
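
/*
 * Editor's note: the Aldebaran table differs from kfd_v9_pm_funcs only in
 * .map_process and .map_process_size; every other packet builder is shared
 * with the generic v9 implementation. Both tables leave .release_mem NULL
 * and .release_mem_size at 0, so no RELEASE_MEM packet is built on this path.
 */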