cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

habanalabs_ioctl.c (27056B)


      1// SPDX-License-Identifier: GPL-2.0
      2
      3/*
      4 * Copyright 2016-2022 HabanaLabs, Ltd.
      5 * All Rights Reserved.
      6 */
      7
      8#define pr_fmt(fmt)	"habanalabs: " fmt
      9
     10#include <uapi/misc/habanalabs.h>
     11#include "habanalabs.h"
     12
     13#include <linux/kernel.h>
     14#include <linux/fs.h>
     15#include <linux/uaccess.h>
     16#include <linux/slab.h>
     17
     18static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
     19	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
     20	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
     21	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
     22	[HL_DEBUG_OP_FUNNEL] = 0,
     23	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
     24	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
     25	[HL_DEBUG_OP_TIMESTAMP] = 0
     26
     27};
     28
     29static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
     30{
     31	struct hl_info_device_status dev_stat = {0};
     32	u32 size = args->return_size;
     33	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
     34
     35	if ((!size) || (!out))
     36		return -EINVAL;
     37
     38	dev_stat.status = hl_device_status(hdev);
     39
     40	return copy_to_user(out, &dev_stat,
     41			min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
     42}
     43
/*
 * hw_ip_info() - fill and return the static HW IP description to user space.
 * @hdev: habanalabs device structure.
 * @args: ioctl arguments; return_pointer/return_size describe the user buffer.
 *
 * Reports device id, user-visible SRAM/DRAM bases and sizes (excluding the
 * driver-reserved regions), firmware/CPLD versions, PLL parameters and the
 * user interrupt layout. Returns 0 on success, -EINVAL on a missing/empty
 * user buffer, -EFAULT if the copy to user space fails.
 */
static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size;

	if ((!size) || (!out))
		return -EINVAL;

	/* Memory below the user base addresses is reserved for the driver. */
	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	/* With MMU + virtual DRAM, user space sees the device-MMU VA range. */
	hw_ip.dram_base_address =
			hdev->mmu_enable && prop->dram_supports_virtual_memory ?
			prop->dmmu.start_addr : prop->dram_user_base_address;
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
	hw_ip.sram_size = prop->sram_size - sram_kmd_size;

	/* Round usable DRAM down to a whole number of DRAM pages when MMU on */
	if (hdev->mmu_enable)
		hw_ip.dram_size =
			DIV_ROUND_DOWN_ULL(prop->dram_size - dram_kmd_size,
						prop->dram_page_size) *
							prop->dram_page_size;
	else
		hw_ip.dram_size = prop->dram_size - dram_kmd_size;

	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;
	hw_ip.dram_page_size = prop->dram_page_size;
	hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
	hw_ip.num_of_events = prop->num_of_events;

	/* Copy no more than either side's string buffer can hold. */
	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	hw_ip.first_available_interrupt_id = prop->first_available_user_msix_interrupt;
	hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
	hw_ip.server_type = prop->server_type;

	return copy_to_user(out, &hw_ip,
		min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}
    103
    104static int hw_events_info(struct hl_device *hdev, bool aggregate,
    105			struct hl_info_args *args)
    106{
    107	u32 size, max_size = args->return_size;
    108	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    109	void *arr;
    110
    111	if ((!max_size) || (!out))
    112		return -EINVAL;
    113
    114	arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
    115
    116	return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
    117}
    118
    119static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    120{
    121	u32 max_size = args->return_size;
    122	u64 events_mask;
    123	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    124
    125	if ((max_size < sizeof(u64)) || (!out))
    126		return -EINVAL;
    127
    128	mutex_lock(&hpriv->notifier_event.lock);
    129	events_mask = hpriv->notifier_event.events_mask;
    130	hpriv->notifier_event.events_mask = 0;
    131	mutex_unlock(&hpriv->notifier_event.lock);
    132
    133	return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
    134}
    135
    136static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    137{
    138	struct hl_device *hdev = hpriv->hdev;
    139	struct hl_info_dram_usage dram_usage = {0};
    140	u32 max_size = args->return_size;
    141	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    142	struct asic_fixed_properties *prop = &hdev->asic_prop;
    143	u64 dram_kmd_size;
    144
    145	if ((!max_size) || (!out))
    146		return -EINVAL;
    147
    148	dram_kmd_size = (prop->dram_user_base_address -
    149				prop->dram_base_address);
    150	dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
    151					atomic64_read(&hdev->dram_used_mem);
    152	if (hpriv->ctx)
    153		dram_usage.ctx_dram_mem =
    154			atomic64_read(&hpriv->ctx->dram_phys_mem);
    155
    156	return copy_to_user(out, &dram_usage,
    157		min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
    158}
    159
    160static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
    161{
    162	struct hl_info_hw_idle hw_idle = {0};
    163	u32 max_size = args->return_size;
    164	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    165
    166	if ((!max_size) || (!out))
    167		return -EINVAL;
    168
    169	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
    170					hw_idle.busy_engines_mask_ext,
    171					HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
    172	hw_idle.busy_engines_mask =
    173			lower_32_bits(hw_idle.busy_engines_mask_ext[0]);
    174
    175	return copy_to_user(out, &hw_idle,
    176		min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
    177}
    178
/*
 * debug_coresight() - execute one coresight debug operation for user space.
 * @hdev: habanalabs device structure.
 * @ctx:  context the debug operation applies to.
 * @args: ioctl arguments holding op, reg_idx, enable and the user buffers.
 *
 * Optionally copies an input payload from user space (the caller,
 * hl_debug_ioctl(), has already clamped args->input_size to
 * hl_debug_struct_size[op], so the copy cannot overflow the allocation),
 * allocates an output buffer if requested, dispatches to the ASIC-specific
 * handler and copies the result back. Returns 0 or a negative errno.
 */
static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL, *output = NULL;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->reg_idx = args->reg_idx;
	params->enable = args->enable;
	params->op = args->op;

	if (args->input_ptr && args->input_size) {
		/* Allocation is sized by op, not by the user-supplied size. */
		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			rc = -EFAULT;
			dev_err(hdev->dev, "failed to copy input debug data\n");
			goto out;
		}

		params->input = input;
	}

	if (args->output_ptr && args->output_size) {
		output = kzalloc(args->output_size, GFP_KERNEL);
		if (!output) {
			rc = -ENOMEM;
			goto out;
		}

		params->output = output;
		params->output_size = args->output_size;
	}

	rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
	if (rc) {
		dev_err(hdev->dev,
			"debug coresight operation failed %d\n", rc);
		goto out;
	}

	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
					output, args->output_size)) {
		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
		rc = -EFAULT;
		goto out;
	}


out:
	/* Single exit point frees everything; kfree(NULL) is a no-op. */
	kfree(params);
	kfree(output);
	kfree(input);

	return rc;
}
    243
    244static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
    245{
    246	struct hl_info_device_utilization device_util = {0};
    247	u32 max_size = args->return_size;
    248	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    249	int rc;
    250
    251	if ((!max_size) || (!out))
    252		return -EINVAL;
    253
    254	rc = hl_device_utilization(hdev, &device_util.utilization);
    255	if (rc)
    256		return -EINVAL;
    257
    258	return copy_to_user(out, &device_util,
    259		min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
    260}
    261
    262static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
    263{
    264	struct hl_info_clk_rate clk_rate = {0};
    265	u32 max_size = args->return_size;
    266	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    267	int rc;
    268
    269	if ((!max_size) || (!out))
    270		return -EINVAL;
    271
    272	rc = hl_fw_get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, &clk_rate.max_clk_rate_mhz);
    273	if (rc)
    274		return rc;
    275
    276	return copy_to_user(out, &clk_rate, min_t(size_t, max_size, sizeof(clk_rate)))
    277										? -EFAULT : 0;
    278}
    279
    280static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
    281{
    282	struct hl_info_reset_count reset_count = {0};
    283	u32 max_size = args->return_size;
    284	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    285
    286	if ((!max_size) || (!out))
    287		return -EINVAL;
    288
    289	reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
    290	reset_count.soft_reset_cnt = hdev->reset_info.soft_reset_cnt;
    291
    292	return copy_to_user(out, &reset_count,
    293		min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
    294}
    295
    296static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
    297{
    298	struct hl_info_time_sync time_sync = {0};
    299	u32 max_size = args->return_size;
    300	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    301
    302	if ((!max_size) || (!out))
    303		return -EINVAL;
    304
    305	time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
    306	time_sync.host_time = ktime_get_raw_ns();
    307
    308	return copy_to_user(out, &time_sync,
    309		min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
    310}
    311
    312static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    313{
    314	struct hl_device *hdev = hpriv->hdev;
    315	struct hl_info_pci_counters pci_counters = {0};
    316	u32 max_size = args->return_size;
    317	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    318	int rc;
    319
    320	if ((!max_size) || (!out))
    321		return -EINVAL;
    322
    323	rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
    324	if (rc)
    325		return rc;
    326
    327	return copy_to_user(out, &pci_counters,
    328		min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
    329}
    330
/*
 * clk_throttle_info() - report clock throttling reasons and durations.
 * @hpriv: process private data.
 * @args:  ioctl arguments; return_pointer/return_size describe the user buffer.
 *
 * For every throttle type that has ever been active (aggregated_reason),
 * reports its start timestamp and total duration. An event that is still
 * in progress (end timestamp is zero) is measured up to "now".
 * Returns 0 on success, -EINVAL on bad buffer, -EFAULT on copy failure.
 */
static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_clk_throttle clk_throttle = {0};
	ktime_t end_time, zero_time = ktime_set(0, 0);
	u32 max_size = args->return_size;
	int i;

	if ((!max_size) || (!out))
		return -EINVAL;

	/* Lock keeps reasons and timestamps consistent while we snapshot. */
	mutex_lock(&hdev->clk_throttling.lock);

	clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;

	for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
		if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
			continue;

		clk_throttle.clk_throttling_timestamp_us[i] =
			ktime_to_us(hdev->clk_throttling.timestamp[i].start);

		/* Zero end timestamp means the event is still ongoing. */
		if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
			end_time = hdev->clk_throttling.timestamp[i].end;
		else
			end_time = ktime_get();

		clk_throttle.clk_throttling_duration_ns[i] =
			ktime_to_ns(ktime_sub(end_time,
				hdev->clk_throttling.timestamp[i].start));

	}
	mutex_unlock(&hdev->clk_throttling.lock);

	return copy_to_user(out, &clk_throttle,
		min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
}
    369
/*
 * cs_counters_info() - report command-submission drop counters.
 * @hpriv: process private data.
 * @args:  ioctl arguments; return_pointer/return_size describe the user buffer.
 *
 * Fills both the device-wide aggregated drop counters and, when the caller
 * has an active compute context, the per-context counters. Each counter is
 * read atomically but the set as a whole is not a consistent snapshot.
 * Returns 0 on success, -EINVAL on bad buffer, -EFAULT on copy failure.
 */
static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_cs_counters cs_counters = {0};
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_counters_atomic *cntr;
	u32 max_size = args->return_size;

	cntr = &hdev->aggregated_cs_counters;

	if ((!max_size) || (!out))
		return -EINVAL;

	/* Device-wide totals, aggregated over all contexts. */
	cs_counters.total_out_of_mem_drop_cnt =
			atomic64_read(&cntr->out_of_mem_drop_cnt);
	cs_counters.total_parsing_drop_cnt =
			atomic64_read(&cntr->parsing_drop_cnt);
	cs_counters.total_queue_full_drop_cnt =
			atomic64_read(&cntr->queue_full_drop_cnt);
	cs_counters.total_device_in_reset_drop_cnt =
			atomic64_read(&cntr->device_in_reset_drop_cnt);
	cs_counters.total_max_cs_in_flight_drop_cnt =
			atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
	cs_counters.total_validation_drop_cnt =
			atomic64_read(&cntr->validation_drop_cnt);

	/* Per-context counters, only when a compute context is open. */
	if (hpriv->ctx) {
		cs_counters.ctx_out_of_mem_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
		cs_counters.ctx_parsing_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.parsing_drop_cnt);
		cs_counters.ctx_queue_full_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.queue_full_drop_cnt);
		cs_counters.ctx_device_in_reset_drop_cnt =
				atomic64_read(
			&hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
		cs_counters.ctx_max_cs_in_flight_drop_cnt =
				atomic64_read(
			&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
		cs_counters.ctx_validation_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.validation_drop_cnt);
	}

	return copy_to_user(out, &cs_counters,
		min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}
    420
    421static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    422{
    423	struct hl_device *hdev = hpriv->hdev;
    424	struct asic_fixed_properties *prop = &hdev->asic_prop;
    425	struct hl_info_sync_manager sm_info = {0};
    426	u32 max_size = args->return_size;
    427	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    428
    429	if ((!max_size) || (!out))
    430		return -EINVAL;
    431
    432	if (args->dcore_id >= HL_MAX_DCORES)
    433		return -EINVAL;
    434
    435	sm_info.first_available_sync_object =
    436			prop->first_available_user_sob[args->dcore_id];
    437	sm_info.first_available_monitor =
    438			prop->first_available_user_mon[args->dcore_id];
    439	sm_info.first_available_cq =
    440			prop->first_available_cq[args->dcore_id];
    441
    442	return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
    443			sizeof(sm_info))) ? -EFAULT : 0;
    444}
    445
    446static int total_energy_consumption_info(struct hl_fpriv *hpriv,
    447			struct hl_info_args *args)
    448{
    449	struct hl_device *hdev = hpriv->hdev;
    450	struct hl_info_energy total_energy = {0};
    451	u32 max_size = args->return_size;
    452	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    453	int rc;
    454
    455	if ((!max_size) || (!out))
    456		return -EINVAL;
    457
    458	rc = hl_fw_cpucp_total_energy_get(hdev,
    459			&total_energy.total_energy_consumption);
    460	if (rc)
    461		return rc;
    462
    463	return copy_to_user(out, &total_energy,
    464		min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
    465}
    466
    467static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    468{
    469	struct hl_device *hdev = hpriv->hdev;
    470	struct hl_pll_frequency_info freq_info = { {0} };
    471	u32 max_size = args->return_size;
    472	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    473	int rc;
    474
    475	if ((!max_size) || (!out))
    476		return -EINVAL;
    477
    478	rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
    479	if (rc)
    480		return rc;
    481
    482	return copy_to_user(out, &freq_info,
    483		min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
    484}
    485
    486static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    487{
    488	struct hl_device *hdev = hpriv->hdev;
    489	u32 max_size = args->return_size;
    490	struct hl_power_info power_info = {0};
    491	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    492	int rc;
    493
    494	if ((!max_size) || (!out))
    495		return -EINVAL;
    496
    497	rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
    498	if (rc)
    499		return rc;
    500
    501	return copy_to_user(out, &power_info,
    502		min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
    503}
    504
    505static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    506{
    507	struct hl_device *hdev = hpriv->hdev;
    508	u32 max_size = args->return_size;
    509	struct hl_open_stats_info open_stats_info = {0};
    510	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    511
    512	if ((!max_size) || (!out))
    513		return -EINVAL;
    514
    515	open_stats_info.last_open_period_ms = jiffies64_to_msecs(
    516		hdev->last_open_session_duration_jif);
    517	open_stats_info.open_counter = hdev->open_counter;
    518	open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
    519	open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;
    520
    521	return copy_to_user(out, &open_stats_info,
    522		min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
    523}
    524
    525static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    526{
    527	struct hl_device *hdev = hpriv->hdev;
    528	u32 max_size = args->return_size;
    529	u32 pend_rows_num = 0;
    530	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    531	int rc;
    532
    533	if ((!max_size) || (!out))
    534		return -EINVAL;
    535
    536	rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
    537	if (rc)
    538		return rc;
    539
    540	return copy_to_user(out, &pend_rows_num,
    541			min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0;
    542}
    543
    544static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    545{
    546	struct hl_device *hdev = hpriv->hdev;
    547	u32 max_size = args->return_size;
    548	struct cpucp_hbm_row_info info = {0};
    549	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    550	int rc;
    551
    552	if ((!max_size) || (!out))
    553		return -EINVAL;
    554
    555	rc = hl_fw_dram_replaced_row_get(hdev, &info);
    556	if (rc)
    557		return rc;
    558
    559	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
    560}
    561
    562static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    563{
    564	struct hl_info_last_err_open_dev_time info = {0};
    565	struct hl_device *hdev = hpriv->hdev;
    566	u32 max_size = args->return_size;
    567	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    568
    569	if ((!max_size) || (!out))
    570		return -EINVAL;
    571
    572	info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);
    573
    574	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
    575}
    576
    577static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    578{
    579	struct hl_info_cs_timeout_event info = {0};
    580	struct hl_device *hdev = hpriv->hdev;
    581	u32 max_size = args->return_size;
    582	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    583
    584	if ((!max_size) || (!out))
    585		return -EINVAL;
    586
    587	info.seq = hdev->last_error.cs_timeout.seq;
    588	info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout.timestamp);
    589
    590	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
    591}
    592
    593static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    594{
    595	struct hl_device *hdev = hpriv->hdev;
    596	u32 max_size = args->return_size;
    597	struct hl_info_razwi_event info = {0};
    598	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    599
    600	if ((!max_size) || (!out))
    601		return -EINVAL;
    602
    603	info.timestamp = ktime_to_ns(hdev->last_error.razwi.timestamp);
    604	info.addr = hdev->last_error.razwi.addr;
    605	info.engine_id_1 = hdev->last_error.razwi.engine_id_1;
    606	info.engine_id_2 = hdev->last_error.razwi.engine_id_2;
    607	info.no_engine_id = hdev->last_error.razwi.non_engine_initiator;
    608	info.error_type = hdev->last_error.razwi.type;
    609
    610	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
    611}
    612
    613static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
    614{
    615	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
    616	struct hl_info_dev_memalloc_page_sizes info = {0};
    617	struct hl_device *hdev = hpriv->hdev;
    618	u32 max_size = args->return_size;
    619
    620	if ((!max_size) || (!out))
    621		return -EINVAL;
    622
    623	/*
    624	 * Future ASICs that will support multiple DRAM page sizes will support only "powers of 2"
    625	 * pages (unlike some of the ASICs before supporting multiple page sizes).
    626	 * For this reason for all ASICs that not support multiple page size the function will
    627	 * return an empty bitmask indicating that multiple page sizes is not supported.
    628	 */
    629	hdev->asic_funcs->get_valid_dram_page_orders(&info);
    630
    631	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
    632}
    633
    634static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
    635{
    636	int rc;
    637
    638	/* check if there is already a registered on that process */
    639	mutex_lock(&hpriv->notifier_event.lock);
    640	if (hpriv->notifier_event.eventfd) {
    641		mutex_unlock(&hpriv->notifier_event.lock);
    642		return -EINVAL;
    643	}
    644
    645	hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
    646	if (IS_ERR(hpriv->notifier_event.eventfd)) {
    647		rc = PTR_ERR(hpriv->notifier_event.eventfd);
    648		hpriv->notifier_event.eventfd = NULL;
    649		mutex_unlock(&hpriv->notifier_event.lock);
    650		return rc;
    651	}
    652
    653	mutex_unlock(&hpriv->notifier_event.lock);
    654	return 0;
    655}
    656
    657static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
    658{
    659	mutex_lock(&hpriv->notifier_event.lock);
    660	if (!hpriv->notifier_event.eventfd) {
    661		mutex_unlock(&hpriv->notifier_event.lock);
    662		return -EINVAL;
    663	}
    664
    665	eventfd_ctx_put(hpriv->notifier_event.eventfd);
    666	hpriv->notifier_event.eventfd = NULL;
    667	mutex_unlock(&hpriv->notifier_event.lock);
    668	return 0;
    669}
    670
/*
 * _hl_info_ioctl() - dispatch an HL_IOCTL_INFO request to its handler.
 * @hpriv: process private data.
 * @data:  kernel copy of the hl_info_args payload.
 * @dev:   device to log against (compute or control node).
 *
 * The first switch serves opcodes that are valid even while the device is
 * disabled or in reset; the remaining opcodes require an operational device
 * and return -EBUSY otherwise. Returns 0 or a negative errno.
 */
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	enum hl_device_status status;
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;

	int rc;

	/*
	 * Information is returned for the following opcodes even if the device
	 * is disabled or in reset.
	 */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);

	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);

	case HL_INFO_RESET_COUNT:
		return get_reset_count(hdev, args);

	case HL_INFO_HW_EVENTS:
		return hw_events_info(hdev, false, args);

	case HL_INFO_HW_EVENTS_AGGREGATE:
		return hw_events_info(hdev, true, args);

	case HL_INFO_CS_COUNTERS:
		return cs_counters_info(hpriv, args);

	case HL_INFO_CLK_THROTTLE_REASON:
		return clk_throttle_info(hpriv, args);

	case HL_INFO_SYNC_MANAGER:
		return sync_manager_info(hpriv, args);

	case HL_INFO_OPEN_STATS:
		return open_stats_info(hpriv, args);

	case HL_INFO_LAST_ERR_OPEN_DEV_TIME:
		return last_err_open_dev_info(hpriv, args);

	case HL_INFO_CS_TIMEOUT_EVENT:
		return cs_timeout_info(hpriv, args);

	case HL_INFO_RAZWI_EVENT:
		return razwi_info(hpriv, args);

	case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
		return dev_mem_alloc_page_sizes_info(hpriv, args);

	case HL_INFO_GET_EVENTS:
		return events_info(hpriv, args);

	default:
		/* Fall through to the operational-only opcodes below. */
		break;
	}

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	/* Opcodes below require the device to be operational. */
	switch (args->op) {
	case HL_INFO_DRAM_USAGE:
		rc = dram_usage_info(hpriv, args);
		break;

	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;

	case HL_INFO_DEVICE_UTILIZATION:
		rc = device_utilization(hdev, args);
		break;

	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;

	case HL_INFO_TIME_SYNC:
		return time_sync_info(hdev, args);

	case HL_INFO_PCI_COUNTERS:
		return pci_counters_info(hpriv, args);

	case HL_INFO_TOTAL_ENERGY:
		return total_energy_consumption_info(hpriv, args);

	case HL_INFO_PLL_FREQUENCY:
		return pll_frequency_info(hpriv, args);

	case HL_INFO_POWER:
		return power_info(hpriv, args);


	case HL_INFO_DRAM_REPLACED_ROWS:
		return dram_replaced_rows_info(hpriv, args);

	case HL_INFO_DRAM_PENDING_ROWS:
		return dram_pending_rows_info(hpriv, args);

	case HL_INFO_REGISTER_EVENTFD:
		return eventfd_register(hpriv, args);

	case HL_INFO_UNREGISTER_EVENTFD:
		return eventfd_unregister(hpriv, args);

	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}
    791
/* INFO ioctl entry for the compute device node; logs against hdev->dev. */
static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}

/* INFO ioctl entry for the control device node; logs against dev_ctrl. */
static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}
    801
/*
 * hl_debug_ioctl() - handle HL_IOCTL_DEBUG requests.
 * @hpriv: process private data.
 * @data:  kernel copy of the hl_debug_args payload.
 *
 * Coresight configuration ops require the device to be operational and in
 * debug mode; HL_DEBUG_OP_SET_MODE toggles debug mode itself. The user
 * input_size is clamped to the per-op structure size before dispatch so
 * debug_coresight() cannot copy past its allocation. Returns 0 or -errno.
 */
static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_debug_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;

	int rc = 0;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute DEBUG IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_DEBUG_OP_ETR:
	case HL_DEBUG_OP_ETF:
	case HL_DEBUG_OP_STM:
	case HL_DEBUG_OP_FUNNEL:
	case HL_DEBUG_OP_BMON:
	case HL_DEBUG_OP_SPMU:
	case HL_DEBUG_OP_TIMESTAMP:
		if (!hdev->in_debug) {
			dev_err_ratelimited(hdev->dev,
				"Rejecting debug configuration request because device not in debug mode\n");
			return -EFAULT;
		}
		/* Clamp to the op's expected payload size (security bound). */
		args->input_size = min(args->input_size, hl_debug_struct_size[args->op]);
		rc = debug_coresight(hdev, hpriv->ctx, args);
		break;

	case HL_DEBUG_OP_SET_MODE:
		rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
		break;

	default:
		dev_err(hdev->dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}
    846
/* Build an hl_ioctl_desc table entry, indexed by the ioctl's _IOC_NR. */
#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}

/* Dispatch table for the compute device node. */
static const struct hl_ioctl_desc hl_ioctls[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};

/* Dispatch table for the control device node (INFO queries only). */
static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
};
    862
/*
 * _hl_ioctl() - common ioctl marshalling for both device nodes.
 * @filep: file the ioctl was issued on.
 * @cmd:   user-supplied ioctl command (size bits may differ from ours).
 * @arg:   user pointer to the ioctl payload.
 * @ioctl: matching descriptor from the dispatch table.
 * @dev:   device to log against.
 *
 * Copies the payload in (using the user's size), sized by the larger of the
 * user's and the driver's structure size so older/newer userspace still
 * works, invokes the handler, then copies the payload back out. Small
 * payloads use a stack buffer to avoid an allocation.
 */
static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
		const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	struct hl_fpriv *hpriv = filep->private_data;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t *func;
	u32 hl_size;
	int retcode;

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(dev, "no function\n");
		retcode = -ENOTTY;
		goto out_err;
	}

	/* Allocate for the larger of the user's and the driver's size. */
	hl_size = _IOC_SIZE(ioctl->cmd);
	usize = asize = _IOC_SIZE(cmd);
	if (hl_size > asize)
		asize = hl_size;

	cmd = ioctl->cmd;

	if (cmd & (IOC_IN | IOC_OUT)) {
		/* Small payloads stay on the stack; larger ones are heap'd. */
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kzalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto out_err;
			}
		}
	}

	if (cmd & IOC_IN) {
		/* Copy only what the user provided; the rest stays zeroed. */
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			retcode = -EFAULT;
			goto out_err;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(hpriv, kdata);

	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		retcode = -EFAULT;

out_err:
	if (retcode)
		dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			  task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	return retcode;
}
    927
    928long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
    929{
    930	struct hl_fpriv *hpriv = filep->private_data;
    931	struct hl_device *hdev = hpriv->hdev;
    932	const struct hl_ioctl_desc *ioctl = NULL;
    933	unsigned int nr = _IOC_NR(cmd);
    934
    935	if (!hdev) {
    936		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
    937		return -ENODEV;
    938	}
    939
    940	if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
    941		ioctl = &hl_ioctls[nr];
    942	} else {
    943		dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
    944			task_pid_nr(current), nr);
    945		return -ENOTTY;
    946	}
    947
    948	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
    949}
    950
    951long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
    952{
    953	struct hl_fpriv *hpriv = filep->private_data;
    954	struct hl_device *hdev = hpriv->hdev;
    955	const struct hl_ioctl_desc *ioctl = NULL;
    956	unsigned int nr = _IOC_NR(cmd);
    957
    958	if (!hdev) {
    959		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
    960		return -ENODEV;
    961	}
    962
    963	if (nr == _IOC_NR(HL_IOCTL_INFO)) {
    964		ioctl = &hl_ioctls_control[nr];
    965	} else {
    966		dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
    967			task_pid_nr(current), nr);
    968		return -ENOTTY;
    969	}
    970
    971	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
    972}