cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mmu_v1.c (21736B)


      1// SPDX-License-Identifier: GPL-2.0
      2
      3/*
      4 * Copyright 2016-2019 HabanaLabs, Ltd.
      5 * All Rights Reserved.
      6 */
      7
      8#include "../habanalabs.h"
      9#include "../../include/hw_ip/mmu/mmu_general.h"
     10
     11#include <linux/slab.h>
     12
     13#define MMU_V1_MAX_HOPS	(MMU_HOP4 + 1)
     14
     15static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
     16
     17static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
     18{
     19	struct pgt_info *pgt_info = NULL;
     20
     21	hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
     22				(unsigned long) hop_addr)
     23		if (hop_addr == pgt_info->shadow_addr)
     24			break;
     25
     26	return pgt_info;
     27}
     28
     29static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
     30{
     31	struct hl_device *hdev = ctx->hdev;
     32
     33	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
     34			hdev->asic_prop.mmu_hop_table_size);
     35	hash_del(&pgt_info->node);
     36	kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
     37	kfree(pgt_info);
     38}
     39
     40static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
     41{
     42	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
     43
     44	_free_hop(ctx, pgt_info);
     45}
     46
     47static u64 alloc_hop(struct hl_ctx *ctx)
     48{
     49	struct hl_device *hdev = ctx->hdev;
     50	struct asic_fixed_properties *prop = &hdev->asic_prop;
     51	struct pgt_info *pgt_info;
     52	u64 phys_addr, shadow_addr;
     53
     54	pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
     55	if (!pgt_info)
     56		return ULLONG_MAX;
     57
     58	phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
     59					prop->mmu_hop_table_size);
     60	if (!phys_addr) {
     61		dev_err(hdev->dev, "failed to allocate page\n");
     62		goto pool_add_err;
     63	}
     64
     65	shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
     66						GFP_KERNEL);
     67	if (!shadow_addr)
     68		goto shadow_err;
     69
     70	pgt_info->phys_addr = phys_addr;
     71	pgt_info->shadow_addr = shadow_addr;
     72	pgt_info->ctx = ctx;
     73	pgt_info->num_of_ptes = 0;
     74	hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
     75
     76	return shadow_addr;
     77
     78shadow_err:
     79	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr,
     80			prop->mmu_hop_table_size);
     81pool_add_err:
     82	kfree(pgt_info);
     83
     84	return ULLONG_MAX;
     85}
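
alloc_hop() pairs every dynamically allocated hop with two resources: a slot carved out of the device's page-table region and a zeroed host-side shadow copy, both tied together in a pgt_info record. Below is a minimal userspace sketch of that pairing, with an invented bump pointer standing in for the gen_pool and no hash bookkeeping; all names and sizes are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define EXAMPLE_HOP_TABLE_SIZE 0x1000ULL

struct example_pgt_info {
	uint64_t phys_addr;	/* where the hop lives in the device PGT region */
	void *shadow;		/* zeroed host copy used for software lookups */
};

static uint64_t example_next_phys = 0x8000000ULL;	/* fake device PGT cursor */

static struct example_pgt_info *example_alloc_hop(void)
{
	struct example_pgt_info *pgt_info = malloc(sizeof(*pgt_info));

	if (!pgt_info)
		return NULL;

	pgt_info->phys_addr = example_next_phys;
	example_next_phys += EXAMPLE_HOP_TABLE_SIZE;

	pgt_info->shadow = calloc(1, EXAMPLE_HOP_TABLE_SIZE);
	if (!pgt_info->shadow) {
		free(pgt_info);
		return NULL;
	}

	return pgt_info;
}

int main(void)
{
	struct example_pgt_info *hop = example_alloc_hop();

	if (!hop)
		return 1;

	printf("hop at device addr 0x%llx, host shadow %p\n",
	       (unsigned long long)hop->phys_addr, hop->shadow);

	free(hop->shadow);
	free(hop);
	return 0;
}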
     86
     87static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
     88{
     89	return ctx->hdev->asic_prop.mmu_pgt_addr +
     90			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
     91}
     92
     93static inline u64 get_hop0_addr(struct hl_ctx *ctx)
     94{
     95	return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
     96			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
     97}
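
Both helpers above compute the same per-ASID offset, once into the device's physical hop0 area at mmu_pgt_addr and once into the host shadow array mmu_shadow_hop0. A standalone sketch of the arithmetic, using a made-up base address and hop-table size, follows.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MMU_PGT_ADDR	0x1000000ULL	/* hypothetical device PGT base */
#define EXAMPLE_HOP_TABLE_SIZE	0x1000ULL	/* hypothetical hop table size */

static uint64_t example_phys_hop0_addr(uint32_t asid)
{
	/* each ASID owns one hop0 table, laid out back to back */
	return EXAMPLE_MMU_PGT_ADDR + (uint64_t)asid * EXAMPLE_HOP_TABLE_SIZE;
}

int main(void)
{
	uint32_t asid;

	for (asid = 0; asid < 4; asid++)
		printf("asid %u -> hop0 table at 0x%llx\n", asid,
		       (unsigned long long)example_phys_hop0_addr(asid));
	return 0;
}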
     98
     99static void flush(struct hl_ctx *ctx)
    100{
    101	/* flush all writes from all cores to reach PCI */
    102	mb();
    103	ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
    104}
    105
    106/* transform the value to physical address when writing to H/W */
    107static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
    108{
    109	/*
    110	 * The value to write is actually the address of the next shadow hop +
    111	 * flags at the 12 LSBs.
    112	 * Hence in order to get the value to write to the physical PTE, we
    113	 * clear the 12 LSBs and translate the shadow hop to its associated
    114	 * physical hop, and add back the original 12 LSBs.
    115	 */
    116	u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
    117				(val & FLAGS_MASK);
    118
    119	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
    120					get_phys_addr(ctx, shadow_pte_addr),
    121					phys_val);
    122
    123	*(u64 *) (uintptr_t) shadow_pte_addr = val;
    124}
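
write_pte() keeps two copies in sync: the shadow PTE on the host stores the shadow address of the next hop plus flags in the 12 LSBs, while the device PTE must hold the corresponding physical address with the same flags. The sketch below reproduces just that clear-translate-recombine step with invented masks and a stubbed shadow-to-physical lookup; it is not the driver's address layout.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FLAGS_MASK	0xFFFULL		/* low 12 bits hold flags */
#define EXAMPLE_ADDR_MASK	(~EXAMPLE_FLAGS_MASK)

/* stand-in for get_phys_addr(): pretend phys hops sit 0x100000 below shadow */
static uint64_t example_shadow_to_phys(uint64_t shadow_addr)
{
	return shadow_addr - 0x100000ULL;
}

int main(void)
{
	uint64_t shadow_pte_val = 0xABC000ULL | 0x3ULL;	/* next shadow hop + flags */
	uint64_t phys_val =
		example_shadow_to_phys(shadow_pte_val & EXAMPLE_ADDR_MASK) |
		(shadow_pte_val & EXAMPLE_FLAGS_MASK);

	printf("shadow pte value 0x%llx -> device pte value 0x%llx\n",
	       (unsigned long long)shadow_pte_val,
	       (unsigned long long)phys_val);
	return 0;
}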
    125
    126/* do not transform the value to physical address when writing to H/W */
    127static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
    128					u64 val)
    129{
    130	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
    131					get_phys_addr(ctx, shadow_pte_addr),
    132					val);
    133	*(u64 *) (uintptr_t) shadow_pte_addr = val;
    134}
    135
     136/* zero the whole PTE, which clears the last and present bits */
    137static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
    138{
    139	/* no need to transform the value to physical address */
    140	write_final_pte(ctx, pte_addr, 0);
    141}
    142
    143static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
    144{
    145	get_pgt_info(ctx, hop_addr)->num_of_ptes++;
    146}
    147
    148/*
    149 * put_pte - decrement the num of ptes and free the hop if possible
    150 *
    151 * @ctx: pointer to the context structure
    152 * @hop_addr: addr of the hop
    153 *
    154 * This function returns the number of ptes left on this hop. If the number is
     155 * 0, it means the hop itself was freed.
    156 */
    157static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
    158{
    159	struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
    160	int num_of_ptes_left;
    161
    162	pgt_info->num_of_ptes--;
    163
    164	/*
    165	 * Need to save the number of ptes left because free_hop might free
    166	 * the pgt_info
    167	 */
    168	num_of_ptes_left = pgt_info->num_of_ptes;
    169	if (!num_of_ptes_left)
    170		_free_hop(ctx, pgt_info);
    171
    172	return num_of_ptes_left;
    173}
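
get_pte()/put_pte() maintain a per-hop count of live PTEs so that a hop table is released exactly when its last PTE goes away. A minimal refcount sketch of that pattern, detached from the driver's pgt_info and hash, is shown here; the example_hop type is invented.

#include <stdbool.h>
#include <stdio.h>

struct example_hop {
	int num_of_ptes;
	bool freed;
};

static void example_get_pte(struct example_hop *hop)
{
	hop->num_of_ptes++;
}

static int example_put_pte(struct example_hop *hop)
{
	int left = --hop->num_of_ptes;

	if (!left)
		hop->freed = true;	/* would be _free_hop() in the driver */

	return left;
}

int main(void)
{
	struct example_hop hop = { 0, false };
	int left;

	example_get_pte(&hop);
	example_get_pte(&hop);
	example_put_pte(&hop);
	left = example_put_pte(&hop);

	printf("ptes left: %d, hop freed: %d\n", left, hop.freed);
	return 0;
}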
    174
    175static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
    176					u64 *hop_addr_arr, u64 virt_addr, enum mmu_hop_num hop_idx)
    177{
    178	u64 mask, shift;
    179
    180	mask = mmu_prop->hop_masks[hop_idx];
    181	shift = mmu_prop->hop_shifts[hop_idx];
    182	return hop_addr_arr[hop_idx] +
    183			ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
    184}
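
get_hop_pte_addr() selects a PTE within a hop by masking and shifting the virtual address, then scaling the index by the PTE size. The sketch below walks a hypothetical five-level layout with 512-entry hops, 8-byte PTEs and a 12-bit page offset; the bit widths are illustrative, not the ASIC's actual values.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PTE_SIZE	8ULL
#define EXAMPLE_HOP_BITS	9ULL	/* 512 entries per hop */
#define EXAMPLE_PAGE_BITS	12ULL	/* 4 KB pages */

int main(void)
{
	uint64_t virt_addr = 0x123456789000ULL;
	int hop;

	for (hop = 0; hop <= 4; hop++) {
		uint64_t shift = EXAMPLE_PAGE_BITS +
				 (uint64_t)(4 - hop) * EXAMPLE_HOP_BITS;
		uint64_t mask = ((1ULL << EXAMPLE_HOP_BITS) - 1) << shift;
		uint64_t idx = (virt_addr & mask) >> shift;

		printf("hop%d: pte index %llu, byte offset %llu\n",
		       hop, (unsigned long long)idx,
		       (unsigned long long)(idx * EXAMPLE_PTE_SIZE));
	}
	return 0;
}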
    185
    186static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
    187						bool *is_new_hop)
    188{
    189	u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
    190
    191	if (hop_addr == ULLONG_MAX) {
    192		hop_addr = alloc_hop(ctx);
    193		*is_new_hop = (hop_addr != ULLONG_MAX);
    194	}
    195
    196	return hop_addr;
    197}
    198
    199/* translates shadow address inside hop to a physical address */
    200static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
    201{
    202	u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
    203	u64 shadow_hop_addr = shadow_addr & ~page_mask;
    204	u64 pte_offset = shadow_addr & page_mask;
    205	u64 phys_hop_addr;
    206
    207	if (shadow_hop_addr != get_hop0_addr(ctx))
    208		phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
    209	else
    210		phys_hop_addr = get_phys_hop0_addr(ctx);
    211
    212	return phys_hop_addr + pte_offset;
    213}
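
Because hop tables are naturally aligned to their power-of-two size, get_phys_addr() can split a shadow PTE address into a hop base and an in-hop offset with a simple mask. A short sketch of that split, with an invented hop-table size, follows.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_HOP_TABLE_SIZE	0x1000ULL	/* must be a power of two */

int main(void)
{
	uint64_t shadow_pte_addr = 0x7f12345670a8ULL;
	uint64_t page_mask = EXAMPLE_HOP_TABLE_SIZE - 1;
	uint64_t hop_base = shadow_pte_addr & ~page_mask;
	uint64_t pte_offset = shadow_pte_addr & page_mask;

	printf("shadow hop base 0x%llx, pte offset within hop 0x%llx\n",
	       (unsigned long long)hop_base,
	       (unsigned long long)pte_offset);
	return 0;
}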
    214
    215static int dram_default_mapping_init(struct hl_ctx *ctx)
    216{
    217	struct hl_device *hdev = ctx->hdev;
    218	struct asic_fixed_properties *prop = &hdev->asic_prop;
    219	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
    220		hop2_pte_addr, hop3_pte_addr, pte_val;
    221	int rc, i, j, hop3_allocated = 0;
    222
    223	if ((!prop->dram_supports_virtual_memory) ||
    224			(!hdev->dram_default_page_mapping) ||
    225			(ctx->asid == HL_KERNEL_ASID_ID))
    226		return 0;
    227
    228	num_of_hop3 = prop->dram_size_for_default_page_mapping;
    229	do_div(num_of_hop3, prop->dram_page_size);
    230	do_div(num_of_hop3, HOP_PTE_ENTRIES_512);
    231
    232	/* add hop1 and hop2 */
    233	total_hops = num_of_hop3 + 2;
    234
    235	ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops,  GFP_KERNEL);
    236	if (!ctx->dram_default_hops)
    237		return -ENOMEM;
    238
    239	hop0_addr = get_hop0_addr(ctx);
    240
    241	hop1_addr = alloc_hop(ctx);
    242	if (hop1_addr == ULLONG_MAX) {
    243		dev_err(hdev->dev, "failed to alloc hop 1\n");
    244		rc = -ENOMEM;
    245		goto hop1_err;
    246	}
    247
    248	ctx->dram_default_hops[total_hops - 1] = hop1_addr;
    249
    250	hop2_addr = alloc_hop(ctx);
    251	if (hop2_addr == ULLONG_MAX) {
    252		dev_err(hdev->dev, "failed to alloc hop 2\n");
    253		rc = -ENOMEM;
    254		goto hop2_err;
    255	}
    256
    257	ctx->dram_default_hops[total_hops - 2] = hop2_addr;
    258
    259	for (i = 0 ; i < num_of_hop3 ; i++) {
    260		ctx->dram_default_hops[i] = alloc_hop(ctx);
    261		if (ctx->dram_default_hops[i] == ULLONG_MAX) {
    262			dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
    263			rc = -ENOMEM;
    264			goto hop3_err;
    265		}
    266		hop3_allocated++;
    267	}
    268
    269	/* need only pte 0 in hops 0 and 1 */
    270	pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
    271	write_pte(ctx, hop0_addr, pte_val);
    272
    273	pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
    274	write_pte(ctx, hop1_addr, pte_val);
    275	get_pte(ctx, hop1_addr);
    276
    277	hop2_pte_addr = hop2_addr;
    278	for (i = 0 ; i < num_of_hop3 ; i++) {
    279		pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
    280				PAGE_PRESENT_MASK;
    281		write_pte(ctx, hop2_pte_addr, pte_val);
    282		get_pte(ctx, hop2_addr);
    283		hop2_pte_addr += HL_PTE_SIZE;
    284	}
    285
    286	pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
    287			LAST_MASK | PAGE_PRESENT_MASK;
    288
    289	for (i = 0 ; i < num_of_hop3 ; i++) {
    290		hop3_pte_addr = ctx->dram_default_hops[i];
    291		for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
    292			write_final_pte(ctx, hop3_pte_addr, pte_val);
    293			get_pte(ctx, ctx->dram_default_hops[i]);
    294			hop3_pte_addr += HL_PTE_SIZE;
    295		}
    296	}
    297
    298	flush(ctx);
    299
    300	return 0;
    301
    302hop3_err:
    303	for (i = 0 ; i < hop3_allocated ; i++)
    304		free_hop(ctx, ctx->dram_default_hops[i]);
    305
    306	free_hop(ctx, hop2_addr);
    307hop2_err:
    308	free_hop(ctx, hop1_addr);
    309hop1_err:
    310	kfree(ctx->dram_default_hops);
    311
    312	return rc;
    313}
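
The sizing logic at the top of dram_default_mapping_init() boils down to: number of hop3 tables = DRAM size / DRAM page size / PTEs per hop, with hop1 and hop2 stored at the tail of the dram_default_hops array. The sketch below reproduces that arithmetic with made-up sizes.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_DRAM_SIZE	(16ULL << 30)	/* hypothetical 16 GB of DRAM */
#define EXAMPLE_DRAM_PAGE_SIZE	(2ULL << 20)	/* hypothetical 2 MB DRAM pages */
#define EXAMPLE_PTE_ENTRIES	512ULL		/* PTEs per hop3 table */

int main(void)
{
	uint64_t num_of_hop3 = EXAMPLE_DRAM_SIZE / EXAMPLE_DRAM_PAGE_SIZE /
			       EXAMPLE_PTE_ENTRIES;
	uint64_t total_hops = num_of_hop3 + 2;	/* plus hop1 and hop2 */

	printf("hop3 tables: %llu, array entries: %llu, hop1 at [%llu], hop2 at [%llu]\n",
	       (unsigned long long)num_of_hop3,
	       (unsigned long long)total_hops,
	       (unsigned long long)(total_hops - 1),
	       (unsigned long long)(total_hops - 2));
	return 0;
}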
    314
    315static void dram_default_mapping_fini(struct hl_ctx *ctx)
    316{
    317	struct hl_device *hdev = ctx->hdev;
    318	struct asic_fixed_properties *prop = &hdev->asic_prop;
    319	u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
    320		hop2_pte_addr, hop3_pte_addr;
    321	int i, j;
    322
    323	if ((!prop->dram_supports_virtual_memory) ||
    324			(!hdev->dram_default_page_mapping) ||
    325			(ctx->asid == HL_KERNEL_ASID_ID))
    326		return;
    327
    328	num_of_hop3 = prop->dram_size_for_default_page_mapping;
    329	do_div(num_of_hop3, prop->dram_page_size);
    330	do_div(num_of_hop3, HOP_PTE_ENTRIES_512);
    331
    332	hop0_addr = get_hop0_addr(ctx);
    333	/* add hop1 and hop2 */
    334	total_hops = num_of_hop3 + 2;
    335	hop1_addr = ctx->dram_default_hops[total_hops - 1];
    336	hop2_addr = ctx->dram_default_hops[total_hops - 2];
    337
    338	for (i = 0 ; i < num_of_hop3 ; i++) {
    339		hop3_pte_addr = ctx->dram_default_hops[i];
    340		for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
    341			clear_pte(ctx, hop3_pte_addr);
    342			put_pte(ctx, ctx->dram_default_hops[i]);
    343			hop3_pte_addr += HL_PTE_SIZE;
    344		}
    345	}
    346
     347	hop2_pte_addr = hop2_addr;
    349	for (i = 0 ; i < num_of_hop3 ; i++) {
    350		clear_pte(ctx, hop2_pte_addr);
    351		put_pte(ctx, hop2_addr);
    352		hop2_pte_addr += HL_PTE_SIZE;
    353	}
    354
    355	clear_pte(ctx, hop1_addr);
    356	put_pte(ctx, hop1_addr);
    357	clear_pte(ctx, hop0_addr);
    358
    359	kfree(ctx->dram_default_hops);
    360
    361	flush(ctx);
    362}
    363
    364/**
    365 * hl_mmu_v1_init() - initialize the MMU module.
    366 * @hdev: habanalabs device structure.
    367 *
    368 * This function does the following:
     369 * - Create a pool of pages for the page table hops.
     370 * - Allocate the shadow hop0 tables (one per ASID).
    371 *
    372 * Return: 0 for success, non-zero for failure.
    373 */
    374static int hl_mmu_v1_init(struct hl_device *hdev)
    375{
    376	struct asic_fixed_properties *prop = &hdev->asic_prop;
    377	int rc;
    378
    379	hdev->mmu_priv.dr.mmu_pgt_pool =
    380			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
    381
    382	if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
    383		dev_err(hdev->dev, "Failed to create page gen pool\n");
    384		return -ENOMEM;
    385	}
    386
    387	rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
    388			prop->mmu_hop0_tables_total_size,
    389			prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
    390			-1);
    391	if (rc) {
    392		dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
    393		goto err_pool_add;
    394	}
    395
    396	hdev->mmu_priv.dr.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
    397						prop->mmu_hop_table_size,
    398						GFP_KERNEL | __GFP_ZERO);
    399	if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
    400		rc = -ENOMEM;
    401		goto err_pool_add;
    402	}
    403
    404	/* MMU H/W init will be done in device hw_init() */
    405
    406	return 0;
    407
    408err_pool_add:
    409	gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
    410
    411	return rc;
    412}
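
hl_mmu_v1_init() splits the device page-table region in two: the first max_asid hop-table-sized chunks are the statically owned hop0 tables, and only the remainder is handed to the gen_pool for dynamically allocated hops. The sketch below shows that layout computation with invented property values; it does not use the kernel gen_pool API.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PGT_ADDR	0x8000000ULL	/* hypothetical PGT region base */
#define EXAMPLE_PGT_SIZE	0x400000ULL	/* hypothetical 4 MB PGT region */
#define EXAMPLE_HOP_TABLE_SIZE	0x1000ULL
#define EXAMPLE_MAX_ASID	64ULL

int main(void)
{
	uint64_t hop0_total = EXAMPLE_MAX_ASID * EXAMPLE_HOP_TABLE_SIZE;
	uint64_t pool_start = EXAMPLE_PGT_ADDR + hop0_total;
	uint64_t pool_size = EXAMPLE_PGT_SIZE - hop0_total;

	printf("hop0 area: 0x%llx..0x%llx (one table per ASID)\n",
	       (unsigned long long)EXAMPLE_PGT_ADDR,
	       (unsigned long long)(pool_start - 1));
	printf("dynamic hop pool: base 0x%llx, size 0x%llx\n",
	       (unsigned long long)pool_start,
	       (unsigned long long)pool_size);
	return 0;
}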
    413
    414/**
     415 * hl_mmu_v1_fini() - release the MMU module.
    416 * @hdev: habanalabs device structure.
    417 *
    418 * This function does the following:
     419 * - Free the shadow hop0 tables.
     420 * - Destroy the page table pool.
    421 *
    422 * All contexts should be freed before calling this function.
    423 */
    424static void hl_mmu_v1_fini(struct hl_device *hdev)
    425{
    426	/* MMU H/W fini was already done in device hw_fini() */
    427
    428	if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
    429		kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
    430		gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
    431
     432		/* Make sure that if we arrive here again without init being
     433		 * called, we won't cause a kernel panic. This can happen, for
     434		 * example, if we fail at certain points during hard reset.
     435		 */
    436		hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
    437	}
    438}
    439
    440/**
     441 * hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
    442 * @ctx: pointer to the context structure to initialize.
    443 *
     444 * Initialize the shadow hash that holds all page table hops related to this
     445 * context, and create the default DRAM mapping if needed.
    446 * Return: 0 on success, non-zero otherwise.
    447 */
    448static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx)
    449{
    450	hash_init(ctx->mmu_shadow_hash);
    451	return dram_default_mapping_init(ctx);
    452}
    453
    454/*
     455 * hl_mmu_v1_ctx_fini - disable a ctx from using the mmu module
    456 *
    457 * @ctx: pointer to the context structure
    458 *
    459 * This function does the following:
     460 * - Free the DRAM default page mapping hops
     461 * - Free any pgts which were not freed yet, reporting each leftover
     462 *   entry as an error
    463 */
    464static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
    465{
    466	struct hl_device *hdev = ctx->hdev;
    467	struct pgt_info *pgt_info;
    468	struct hlist_node *tmp;
    469	int i;
    470
    471	dram_default_mapping_fini(ctx);
    472
    473	if (!hash_empty(ctx->mmu_shadow_hash))
    474		dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
    475			ctx->asid);
    476
    477	hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
    478		dev_err_ratelimited(hdev->dev,
    479			"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
    480			pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
    481		_free_hop(ctx, pgt_info);
    482	}
    483}
    484
    485static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
    486				u64 virt_addr, bool is_dram_addr)
    487{
    488	u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
    489	struct hl_device *hdev = ctx->hdev;
    490	struct asic_fixed_properties *prop = &hdev->asic_prop;
    491	struct hl_mmu_properties *mmu_prop;
    492	bool is_huge, clear_hop3 = true;
    493	int hop_idx;
    494
    495	/* shifts and masks are the same in PMMU and HPMMU, use one of them */
    496	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
    497
    498	for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) {
    499		if (hop_idx == MMU_HOP0) {
    500			hop_addr[hop_idx] = get_hop0_addr(ctx);
    501		} else {
    502			hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
    503			if (hop_addr[hop_idx] == ULLONG_MAX)
    504				goto not_mapped;
    505		}
    506
    507		hop_pte_addr[hop_idx] =
    508				get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
    509
    510		curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
    511	}
    512
    513	is_huge = curr_pte & mmu_prop->last_mask;
    514
    515	if (is_dram_addr && !is_huge) {
    516		dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
    517		return -EFAULT;
    518	}
    519
    520	if (!is_huge) {
    521		hop_idx = MMU_HOP4;
    522		hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
    523		if (hop_addr[hop_idx] == ULLONG_MAX)
    524			goto not_mapped;
    525
    526		hop_pte_addr[hop_idx] =
    527				get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
    528		curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
    529		clear_hop3 = false;
    530	}
    531
    532	if (hdev->dram_default_page_mapping && is_dram_addr) {
    533		u64 default_pte = (prop->mmu_dram_default_page_addr &
    534				HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
    535					PAGE_PRESENT_MASK;
    536		if (curr_pte == default_pte) {
    537			dev_err(hdev->dev,
    538				"DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
    539					virt_addr);
    540			goto not_mapped;
    541		}
    542
    543		if (!(curr_pte & PAGE_PRESENT_MASK)) {
    544			dev_err(hdev->dev,
    545				"DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
    546					virt_addr);
    547			goto not_mapped;
    548		}
    549
    550		hop_idx = MMU_HOP3;
    551		write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
    552		put_pte(ctx, hop_addr[hop_idx]);
    553	} else {
    554		if (!(curr_pte & PAGE_PRESENT_MASK))
    555			goto not_mapped;
    556
    557		if (hop_addr[MMU_HOP4])
    558			clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
    559		else
    560			clear_pte(ctx, hop_pte_addr[MMU_HOP3]);
    561
    562		if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4]))
    563			clear_hop3 = true;
    564
    565		if (!clear_hop3)
    566			goto mapped;
    567
    568		for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) {
    569			clear_pte(ctx, hop_pte_addr[hop_idx]);
    570
    571			if (hop_idx == MMU_HOP0)
    572				break;
    573
    574			if (put_pte(ctx, hop_addr[hop_idx]))
    575				goto mapped;
    576		}
    577	}
    578
    579mapped:
    580	return 0;
    581
    582not_mapped:
    583	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
    584		virt_addr);
    585
    586	return -EINVAL;
    587}
    588
    589static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
    590			u32 page_size, bool is_dram_addr)
    591{
    592	u64 hop_addr[MMU_V1_MAX_HOPS] = {0}, hop_pte_addr[MMU_V1_MAX_HOPS] = {0}, curr_pte = 0;
    593	struct hl_device *hdev = ctx->hdev;
    594	struct asic_fixed_properties *prop = &hdev->asic_prop;
    595	struct hl_mmu_properties *mmu_prop;
    596	bool is_huge, hop_new[MMU_V1_MAX_HOPS] = {false};
    597	int num_hops, hop_idx, prev_hop, rc = -ENOMEM;
    598
    599	/*
     600	 * This mapping function can map a page or a huge page. For a huge page
     601	 * there are only 3 hops rather than 4. Currently the DRAM allocation
     602	 * uses huge pages only, but user memory could have been allocated with
     603	 * either of the two page sizes. Since this code is common to all three
     604	 * cases, we need this huge page check.
    605	 */
    606	if (is_dram_addr) {
    607		mmu_prop = &prop->dmmu;
    608		is_huge = true;
    609	} else if (page_size == prop->pmmu_huge.page_size) {
    610		mmu_prop = &prop->pmmu_huge;
    611		is_huge = true;
    612	} else {
    613		mmu_prop = &prop->pmmu;
    614		is_huge = false;
    615	}
    616
    617	num_hops = is_huge ? (MMU_V1_MAX_HOPS - 1) : MMU_V1_MAX_HOPS;
    618
    619	for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) {
    620		if (hop_idx == MMU_HOP0) {
    621			hop_addr[hop_idx] = get_hop0_addr(ctx);
    622		} else {
    623			hop_addr[hop_idx] =
    624					get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
    625			if (hop_addr[hop_idx] == ULLONG_MAX)
    626				goto err;
    627		}
    628
    629		hop_pte_addr[hop_idx] =
    630				get_hop_pte_addr(ctx, mmu_prop, hop_addr, virt_addr, hop_idx);
    631		curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[hop_idx];
    632	}
    633
    634	if (hdev->dram_default_page_mapping && is_dram_addr) {
    635		u64 default_pte = (prop->mmu_dram_default_page_addr &
    636					HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
    637						PAGE_PRESENT_MASK;
    638
    639		if (curr_pte != default_pte) {
    640			dev_err(hdev->dev,
    641				"DRAM: mapping already exists for virt_addr 0x%llx\n",
    642					virt_addr);
    643			rc = -EINVAL;
    644			goto err;
    645		}
    646
    647		for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
    648			if (hop_new[hop_idx]) {
    649				dev_err(hdev->dev, "DRAM mapping should not allocate more hops\n");
    650				rc = -EFAULT;
    651				goto err;
    652			}
    653		}
    654	} else if (curr_pte & PAGE_PRESENT_MASK) {
    655		dev_err(hdev->dev,
    656			"mapping already exists for virt_addr 0x%llx\n",
    657				virt_addr);
    658
    659		for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++)
    660			dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n", hop_idx,
    661					*(u64 *) (uintptr_t) hop_pte_addr[hop_idx],
    662					hop_pte_addr[hop_idx]);
    663
    664		rc = -EINVAL;
    665		goto err;
    666	}
    667
    668	curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
    669			| PAGE_PRESENT_MASK;
    670
    671	write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);
    672
    673	for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
    674		prev_hop = hop_idx - 1;
    675
    676		if (hop_new[hop_idx]) {
    677			curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
    678			write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
    679			if (hop_idx != MMU_HOP1)
    680				get_pte(ctx, hop_addr[prev_hop]);
    681		}
    682	}
    683
    684	get_pte(ctx, hop_addr[num_hops - 1]);
    685
    686	return 0;
    687
    688err:
     689	for (hop_idx = num_hops - 1; hop_idx > MMU_HOP0; hop_idx--) {
    690		if (hop_new[hop_idx])
    691			free_hop(ctx, hop_addr[hop_idx]);
    692	}
    693
    694	return rc;
    695}
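
The final mapping PTE written by hl_mmu_v1_map() is simply the physical page address masked to its address bits, OR'ed with the per-MMU "last" flag and the present flag. The sketch below composes such a value with invented mask and flag positions; the real HOP_PHYS_ADDR_MASK and last_mask layouts are ASIC-specific.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_HOP_PHYS_ADDR_MASK	(~0xFFFULL)	/* address bits above the flags */
#define EXAMPLE_LAST_MASK		0x800ULL	/* hypothetical "last hop" flag */
#define EXAMPLE_PAGE_PRESENT_MASK	0x1ULL		/* hypothetical "present" flag */

int main(void)
{
	uint64_t phys_addr = 0x20003F000ULL;
	uint64_t pte = (phys_addr & EXAMPLE_HOP_PHYS_ADDR_MASK) |
		       EXAMPLE_LAST_MASK | EXAMPLE_PAGE_PRESENT_MASK;

	printf("phys 0x%llx -> final pte 0x%llx\n",
	       (unsigned long long)phys_addr, (unsigned long long)pte);
	return 0;
}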
    696
    697/*
     698 * hl_mmu_v1_swap_out - marks all mappings of the given ctx as swapped out
    699 *
    700 * @ctx: pointer to the context structure
    701 *
    702 */
    703static void hl_mmu_v1_swap_out(struct hl_ctx *ctx)
    704{
    705
    706}
    707
    708/*
     709 * hl_mmu_v1_swap_in - marks all mappings of the given ctx as swapped in
    710 *
    711 * @ctx: pointer to the context structure
    712 *
    713 */
    714static void hl_mmu_v1_swap_in(struct hl_ctx *ctx)
    715{
    716
    717}
    718
    719static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
    720				struct hl_mmu_hop_info *hops)
    721{
    722	struct hl_device *hdev = ctx->hdev;
    723	struct asic_fixed_properties *prop = &hdev->asic_prop;
    724	struct hl_mmu_properties *mmu_prop;
    725	bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge;
    726	int i, used_hops;
    727
    728	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
    729						prop->dmmu.start_addr,
    730						prop->dmmu.end_addr);
    731	is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
    732						prop->pmmu.start_addr,
    733						prop->pmmu.end_addr);
    734	is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
    735						prop->pmmu_huge.page_size,
    736						prop->pmmu_huge.start_addr,
    737						prop->pmmu_huge.end_addr);
    738	if (is_dram_addr) {
    739		mmu_prop = &prop->dmmu;
    740		is_huge = true;
    741	} else if (is_pmmu_addr) {
    742		mmu_prop = &prop->pmmu;
    743		is_huge = false;
    744	} else if (is_pmmu_h_addr) {
    745		mmu_prop = &prop->pmmu_huge;
    746		is_huge = true;
    747	} else {
    748		return -EINVAL;
    749	}
    750
    751	used_hops = mmu_prop->num_hops;
    752
     753	/* huge pages use one hop less */
    754	if (is_huge)
    755		used_hops--;
    756
    757	hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx);
    758	hops->hop_info[0].hop_pte_addr =
    759			hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
    760					hops->hop_info[0].hop_addr, virt_addr);
    761	hops->hop_info[0].hop_pte_val =
    762			hdev->asic_funcs->read_pte(hdev,
    763						hops->hop_info[0].hop_pte_addr);
    764
    765	for (i = 1 ; i < used_hops ; i++) {
    766		hops->hop_info[i].hop_addr =
    767			hl_mmu_get_next_hop_addr(ctx,
    768					hops->hop_info[i - 1].hop_pte_val);
    769		if (hops->hop_info[i].hop_addr == ULLONG_MAX)
    770			return -EFAULT;
    771
    772		hops->hop_info[i].hop_pte_addr =
    773				hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
    774						hops->hop_info[i].hop_addr,
    775						virt_addr);
    776		hops->hop_info[i].hop_pte_val =
    777				hdev->asic_funcs->read_pte(hdev,
    778						hops->hop_info[i].hop_pte_addr);
    779
    780		if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
    781			return -EFAULT;
    782
    783		if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
    784			break;
    785	}
    786
     787	/* if we went over all hops then no last hop was found */
    788	if (i == mmu_prop->num_hops)
    789		return -EFAULT;
    790
    791	if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
    792		return -EFAULT;
    793
    794	hops->used_hops = i + 1;
    795
    796	return 0;
    797}
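
The walk in hl_mmu_v1_get_tlb_info() reads one PTE per hop and stops either on a PTE carrying the "last" flag (success) or on a non-present PTE or hop exhaustion (fault). The sketch below replays that termination logic over a fake PTE chain with invented flag bits.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PRESENT	0x1ULL
#define EXAMPLE_LAST	0x800ULL
#define EXAMPLE_HOPS	5

int main(void)
{
	/* fake PTE chain: three intermediate hops, then a last-level PTE */
	uint64_t pte_val[EXAMPLE_HOPS] = {
		0x1000 | EXAMPLE_PRESENT,
		0x2000 | EXAMPLE_PRESENT,
		0x3000 | EXAMPLE_PRESENT,
		0x4000 | EXAMPLE_PRESENT | EXAMPLE_LAST,
		0,
	};
	int i;

	for (i = 0; i < EXAMPLE_HOPS; i++) {
		if (!(pte_val[i] & EXAMPLE_PRESENT)) {
			printf("hop%d not present -> fault\n", i);
			return 1;
		}
		if (pte_val[i] & EXAMPLE_LAST)
			break;
	}

	if (i == EXAMPLE_HOPS) {
		printf("no last hop found -> fault\n");
		return 1;
	}

	printf("translation resolved using %d hops\n", i + 1);
	return 0;
}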
    798
    799/*
     800 * hl_mmu_v1_set_funcs - set the MMU function pointers for MMU v1
    801 *
    802 * @hdev: pointer to the device structure
    803 */
    804void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
    805{
    806	mmu->init = hl_mmu_v1_init;
    807	mmu->fini = hl_mmu_v1_fini;
    808	mmu->ctx_init = hl_mmu_v1_ctx_init;
    809	mmu->ctx_fini = hl_mmu_v1_ctx_fini;
    810	mmu->map = hl_mmu_v1_map;
    811	mmu->unmap = hl_mmu_v1_unmap;
    812	mmu->flush = flush;
    813	mmu->swap_out = hl_mmu_v1_swap_out;
    814	mmu->swap_in = hl_mmu_v1_swap_in;
    815	mmu->get_tlb_info = hl_mmu_v1_get_tlb_info;
    816}