cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ctxt-info.c (7160B)


// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2021 Intel Corporation
 */
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info.h"
#include "internal.h"
#include "iwl-prph.h"

/*
 * Allocate coherent DMA memory that does not cross a 2^32 boundary: if
 * the buffer straddles one, hold on to it, recurse so that the next
 * allocation lands elsewhere, then free the offending buffer. Give up
 * after two retries.
 */
static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
						    size_t size,
						    dma_addr_t *phys,
						    int depth)
{
	void *result;

	if (WARN(depth > 2,
		 "failed to allocate DMA memory not crossing 2^32 boundary"))
		return NULL;

	result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);

	if (!result)
		return NULL;

	if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) {
		void *old = result;
		dma_addr_t oldphys = *phys;

		/* keep the old buffer mapped while retrying, so the
		 * allocator has to return a different region
		 */
		result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
								phys,
								depth + 1);
		dma_free_coherent(trans->dev, size, old, oldphys);
	}

	return result;
}

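The boundary test itself, iwl_txq_crosses_4g_boundary() (defined elsewhere in
the driver), amounts to comparing the upper 32 address bits of a buffer's
first and last byte. A minimal standalone sketch of that check, with a
hypothetical crosses_4g_boundary() helper standing in for the driver's
function:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for iwl_txq_crosses_4g_boundary(): a buffer
 * crosses a 2^32 boundary iff its first and last bytes differ in
 * their upper 32 address bits. Illustration only, not driver code.
 */
static bool crosses_4g_boundary(uint64_t addr, size_t size)
{
	return (addr >> 32) != ((addr + size - 1) >> 32);
}

int main(void)
{
	/* 8 KiB buffer ending exactly at the 4 GiB line: no crossing */
	printf("%d\n", crosses_4g_boundary(0xffffe000ULL, 0x2000)); /* 0 */
	/* the same buffer shifted up one byte straddles the line */
	printf("%d\n", crosses_4g_boundary(0xffffe001ULL, 0x2000)); /* 1 */
	return 0;
}

The retry in the allocator above works because the old buffer stays mapped
across the recursive call, so dma_alloc_coherent() cannot hand back the same
region again.
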
static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
						   size_t size,
						   dma_addr_t *phys)
{
	return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
}

int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
				 const void *data, u32 len,
				 struct iwl_dram_data *dram)
{
	dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
							    &dram->physical);
	if (!dram->block)
		return -ENOMEM;

	dram->size = len;
	memcpy(dram->block, data, len);

	return 0;
}

void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->paging) {
		WARN_ON(dram->paging_cnt);
		return;
	}

	/* free paging */
	for (i = 0; i < dram->paging_cnt; i++)
		dma_free_coherent(trans->dev, dram->paging[i].size,
				  dram->paging[i].block,
				  dram->paging[i].physical);

	kfree(dram->paging);
	dram->paging_cnt = 0;
	dram->paging = NULL;
}

int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
			 const struct fw_img *fw,
			 struct iwl_context_info_dram *ctxt_dram)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i, ret, lmac_cnt, umac_cnt, paging_cnt;

	if (WARN(dram->paging,
		 "paging shouldn't already be initialized (%d pages)\n",
		 dram->paging_cnt))
		iwl_pcie_ctxt_info_free_paging(trans);

	lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
	/* add 1 due to separator */
	umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
	/* add 2 due to separators */
	paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);

	dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
	if (!dram->fw)
		return -ENOMEM;
	dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
	if (!dram->paging)
		return -ENOMEM;

	/* initialize lmac sections */
	for (i = 0; i < lmac_cnt; i++) {
		ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[i].data,
						   fw->sec[i].len,
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwl_pcie_ctxt_info_alloc_dma(trans,
						   fw->sec[dram->fw_cnt + 1].data,
						   fw->sec[dram->fw_cnt + 1].len,
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}

	/*
	 * Initialize paging.
	 * Unlike the umac and lmac sections, paging memory isn't stored in
	 * dram->fw but kept separately, because its lifetime is different:
	 * fw memory can be released on alive, whereas paging memory may only
	 * be freed when the device goes down.
	 * Accordingly, indexing into the fw image differs here as well -
	 * fw_cnt no longer changes, so the loop counter is added to it.
	 */
	for (i = 0; i < paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = dram->fw_cnt + i + 2;

		ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[fw_idx].data,
						   fw->sec[fw_idx].len,
						   &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] =
			cpu_to_le64(dram->paging[i].physical);
		dram->paging_cnt++;
	}

	return 0;
}

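The separator arithmetic above assumes a firmware image laid out as
[lmac sections][separator][umac sections][separator][paging sections]. A
sketch of how the fw->sec[] index of each section type falls out of that
layout (hypothetical helpers, for illustration only):

/* Hypothetical index helpers, assuming the layout
 * [lmac 0..lmac_cnt-1][sep][umac ...][sep][paging ...];
 * they mirror the +1/+2 offsets used in the loops above.
 */
static inline int lmac_sec_idx(int i)
{
	return i;
}

static inline int umac_sec_idx(int lmac_cnt, int i)
{
	return lmac_cnt + 1 + i;	/* skip the lmac separator */
}

static inline int paging_sec_idx(int lmac_cnt, int umac_cnt, int i)
{
	return lmac_cnt + umac_cnt + 2 + i;	/* skip both separators */
}

In the umac loop dram->fw_cnt equals lmac_cnt + i, and by the paging loop it
has settled at lmac_cnt + umac_cnt, which is why the driver can express both
offsets through fw_cnt.
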
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
			    const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info *ctxt_info;
	struct iwl_context_info_rbd_cfg *rx_cfg;
	u32 control_flags = 0, rb_size;
	dma_addr_t phys;
	int ret;

	ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
							  sizeof(*ctxt_info),
							  &phys);
	if (!ctxt_info)
		return -ENOMEM;

	trans_pcie->ctxt_info_dma_addr = phys;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_16K;
		break;
	default:
		WARN_ON(1);
		rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
	}

	WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
	control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
	control_flags |=
		u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
				IWL_CTXT_INFO_RB_CB_SIZE);
	control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
	ctxt_info->control.control_flags = cpu_to_le32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
	rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
	if (ret) {
		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
				  ctxt_info, trans_pcie->ctxt_info_dma_addr);
		return ret;
	}

	trans_pcie->ctxt_info = ctxt_info;

	iwl_enable_fw_load_int_ctx_info(trans);

	/* Configure debug, if it exists */
	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}

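u32_encode_bits() from linux/bitfield.h shifts a value up to the lowest set
bit of the given field mask, which is how the RB circular-buffer size and the
RB size code end up packed side by side in control_flags above. A minimal
sketch with made-up field masks (the real IWL_CTXT_INFO_RB_CB_SIZE and
IWL_CTXT_INFO_RB_SIZE masks are defined in iwl-context-info.h and may differ):

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Made-up field positions for illustration; not the driver's masks. */
#define DEMO_RB_CB_SIZE	GENMASK(7, 4)
#define DEMO_RB_SIZE	GENMASK(11, 8)

static u32 demo_control_flags(u32 cb_size, u32 rb_size)
{
	u32 flags = 0;

	/* u32_encode_bits(v, m) shifts v to the lowest set bit of m */
	flags |= u32_encode_bits(cb_size, DEMO_RB_CB_SIZE);
	flags |= u32_encode_bits(rb_size, DEMO_RB_SIZE);

	return flags;
}
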
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->ctxt_info)
		return;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
			  trans_pcie->ctxt_info,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_dma_addr = 0;
	trans_pcie->ctxt_info = NULL;

	iwl_pcie_ctxt_info_free_fw_img(trans);
}