cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

coresight-etm4x-core.c (62944B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
      4 */
      5
      6#include <linux/bitops.h>
      7#include <linux/kernel.h>
      8#include <linux/moduleparam.h>
      9#include <linux/init.h>
     10#include <linux/types.h>
     11#include <linux/device.h>
     12#include <linux/io.h>
     13#include <linux/err.h>
     14#include <linux/fs.h>
     15#include <linux/slab.h>
     16#include <linux/delay.h>
     17#include <linux/smp.h>
     18#include <linux/sysfs.h>
     19#include <linux/stat.h>
     20#include <linux/clk.h>
     21#include <linux/cpu.h>
     22#include <linux/cpu_pm.h>
     23#include <linux/coresight.h>
     24#include <linux/coresight-pmu.h>
     25#include <linux/pm_wakeup.h>
     26#include <linux/amba/bus.h>
     27#include <linux/seq_file.h>
     28#include <linux/uaccess.h>
     29#include <linux/perf_event.h>
     30#include <linux/platform_device.h>
     31#include <linux/pm_runtime.h>
     32#include <linux/property.h>
     33
     34#include <asm/barrier.h>
     35#include <asm/sections.h>
     36#include <asm/sysreg.h>
     37#include <asm/local.h>
     38#include <asm/virt.h>
     39
     40#include "coresight-etm4x.h"
     41#include "coresight-etm-perf.h"
     42#include "coresight-etm4x-cfg.h"
     43#include "coresight-self-hosted-trace.h"
     44#include "coresight-syscfg.h"
     45
     46static int boot_enable;
     47module_param(boot_enable, int, 0444);
     48MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");
     49
     50#define PARAM_PM_SAVE_FIRMWARE	  0 /* save self-hosted state as per firmware */
     51#define PARAM_PM_SAVE_NEVER	  1 /* never save any state */
     52#define PARAM_PM_SAVE_SELF_HOSTED 2 /* save self-hosted state only */
     53
     54static int pm_save_enable = PARAM_PM_SAVE_FIRMWARE;
     55module_param(pm_save_enable, int, 0444);
     56MODULE_PARM_DESC(pm_save_enable,
     57	"Save/restore state on power down: 1 = never, 2 = self-hosted");
     58
     59static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
     60static void etm4_set_default_config(struct etmv4_config *config);
     61static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
     62				  struct perf_event *event);
     63static u64 etm4_get_access_type(struct etmv4_config *config);
     64
     65static enum cpuhp_state hp_online;
     66
     67struct etm4_init_arg {
     68	unsigned int		pid;
     69	struct etmv4_drvdata	*drvdata;
     70	struct csdev_access	*csa;
     71};
     72
      73/*
      74 * Check if TRCSSPCICRn(i) is implemented for a given instance.
      75 *
      76 * TRCSSPCICR<n> is implemented only if all of the following
      77 * are true:
      78 *		TRCIDR4.NUMSSCC > n.
      79 *		TRCIDR4.NUMPC > 0b0000.
      80 *		TRCSSCSR<n>.PC == 0b1.
      81 */
     82static inline bool etm4x_sspcicrn_present(struct etmv4_drvdata *drvdata, int n)
     83{
     84	return (n < drvdata->nr_ss_cmp) &&
     85	       drvdata->nr_pe &&
     86	       (drvdata->config.ss_status[n] & TRCSSCSRn_PC);
     87}
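/*
 * Illustrative example (not part of the driver): with TRCIDR4.NUMSSCC = 2
 * the check above allows n = 0 or 1, but TRCSSPCICR<n> is only reported
 * as present when PE comparator inputs exist (drvdata->nr_pe != 0) and
 * the saved TRCSSCSR<n> value has its PC bit set.
 */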
     88
     89u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
     90{
     91	u64 res = 0;
     92
     93	switch (offset) {
     94	ETM4x_READ_SYSREG_CASES(res)
      95	default:
     96		pr_warn_ratelimited("etm4x: trying to read unsupported register @%x\n",
     97			 offset);
     98	}
     99
    100	if (!_relaxed)
    101		__iormb(res);	/* Imitate the !relaxed I/O helpers */
    102
    103	return res;
    104}
    105
    106void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
    107{
    108	if (!_relaxed)
    109		__iowmb();	/* Imitate the !relaxed I/O helpers */
    110	if (!_64bit)
    111		val &= GENMASK(31, 0);
    112
    113	switch (offset) {
    114	ETM4x_WRITE_SYSREG_CASES(val)
     115	default:
    116		pr_warn_ratelimited("etm4x: trying to write to unsupported register @%x\n",
    117			offset);
    118	}
    119}
    120
    121static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
    122{
    123	u64 res = 0;
    124
    125	switch (offset) {
    126	ETE_READ_CASES(res)
     127	default:
    128		pr_warn_ratelimited("ete: trying to read unsupported register @%x\n",
    129				    offset);
    130	}
    131
    132	if (!_relaxed)
    133		__iormb(res);	/* Imitate the !relaxed I/O helpers */
    134
    135	return res;
    136}
    137
    138static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
    139{
    140	if (!_relaxed)
    141		__iowmb();	/* Imitate the !relaxed I/O helpers */
    142	if (!_64bit)
    143		val &= GENMASK(31, 0);
    144
    145	switch (offset) {
    146	ETE_WRITE_CASES(val)
     147	default:
    148		pr_warn_ratelimited("ete: trying to write to unsupported register @%x\n",
    149				    offset);
    150	}
    151}
    152
    153static void etm_detect_os_lock(struct etmv4_drvdata *drvdata,
    154			       struct csdev_access *csa)
    155{
    156	u32 oslsr = etm4x_relaxed_read32(csa, TRCOSLSR);
    157
    158	drvdata->os_lock_model = ETM_OSLSR_OSLM(oslsr);
    159}
    160
    161static void etm_write_os_lock(struct etmv4_drvdata *drvdata,
    162			      struct csdev_access *csa, u32 val)
    163{
    164	val = !!val;
    165
    166	switch (drvdata->os_lock_model) {
    167	case ETM_OSLOCK_PRESENT:
    168		etm4x_relaxed_write32(csa, val, TRCOSLAR);
    169		break;
    170	case ETM_OSLOCK_PE:
    171		write_sysreg_s(val, SYS_OSLAR_EL1);
    172		break;
    173	default:
    174		pr_warn_once("CPU%d: Unsupported Trace OSLock model: %x\n",
    175			     smp_processor_id(), drvdata->os_lock_model);
    176		fallthrough;
    177	case ETM_OSLOCK_NI:
    178		return;
    179	}
    180	isb();
    181}
    182
    183static inline void etm4_os_unlock_csa(struct etmv4_drvdata *drvdata,
    184				      struct csdev_access *csa)
    185{
    186	WARN_ON(drvdata->cpu != smp_processor_id());
    187
    188	/* Writing 0 to OS Lock unlocks the trace unit registers */
    189	etm_write_os_lock(drvdata, csa, 0x0);
    190	drvdata->os_unlock = true;
    191}
    192
    193static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
    194{
    195	if (!WARN_ON(!drvdata->csdev))
    196		etm4_os_unlock_csa(drvdata, &drvdata->csdev->access);
    197}
    198
    199static void etm4_os_lock(struct etmv4_drvdata *drvdata)
    200{
    201	if (WARN_ON(!drvdata->csdev))
    202		return;
    203	/* Writing 0x1 to OS Lock locks the trace registers */
    204	etm_write_os_lock(drvdata, &drvdata->csdev->access, 0x1);
    205	drvdata->os_unlock = false;
    206}
    207
    208static void etm4_cs_lock(struct etmv4_drvdata *drvdata,
    209			 struct csdev_access *csa)
    210{
    211	/* Software Lock is only accessible via memory mapped interface */
    212	if (csa->io_mem)
    213		CS_LOCK(csa->base);
    214}
    215
    216static void etm4_cs_unlock(struct etmv4_drvdata *drvdata,
    217			   struct csdev_access *csa)
    218{
    219	if (csa->io_mem)
    220		CS_UNLOCK(csa->base);
    221}
    222
    223static int etm4_cpu_id(struct coresight_device *csdev)
    224{
    225	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    226
    227	return drvdata->cpu;
    228}
    229
    230static int etm4_trace_id(struct coresight_device *csdev)
    231{
    232	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    233
    234	return drvdata->trcid;
    235}
    236
    237struct etm4_enable_arg {
    238	struct etmv4_drvdata *drvdata;
    239	int rc;
    240};
    241
    242/*
    243 * etm4x_prohibit_trace - Prohibit the CPU from tracing at all ELs.
    244 * When the CPU supports FEAT_TRF, we could move the ETM to a trace
    245 * prohibited state by filtering the Exception levels via TRFCR_EL1.
    246 */
    247static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
    248{
    249	/* If the CPU doesn't support FEAT_TRF, nothing to do */
    250	if (!drvdata->trfcr)
    251		return;
    252	cpu_prohibit_trace();
    253}
    254
    255/*
    256 * etm4x_allow_trace - Allow CPU tracing in the respective ELs,
    257 * as configured by the drvdata->config.mode for the current
    258 * session. Even though we have TRCVICTLR bits to filter the
    259 * trace in the ELs, it doesn't prevent the ETM from generating
     260 * a packet (e.g., TraceInfo) that might contain the addresses from
    261 * the excluded levels. Thus we use the additional controls provided
    262 * via the Trace Filtering controls (FEAT_TRF) to make sure no trace
    263 * is generated for the excluded ELs.
    264 */
    265static void etm4x_allow_trace(struct etmv4_drvdata *drvdata)
    266{
    267	u64 trfcr = drvdata->trfcr;
    268
    269	/* If the CPU doesn't support FEAT_TRF, nothing to do */
    270	if (!trfcr)
    271		return;
    272
    273	if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
    274		trfcr &= ~TRFCR_ELx_ExTRE;
    275	if (drvdata->config.mode & ETM_MODE_EXCL_USER)
    276		trfcr &= ~TRFCR_ELx_E0TRE;
    277
    278	write_trfcr(trfcr);
    279}
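/*
 * Illustrative example (not part of the driver): with drvdata->trfcr set
 * to TRFCR_ELx_TS_VIRTUAL | TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE and a
 * session configured with ETM_MODE_EXCL_USER, the value written to the
 * Trace Filter Control register keeps ExTRE but clears E0TRE, so
 * FEAT_TRF blocks EL0 trace generation while kernel-level trace remains
 * allowed.
 */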
    280
    281#ifdef CONFIG_ETM4X_IMPDEF_FEATURE
    282
    283#define HISI_HIP08_AMBA_ID		0x000b6d01
    284#define ETM4_AMBA_MASK			0xfffff
    285#define HISI_HIP08_CORE_COMMIT_MASK	0x3000
    286#define HISI_HIP08_CORE_COMMIT_SHIFT	12
    287#define HISI_HIP08_CORE_COMMIT_FULL	0b00
    288#define HISI_HIP08_CORE_COMMIT_LVL_1	0b01
    289#define HISI_HIP08_CORE_COMMIT_REG	sys_reg(3, 1, 15, 2, 5)
    290
    291struct etm4_arch_features {
    292	void (*arch_callback)(bool enable);
    293};
    294
    295static bool etm4_hisi_match_pid(unsigned int id)
    296{
    297	return (id & ETM4_AMBA_MASK) == HISI_HIP08_AMBA_ID;
    298}
    299
    300static void etm4_hisi_config_core_commit(bool enable)
    301{
    302	u8 commit = enable ? HISI_HIP08_CORE_COMMIT_LVL_1 :
    303		    HISI_HIP08_CORE_COMMIT_FULL;
    304	u64 val;
    305
     306	/*
     307	 * Bits 12 and 13 of HISI_HIP08_CORE_COMMIT_REG are used together
     308	 * to set core-commit: 2'b00 means the CPU runs at full speed, while
     309	 * 2'b01, 2'b10 and 2'b11 reduce the pipeline speed, with 2'b01 being
     310	 * level-1 speed (the minimum). So bits 12 and 13 must be cleared together.
     311	 */
    312	val = read_sysreg_s(HISI_HIP08_CORE_COMMIT_REG);
    313	val &= ~HISI_HIP08_CORE_COMMIT_MASK;
    314	val |= commit << HISI_HIP08_CORE_COMMIT_SHIFT;
    315	write_sysreg_s(val, HISI_HIP08_CORE_COMMIT_REG);
    316}
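/*
 * Illustrative example (not part of the driver): with enable == true the
 * function above writes commit = 2'b01 into bits [13:12] of the IMPDEF
 * register (level-1 pipeline speed); with enable == false the field is
 * restored to 2'b00 (full speed).
 */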
    317
    318static struct etm4_arch_features etm4_features[] = {
    319	[ETM4_IMPDEF_HISI_CORE_COMMIT] = {
    320		.arch_callback = etm4_hisi_config_core_commit,
    321	},
    322	{},
    323};
    324
    325static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
    326{
    327	struct etm4_arch_features *ftr;
    328	int bit;
    329
    330	for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
    331		ftr = &etm4_features[bit];
    332
    333		if (ftr->arch_callback)
    334			ftr->arch_callback(true);
    335	}
    336}
    337
    338static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
    339{
    340	struct etm4_arch_features *ftr;
    341	int bit;
    342
    343	for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
    344		ftr = &etm4_features[bit];
    345
    346		if (ftr->arch_callback)
    347			ftr->arch_callback(false);
    348	}
    349}
    350
    351static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
    352				      unsigned int id)
    353{
    354	if (etm4_hisi_match_pid(id))
    355		set_bit(ETM4_IMPDEF_HISI_CORE_COMMIT, drvdata->arch_features);
    356}
    357#else
    358static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
    359{
    360}
    361
    362static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
    363{
    364}
    365
    366static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
    367				     unsigned int id)
    368{
    369}
    370#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */
    371
    372static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
    373{
    374	int i, rc;
    375	struct etmv4_config *config = &drvdata->config;
    376	struct coresight_device *csdev = drvdata->csdev;
    377	struct device *etm_dev = &csdev->dev;
    378	struct csdev_access *csa = &csdev->access;
    379
    380
    381	etm4_cs_unlock(drvdata, csa);
    382	etm4_enable_arch_specific(drvdata);
    383
    384	etm4_os_unlock(drvdata);
    385
    386	rc = coresight_claim_device_unlocked(csdev);
    387	if (rc)
    388		goto done;
    389
    390	/* Disable the trace unit before programming trace registers */
    391	etm4x_relaxed_write32(csa, 0, TRCPRGCTLR);
    392
    393	/*
    394	 * If we use system instructions, we need to synchronize the
    395	 * write to the TRCPRGCTLR, before accessing the TRCSTATR.
    396	 * See ARM IHI0064F, section
    397	 * "4.3.7 Synchronization of register updates"
    398	 */
    399	if (!csa->io_mem)
    400		isb();
    401
    402	/* wait for TRCSTATR.IDLE to go up */
    403	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
    404		dev_err(etm_dev,
    405			"timeout while waiting for Idle Trace Status\n");
    406	if (drvdata->nr_pe)
    407		etm4x_relaxed_write32(csa, config->pe_sel, TRCPROCSELR);
    408	etm4x_relaxed_write32(csa, config->cfg, TRCCONFIGR);
    409	/* nothing specific implemented */
    410	etm4x_relaxed_write32(csa, 0x0, TRCAUXCTLR);
    411	etm4x_relaxed_write32(csa, config->eventctrl0, TRCEVENTCTL0R);
    412	etm4x_relaxed_write32(csa, config->eventctrl1, TRCEVENTCTL1R);
    413	if (drvdata->stallctl)
    414		etm4x_relaxed_write32(csa, config->stall_ctrl, TRCSTALLCTLR);
    415	etm4x_relaxed_write32(csa, config->ts_ctrl, TRCTSCTLR);
    416	etm4x_relaxed_write32(csa, config->syncfreq, TRCSYNCPR);
    417	etm4x_relaxed_write32(csa, config->ccctlr, TRCCCCTLR);
    418	etm4x_relaxed_write32(csa, config->bb_ctrl, TRCBBCTLR);
    419	etm4x_relaxed_write32(csa, drvdata->trcid, TRCTRACEIDR);
    420	etm4x_relaxed_write32(csa, config->vinst_ctrl, TRCVICTLR);
    421	etm4x_relaxed_write32(csa, config->viiectlr, TRCVIIECTLR);
    422	etm4x_relaxed_write32(csa, config->vissctlr, TRCVISSCTLR);
    423	if (drvdata->nr_pe_cmp)
    424		etm4x_relaxed_write32(csa, config->vipcssctlr, TRCVIPCSSCTLR);
    425	for (i = 0; i < drvdata->nrseqstate - 1; i++)
    426		etm4x_relaxed_write32(csa, config->seq_ctrl[i], TRCSEQEVRn(i));
    427	etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
    428	etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
    429	etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
    430	for (i = 0; i < drvdata->nr_cntr; i++) {
    431		etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
    432		etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
    433		etm4x_relaxed_write32(csa, config->cntr_val[i], TRCCNTVRn(i));
    434	}
    435
    436	/*
    437	 * Resource selector pair 0 is always implemented and reserved.  As
    438	 * such start at 2.
    439	 */
    440	for (i = 2; i < drvdata->nr_resource * 2; i++)
    441		etm4x_relaxed_write32(csa, config->res_ctrl[i], TRCRSCTLRn(i));
    442
    443	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
    444		/* always clear status bit on restart if using single-shot */
    445		if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
    446			config->ss_status[i] &= ~TRCSSCSRn_STATUS;
    447		etm4x_relaxed_write32(csa, config->ss_ctrl[i], TRCSSCCRn(i));
    448		etm4x_relaxed_write32(csa, config->ss_status[i], TRCSSCSRn(i));
    449		if (etm4x_sspcicrn_present(drvdata, i))
    450			etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
    451	}
    452	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
    453		etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
    454		etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
    455	}
    456	for (i = 0; i < drvdata->numcidc; i++)
    457		etm4x_relaxed_write64(csa, config->ctxid_pid[i], TRCCIDCVRn(i));
    458	etm4x_relaxed_write32(csa, config->ctxid_mask0, TRCCIDCCTLR0);
    459	if (drvdata->numcidc > 4)
    460		etm4x_relaxed_write32(csa, config->ctxid_mask1, TRCCIDCCTLR1);
    461
    462	for (i = 0; i < drvdata->numvmidc; i++)
    463		etm4x_relaxed_write64(csa, config->vmid_val[i], TRCVMIDCVRn(i));
    464	etm4x_relaxed_write32(csa, config->vmid_mask0, TRCVMIDCCTLR0);
    465	if (drvdata->numvmidc > 4)
    466		etm4x_relaxed_write32(csa, config->vmid_mask1, TRCVMIDCCTLR1);
    467
    468	if (!drvdata->skip_power_up) {
    469		u32 trcpdcr = etm4x_relaxed_read32(csa, TRCPDCR);
    470
    471		/*
    472		 * Request to keep the trace unit powered and also
    473		 * emulation of powerdown
    474		 */
    475		etm4x_relaxed_write32(csa, trcpdcr | TRCPDCR_PU, TRCPDCR);
    476	}
    477
    478	/*
    479	 * ETE mandates that the TRCRSR is written to before
    480	 * enabling it.
    481	 */
    482	if (etm4x_is_ete(drvdata))
    483		etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);
    484
    485	etm4x_allow_trace(drvdata);
    486	/* Enable the trace unit */
    487	etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
    488
    489	/* Synchronize the register updates for sysreg access */
    490	if (!csa->io_mem)
    491		isb();
    492
    493	/* wait for TRCSTATR.IDLE to go back down to '0' */
    494	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
    495		dev_err(etm_dev,
    496			"timeout while waiting for Idle Trace Status\n");
    497
    498	/*
    499	 * As recommended by section 4.3.7 ("Synchronization when using the
    500	 * memory-mapped interface") of ARM IHI 0064D
    501	 */
    502	dsb(sy);
    503	isb();
    504
    505done:
    506	etm4_cs_lock(drvdata, csa);
    507
    508	dev_dbg(etm_dev, "cpu: %d enable smp call done: %d\n",
    509		drvdata->cpu, rc);
    510	return rc;
    511}
    512
    513static void etm4_enable_hw_smp_call(void *info)
    514{
    515	struct etm4_enable_arg *arg = info;
    516
    517	if (WARN_ON(!arg))
    518		return;
    519	arg->rc = etm4_enable_hw(arg->drvdata);
    520}
    521
    522/*
    523 * The goal of function etm4_config_timestamp_event() is to configure a
    524 * counter that will tell the tracer to emit a timestamp packet when it
     525 * reaches zero.  This is done in order to get a more fine-grained idea
    526 * of when instructions are executed so that they can be correlated
    527 * with execution on other CPUs.
    528 *
     529 * To do this the counter itself is configured to self-reload and
     530 * TRCRSCTLR1 (always true) is used to get the counter to decrement.  From
    531 * there a resource selector is configured with the counter and the
    532 * timestamp control register to use the resource selector to trigger the
    533 * event that will insert a timestamp packet in the stream.
    534 */
    535static int etm4_config_timestamp_event(struct etmv4_drvdata *drvdata)
    536{
    537	int ctridx, ret = -EINVAL;
    538	int counter, rselector;
    539	u32 val = 0;
    540	struct etmv4_config *config = &drvdata->config;
    541
    542	/* No point in trying if we don't have at least one counter */
    543	if (!drvdata->nr_cntr)
    544		goto out;
    545
    546	/* Find a counter that hasn't been initialised */
    547	for (ctridx = 0; ctridx < drvdata->nr_cntr; ctridx++)
    548		if (config->cntr_val[ctridx] == 0)
    549			break;
    550
    551	/* All the counters have been configured already, bail out */
    552	if (ctridx == drvdata->nr_cntr) {
    553		pr_debug("%s: no available counter found\n", __func__);
    554		ret = -ENOSPC;
    555		goto out;
    556	}
    557
    558	/*
     559	 * Search for an available resource selector, starting at
     560	 * '2' since every implementation has at least 2 resource selectors.
    561	 * ETMIDR4 gives the number of resource selector _pairs_,
    562	 * hence multiply by 2.
    563	 */
    564	for (rselector = 2; rselector < drvdata->nr_resource * 2; rselector++)
    565		if (!config->res_ctrl[rselector])
    566			break;
    567
    568	if (rselector == drvdata->nr_resource * 2) {
    569		pr_debug("%s: no available resource selector found\n",
    570			 __func__);
    571		ret = -ENOSPC;
    572		goto out;
    573	}
    574
    575	/* Remember what counter we used */
    576	counter = 1 << ctridx;
    577
    578	/*
    579	 * Initialise original and reload counter value to the smallest
    580	 * possible value in order to get as much precision as we can.
    581	 */
    582	config->cntr_val[ctridx] = 1;
    583	config->cntrldvr[ctridx] = 1;
    584
    585	/* Set the trace counter control register */
    586	val =  0x1 << 16	|  /* Bit 16, reload counter automatically */
    587	       0x0 << 7		|  /* Select single resource selector */
    588	       0x1;		   /* Resource selector 1, i.e always true */
    589
    590	config->cntr_ctrl[ctridx] = val;
    591
    592	val = 0x2 << 16		| /* Group 0b0010 - Counter and sequencers */
    593	      counter << 0;	  /* Counter to use */
    594
    595	config->res_ctrl[rselector] = val;
    596
    597	val = 0x0 << 7		| /* Select single resource selector */
    598	      rselector;	  /* Resource selector */
    599
    600	config->ts_ctrl = val;
    601
    602	ret = 0;
    603out:
    604	return ret;
    605}
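/*
 * Worked example (illustrative only, not part of the driver): if the
 * first free counter is ctridx = 0 and the first free resource selector
 * is rselector = 2, the function above leaves the config with
 *
 *	cntr_val[0]  = 1, cntrldvr[0] = 1	(smallest reload value)
 *	cntr_ctrl[0] = 0x10001			(self-reload, resource selector 1)
 *	res_ctrl[2]  = 0x20001			(group 0b0010, counter 0)
 *	ts_ctrl      = 0x2			(timestamp on resource selector 2)
 *
 * i.e. counter 0 decrements on the always-true resource and, each time
 * it reaches zero, resource selector 2 fires and a timestamp packet is
 * inserted in the trace stream.
 */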
    606
    607static int etm4_parse_event_config(struct coresight_device *csdev,
    608				   struct perf_event *event)
    609{
    610	int ret = 0;
    611	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    612	struct etmv4_config *config = &drvdata->config;
    613	struct perf_event_attr *attr = &event->attr;
    614	unsigned long cfg_hash;
    615	int preset;
    616
    617	/* Clear configuration from previous run */
    618	memset(config, 0, sizeof(struct etmv4_config));
    619
    620	if (attr->exclude_kernel)
    621		config->mode = ETM_MODE_EXCL_KERN;
    622
    623	if (attr->exclude_user)
    624		config->mode = ETM_MODE_EXCL_USER;
    625
    626	/* Always start from the default config */
    627	etm4_set_default_config(config);
    628
    629	/* Configure filters specified on the perf cmd line, if any. */
    630	ret = etm4_set_event_filters(drvdata, event);
    631	if (ret)
    632		goto out;
    633
    634	/* Go from generic option to ETMv4 specifics */
    635	if (attr->config & BIT(ETM_OPT_CYCACC)) {
    636		config->cfg |= TRCCONFIGR_CCI;
    637		/* TRM: Must program this for cycacc to work */
    638		config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
    639	}
    640	if (attr->config & BIT(ETM_OPT_TS)) {
    641		/*
    642		 * Configure timestamps to be emitted at regular intervals in
    643		 * order to correlate instructions executed on different CPUs
    644		 * (CPU-wide trace scenarios).
    645		 */
    646		ret = etm4_config_timestamp_event(drvdata);
    647
    648		/*
    649		 * No need to go further if timestamp intervals can't
    650		 * be configured.
    651		 */
    652		if (ret)
    653			goto out;
    654
    655		/* bit[11], Global timestamp tracing bit */
    656		config->cfg |= TRCCONFIGR_TS;
    657	}
    658
    659	/* Only trace contextID when runs in root PID namespace */
    660	if ((attr->config & BIT(ETM_OPT_CTXTID)) &&
    661	    task_is_in_init_pid_ns(current))
    662		/* bit[6], Context ID tracing bit */
    663		config->cfg |= TRCCONFIGR_CID;
    664
    665	/*
    666	 * If set bit ETM_OPT_CTXTID2 in perf config, this asks to trace VMID
    667	 * for recording CONTEXTIDR_EL2.  Do not enable VMID tracing if the
    668	 * kernel is not running in EL2.
    669	 */
    670	if (attr->config & BIT(ETM_OPT_CTXTID2)) {
    671		if (!is_kernel_in_hyp_mode()) {
    672			ret = -EINVAL;
    673			goto out;
    674		}
    675		/* Only trace virtual contextID when runs in root PID namespace */
    676		if (task_is_in_init_pid_ns(current))
    677			config->cfg |= TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT;
    678	}
    679
    680	/* return stack - enable if selected and supported */
    681	if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
    682		/* bit[12], Return stack enable bit */
    683		config->cfg |= TRCCONFIGR_RS;
    684
    685	/*
    686	 * Set any selected configuration and preset.
    687	 *
    688	 * This extracts the values of PMU_FORMAT_ATTR(configid) and PMU_FORMAT_ATTR(preset)
    689	 * in the perf attributes defined in coresight-etm-perf.c.
    690	 * configid uses bits 63:32 of attr->config2, preset uses bits 3:0 of attr->config.
    691	 * A zero configid means no configuration active, preset = 0 means no preset selected.
    692	 */
    693	if (attr->config2 & GENMASK_ULL(63, 32)) {
    694		cfg_hash = (u32)(attr->config2 >> 32);
    695		preset = attr->config & 0xF;
    696		ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset);
    697	}
    698
    699out:
    700	return ret;
    701}
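/*
 * Illustrative example (made-up values, not part of the driver): a perf
 * session that passes attr->config2 = 0x1234abcd00000000 and an
 * attr->config whose low nibble is 0x3 ends up calling
 * cscfg_csdev_enable_active_config() with cfg_hash = 0x1234abcd and
 * preset = 3, per the extraction above.
 */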
    702
    703static int etm4_enable_perf(struct coresight_device *csdev,
    704			    struct perf_event *event)
    705{
    706	int ret = 0;
    707	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    708
    709	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {
    710		ret = -EINVAL;
    711		goto out;
    712	}
    713
    714	/* Configure the tracer based on the session's specifics */
    715	ret = etm4_parse_event_config(csdev, event);
    716	if (ret)
    717		goto out;
    718	/* And enable it */
    719	ret = etm4_enable_hw(drvdata);
    720
    721out:
    722	return ret;
    723}
    724
    725static int etm4_enable_sysfs(struct coresight_device *csdev)
    726{
    727	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    728	struct etm4_enable_arg arg = { };
    729	unsigned long cfg_hash;
    730	int ret, preset;
    731
    732	/* enable any config activated by configfs */
    733	cscfg_config_sysfs_get_active_cfg(&cfg_hash, &preset);
    734	if (cfg_hash) {
    735		ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset);
    736		if (ret)
    737			return ret;
    738	}
    739
    740	spin_lock(&drvdata->spinlock);
    741
    742	/*
    743	 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
    744	 * ensures that register writes occur when cpu is powered.
    745	 */
    746	arg.drvdata = drvdata;
    747	ret = smp_call_function_single(drvdata->cpu,
    748				       etm4_enable_hw_smp_call, &arg, 1);
    749	if (!ret)
    750		ret = arg.rc;
    751	if (!ret)
    752		drvdata->sticky_enable = true;
    753	spin_unlock(&drvdata->spinlock);
    754
    755	if (!ret)
    756		dev_dbg(&csdev->dev, "ETM tracing enabled\n");
    757	return ret;
    758}
    759
    760static int etm4_enable(struct coresight_device *csdev,
    761		       struct perf_event *event, u32 mode)
    762{
    763	int ret;
    764	u32 val;
    765	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    766
    767	val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
    768
    769	/* Someone is already using the tracer */
    770	if (val)
    771		return -EBUSY;
    772
    773	switch (mode) {
    774	case CS_MODE_SYSFS:
    775		ret = etm4_enable_sysfs(csdev);
    776		break;
    777	case CS_MODE_PERF:
    778		ret = etm4_enable_perf(csdev, event);
    779		break;
    780	default:
    781		ret = -EINVAL;
    782	}
    783
    784	/* The tracer didn't start */
    785	if (ret)
    786		local_set(&drvdata->mode, CS_MODE_DISABLED);
    787
    788	return ret;
    789}
    790
    791static void etm4_disable_hw(void *info)
    792{
    793	u32 control;
    794	struct etmv4_drvdata *drvdata = info;
    795	struct etmv4_config *config = &drvdata->config;
    796	struct coresight_device *csdev = drvdata->csdev;
    797	struct device *etm_dev = &csdev->dev;
    798	struct csdev_access *csa = &csdev->access;
    799	int i;
    800
    801	etm4_cs_unlock(drvdata, csa);
    802	etm4_disable_arch_specific(drvdata);
    803
    804	if (!drvdata->skip_power_up) {
    805		/* power can be removed from the trace unit now */
    806		control = etm4x_relaxed_read32(csa, TRCPDCR);
    807		control &= ~TRCPDCR_PU;
    808		etm4x_relaxed_write32(csa, control, TRCPDCR);
    809	}
    810
    811	control = etm4x_relaxed_read32(csa, TRCPRGCTLR);
    812
    813	/* EN, bit[0] Trace unit enable bit */
    814	control &= ~0x1;
    815
    816	/*
    817	 * If the CPU supports v8.4 Trace filter Control,
    818	 * set the ETM to trace prohibited region.
    819	 */
    820	etm4x_prohibit_trace(drvdata);
    821	/*
    822	 * Make sure everything completes before disabling, as recommended
    823	 * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
    824	 * SSTATUS") of ARM IHI 0064D
    825	 */
    826	dsb(sy);
    827	isb();
    828	/* Trace synchronization barrier, is a nop if not supported */
    829	tsb_csync();
    830	etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
    831
    832	/* wait for TRCSTATR.PMSTABLE to go to '1' */
    833	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
    834		dev_err(etm_dev,
    835			"timeout while waiting for PM stable Trace Status\n");
    836	/* read the status of the single shot comparators */
    837	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
    838		config->ss_status[i] =
    839			etm4x_relaxed_read32(csa, TRCSSCSRn(i));
    840	}
    841
    842	/* read back the current counter values */
    843	for (i = 0; i < drvdata->nr_cntr; i++) {
    844		config->cntr_val[i] =
    845			etm4x_relaxed_read32(csa, TRCCNTVRn(i));
    846	}
    847
    848	coresight_disclaim_device_unlocked(csdev);
    849	etm4_cs_lock(drvdata, csa);
    850
    851	dev_dbg(&drvdata->csdev->dev,
    852		"cpu: %d disable smp call done\n", drvdata->cpu);
    853}
    854
    855static int etm4_disable_perf(struct coresight_device *csdev,
    856			     struct perf_event *event)
    857{
    858	u32 control;
    859	struct etm_filters *filters = event->hw.addr_filters;
    860	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    861	struct perf_event_attr *attr = &event->attr;
    862
    863	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
    864		return -EINVAL;
    865
    866	etm4_disable_hw(drvdata);
    867	/*
    868	 * The config_id occupies bits 63:32 of the config2 perf event attr
    869	 * field. If this is non-zero then we will have enabled a config.
    870	 */
    871	if (attr->config2 & GENMASK_ULL(63, 32))
    872		cscfg_csdev_disable_active_config(csdev);
    873
    874	/*
    875	 * Check if the start/stop logic was active when the unit was stopped.
    876	 * That way we can re-enable the start/stop logic when the process is
    877	 * scheduled again.  Configuration of the start/stop logic happens in
    878	 * function etm4_set_event_filters().
    879	 */
    880	control = etm4x_relaxed_read32(&csdev->access, TRCVICTLR);
    881	/* TRCVICTLR::SSSTATUS, bit[9] */
    882	filters->ssstatus = (control & BIT(9));
    883
    884	return 0;
    885}
    886
    887static void etm4_disable_sysfs(struct coresight_device *csdev)
    888{
    889	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    890
    891	/*
    892	 * Taking hotplug lock here protects from clocks getting disabled
    893	 * with tracing being left on (crash scenario) if user disable occurs
    894	 * after cpu online mask indicates the cpu is offline but before the
    895	 * DYING hotplug callback is serviced by the ETM driver.
    896	 */
    897	cpus_read_lock();
    898	spin_lock(&drvdata->spinlock);
    899
    900	/*
    901	 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
    902	 * ensures that register writes occur when cpu is powered.
    903	 */
    904	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
    905
    906	spin_unlock(&drvdata->spinlock);
    907	cpus_read_unlock();
    908
    909	dev_dbg(&csdev->dev, "ETM tracing disabled\n");
    910}
    911
    912static void etm4_disable(struct coresight_device *csdev,
    913			 struct perf_event *event)
    914{
    915	u32 mode;
    916	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
    917
    918	/*
    919	 * For as long as the tracer isn't disabled another entity can't
    920	 * change its status.  As such we can read the status here without
    921	 * fearing it will change under us.
    922	 */
    923	mode = local_read(&drvdata->mode);
    924
    925	switch (mode) {
    926	case CS_MODE_DISABLED:
    927		break;
    928	case CS_MODE_SYSFS:
    929		etm4_disable_sysfs(csdev);
    930		break;
    931	case CS_MODE_PERF:
    932		etm4_disable_perf(csdev, event);
    933		break;
    934	}
    935
    936	if (mode)
    937		local_set(&drvdata->mode, CS_MODE_DISABLED);
    938}
    939
    940static const struct coresight_ops_source etm4_source_ops = {
    941	.cpu_id		= etm4_cpu_id,
    942	.trace_id	= etm4_trace_id,
    943	.enable		= etm4_enable,
    944	.disable	= etm4_disable,
    945};
    946
    947static const struct coresight_ops etm4_cs_ops = {
    948	.source_ops	= &etm4_source_ops,
    949};
    950
    951static inline bool cpu_supports_sysreg_trace(void)
    952{
    953	u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
    954
    955	return ((dfr0 >> ID_AA64DFR0_TRACEVER_SHIFT) & 0xfUL) > 0;
    956}
    957
    958static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
    959				    struct csdev_access *csa)
    960{
    961	u32 devarch;
    962
    963	if (!cpu_supports_sysreg_trace())
    964		return false;
    965
    966	/*
    967	 * ETMs implementing sysreg access must implement TRCDEVARCH.
    968	 */
    969	devarch = read_etm4x_sysreg_const_offset(TRCDEVARCH);
    970	switch (devarch & ETM_DEVARCH_ID_MASK) {
    971	case ETM_DEVARCH_ETMv4x_ARCH:
    972		*csa = (struct csdev_access) {
    973			.io_mem	= false,
    974			.read	= etm4x_sysreg_read,
    975			.write	= etm4x_sysreg_write,
    976		};
    977		break;
    978	case ETM_DEVARCH_ETE_ARCH:
    979		*csa = (struct csdev_access) {
    980			.io_mem	= false,
    981			.read	= ete_sysreg_read,
    982			.write	= ete_sysreg_write,
    983		};
    984		break;
    985	default:
    986		return false;
    987	}
    988
    989	drvdata->arch = etm_devarch_to_arch(devarch);
    990	return true;
    991}
    992
    993static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
    994				   struct csdev_access *csa)
    995{
    996	u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
    997	u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
    998
    999	/*
   1000	 * All ETMs must implement TRCDEVARCH to indicate that
   1001	 * the component is an ETMv4. To support any broken
   1002	 * implementations we fall back to TRCIDR1 check, which
   1003	 * is not really reliable.
   1004	 */
   1005	if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
   1006		drvdata->arch = etm_devarch_to_arch(devarch);
   1007	} else {
   1008		pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
   1009			smp_processor_id(), devarch);
   1010
   1011		if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
   1012			return false;
   1013		drvdata->arch = etm_trcidr_to_arch(idr1);
   1014	}
   1015
   1016	*csa = CSDEV_ACCESS_IOMEM(drvdata->base);
   1017	return true;
   1018}
   1019
   1020static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata,
   1021				   struct csdev_access *csa)
   1022{
   1023	/*
   1024	 * Always choose the memory mapped io, if there is
   1025	 * a memory map to prevent sysreg access on broken
   1026	 * systems.
   1027	 */
   1028	if (drvdata->base)
   1029		return etm4_init_iomem_access(drvdata, csa);
   1030
   1031	if (etm4_init_sysreg_access(drvdata, csa))
   1032		return true;
   1033
   1034	return false;
   1035}
   1036
   1037static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
   1038{
   1039	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
   1040	u64 trfcr;
   1041
   1042	drvdata->trfcr = 0;
   1043	if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
   1044		return;
   1045
   1046	/*
   1047	 * If the CPU supports v8.4 SelfHosted Tracing, enable
   1048	 * tracing at the kernel EL and EL0, forcing to use the
   1049	 * virtual time as the timestamp.
   1050	 */
   1051	trfcr = (TRFCR_ELx_TS_VIRTUAL |
   1052		 TRFCR_ELx_ExTRE |
   1053		 TRFCR_ELx_E0TRE);
   1054
   1055	/* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */
   1056	if (is_kernel_in_hyp_mode())
   1057		trfcr |= TRFCR_EL2_CX;
   1058
   1059	drvdata->trfcr = trfcr;
   1060}
   1061
   1062static void etm4_init_arch_data(void *info)
   1063{
   1064	u32 etmidr0;
   1065	u32 etmidr2;
   1066	u32 etmidr3;
   1067	u32 etmidr4;
   1068	u32 etmidr5;
   1069	struct etm4_init_arg *init_arg = info;
   1070	struct etmv4_drvdata *drvdata;
   1071	struct csdev_access *csa;
   1072	int i;
   1073
   1074	drvdata = init_arg->drvdata;
   1075	csa = init_arg->csa;
   1076
   1077	/*
   1078	 * If we are unable to detect the access mechanism,
   1079	 * or unable to detect the trace unit type, fail
   1080	 * early.
   1081	 */
   1082	if (!etm4_init_csdev_access(drvdata, csa))
   1083		return;
   1084
   1085	/* Detect the support for OS Lock before we actually use it */
   1086	etm_detect_os_lock(drvdata, csa);
   1087
   1088	/* Make sure all registers are accessible */
   1089	etm4_os_unlock_csa(drvdata, csa);
   1090	etm4_cs_unlock(drvdata, csa);
   1091
   1092	etm4_check_arch_features(drvdata, init_arg->pid);
   1093
   1094	/* find all capabilities of the tracing unit */
   1095	etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);
   1096
   1097	/* INSTP0, bits[2:1] P0 tracing support field */
   1098	drvdata->instrp0 = !!(FIELD_GET(TRCIDR0_INSTP0_MASK, etmidr0) == 0b11);
   1099	/* TRCBB, bit[5] Branch broadcast tracing support bit */
   1100	drvdata->trcbb = !!(etmidr0 & TRCIDR0_TRCBB);
   1101	/* TRCCOND, bit[6] Conditional instruction tracing support bit */
   1102	drvdata->trccond = !!(etmidr0 & TRCIDR0_TRCCOND);
   1103	/* TRCCCI, bit[7] Cycle counting instruction bit */
   1104	drvdata->trccci = !!(etmidr0 & TRCIDR0_TRCCCI);
   1105	/* RETSTACK, bit[9] Return stack bit */
   1106	drvdata->retstack = !!(etmidr0 & TRCIDR0_RETSTACK);
   1107	/* NUMEVENT, bits[11:10] Number of events field */
   1108	drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);
   1109	/* QSUPP, bits[16:15] Q element support field */
   1110	drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0);
   1111	/* TSSIZE, bits[28:24] Global timestamp size field */
   1112	drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0);
   1113
   1114	/* maximum size of resources */
   1115	etmidr2 = etm4x_relaxed_read32(csa, TRCIDR2);
   1116	/* CIDSIZE, bits[9:5] Indicates the Context ID size */
   1117	drvdata->ctxid_size = FIELD_GET(TRCIDR2_CIDSIZE_MASK, etmidr2);
   1118	/* VMIDSIZE, bits[14:10] Indicates the VMID size */
   1119	drvdata->vmid_size = FIELD_GET(TRCIDR2_VMIDSIZE_MASK, etmidr2);
   1120	/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
   1121	drvdata->ccsize = FIELD_GET(TRCIDR2_CCSIZE_MASK, etmidr2);
   1122
   1123	etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
   1124	/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
   1125	drvdata->ccitmin = FIELD_GET(TRCIDR3_CCITMIN_MASK, etmidr3);
   1126	/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
   1127	drvdata->s_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_S_MASK, etmidr3);
   1128	drvdata->config.s_ex_level = drvdata->s_ex_level;
   1129	/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
   1130	drvdata->ns_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_NS_MASK, etmidr3);
   1131	/*
   1132	 * TRCERR, bit[24] whether a trace unit can trace a
   1133	 * system error exception.
   1134	 */
   1135	drvdata->trc_error = !!(etmidr3 & TRCIDR3_TRCERR);
   1136	/* SYNCPR, bit[25] implementation has a fixed synchronization period? */
   1137	drvdata->syncpr = !!(etmidr3 & TRCIDR3_SYNCPR);
   1138	/* STALLCTL, bit[26] is stall control implemented? */
   1139	drvdata->stallctl = !!(etmidr3 & TRCIDR3_STALLCTL);
   1140	/* SYSSTALL, bit[27] implementation can support stall control? */
   1141	drvdata->sysstall = !!(etmidr3 & TRCIDR3_SYSSTALL);
   1142	/*
    1143	 * NUMPROC - the number of PEs available for tracing, 5 bits wide
    1144	 *         = TRCIDR3.bits[13:12] : TRCIDR3.bits[30:28]
    1145	 *  bits[4:3] = TRCIDR3.bits[13:12] (since etm-v4.2, otherwise RES0)
    1146	 *  bits[2:0] = TRCIDR3.bits[30:28]
   1147	 */
   1148	drvdata->nr_pe =  (FIELD_GET(TRCIDR3_NUMPROC_HI_MASK, etmidr3) << 3) |
   1149			   FIELD_GET(TRCIDR3_NUMPROC_LO_MASK, etmidr3);
   1150	/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
   1151	drvdata->nooverflow = !!(etmidr3 & TRCIDR3_NOOVERFLOW);
   1152
   1153	/* number of resources trace unit supports */
   1154	etmidr4 = etm4x_relaxed_read32(csa, TRCIDR4);
    1155	/* NUMACPAIRS, bits[3:0] number of addr comparator pairs for tracing */
   1156	drvdata->nr_addr_cmp = FIELD_GET(TRCIDR4_NUMACPAIRS_MASK, etmidr4);
   1157	/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
   1158	drvdata->nr_pe_cmp = FIELD_GET(TRCIDR4_NUMPC_MASK, etmidr4);
   1159	/*
   1160	 * NUMRSPAIR, bits[19:16]
    1161	 * The number of resource pairs conveyed by the HW starts at 0, i.e. a
    1162	 * value of 0x0 indicates 1 resource pair, 0x1 indicates two and so on.
   1163	 * As such add 1 to the value of NUMRSPAIR for a better representation.
   1164	 *
   1165	 * For ETM v4.3 and later, 0x0 means 0, and no pairs are available -
   1166	 * the default TRUE and FALSE resource selectors are omitted.
   1167	 * Otherwise for values 0x1 and above the number is N + 1 as per v4.2.
   1168	 */
   1169	drvdata->nr_resource = FIELD_GET(TRCIDR4_NUMRSPAIR_MASK, etmidr4);
   1170	if ((drvdata->arch < ETM_ARCH_V4_3) || (drvdata->nr_resource > 0))
   1171		drvdata->nr_resource += 1;
   1172	/*
   1173	 * NUMSSCC, bits[23:20] the number of single-shot
    1174	 * comparator controls for tracing. Read any status regs as these
   1175	 * also contain RO capability data.
   1176	 */
   1177	drvdata->nr_ss_cmp = FIELD_GET(TRCIDR4_NUMSSCC_MASK, etmidr4);
   1178	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
   1179		drvdata->config.ss_status[i] =
   1180			etm4x_relaxed_read32(csa, TRCSSCSRn(i));
   1181	}
   1182	/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
   1183	drvdata->numcidc = FIELD_GET(TRCIDR4_NUMCIDC_MASK, etmidr4);
   1184	/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
   1185	drvdata->numvmidc = FIELD_GET(TRCIDR4_NUMVMIDC_MASK, etmidr4);
   1186
   1187	etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
   1188	/* NUMEXTIN, bits[8:0] number of external inputs implemented */
   1189	drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
   1190	/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
   1191	drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
   1192	/* ATBTRIG, bit[22] implementation can support ATB triggers? */
   1193	drvdata->atbtrig = !!(etmidr5 & TRCIDR5_ATBTRIG);
   1194	/*
   1195	 * LPOVERRIDE, bit[23] implementation supports
   1196	 * low-power state override
   1197	 */
   1198	drvdata->lpoverride = (etmidr5 & TRCIDR5_LPOVERRIDE) && (!drvdata->skip_power_up);
   1199	/* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
   1200	drvdata->nrseqstate = FIELD_GET(TRCIDR5_NUMSEQSTATE_MASK, etmidr5);
   1201	/* NUMCNTR, bits[30:28] number of counters available for tracing */
   1202	drvdata->nr_cntr = FIELD_GET(TRCIDR5_NUMCNTR_MASK, etmidr5);
   1203	etm4_cs_lock(drvdata, csa);
   1204	cpu_detect_trace_filtering(drvdata);
   1205}
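/*
 * Decode examples for the ID register fields read above (illustrative
 * only, not part of the driver):
 *
 *   NUMPROC:   with TRCIDR3.bits[13:12] = 0b01 and TRCIDR3.bits[30:28] =
 *              0b010, nr_pe = (0b01 << 3) | 0b010 = 10.
 *
 *   NUMRSPAIR: a raw value of 0x3 always yields nr_resource = 4 pairs;
 *              a raw value of 0x0 yields one pair before ETM v4.3 and
 *              zero pairs from v4.3 onwards.
 */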
   1206
   1207static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
   1208{
   1209	return etm4_get_access_type(config) << __bf_shf(TRCVICTLR_EXLEVEL_MASK);
   1210}
   1211
   1212/* Set ELx trace filter access in the TRCVICTLR register */
   1213static void etm4_set_victlr_access(struct etmv4_config *config)
   1214{
   1215	config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_MASK;
   1216	config->vinst_ctrl |= etm4_get_victlr_access_type(config);
   1217}
   1218
   1219static void etm4_set_default_config(struct etmv4_config *config)
   1220{
   1221	/* disable all events tracing */
   1222	config->eventctrl0 = 0x0;
   1223	config->eventctrl1 = 0x0;
   1224
   1225	/* disable stalling */
   1226	config->stall_ctrl = 0x0;
   1227
   1228	/* enable trace synchronization every 4096 bytes, if available */
   1229	config->syncfreq = 0xC;
   1230
   1231	/* disable timestamp event */
   1232	config->ts_ctrl = 0x0;
   1233
   1234	/* TRCVICTLR::EVENT = 0x01, select the always on logic */
   1235	config->vinst_ctrl = FIELD_PREP(TRCVICTLR_EVENT_MASK, 0x01);
   1236
   1237	/* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */
   1238	etm4_set_victlr_access(config);
   1239}
   1240
   1241static u64 etm4_get_ns_access_type(struct etmv4_config *config)
   1242{
   1243	u64 access_type = 0;
   1244
   1245	/*
   1246	 * EXLEVEL_NS, for NonSecure Exception levels.
   1247	 * The mask here is a generic value and must be
   1248	 * shifted to the corresponding field for the registers
   1249	 */
   1250	if (!is_kernel_in_hyp_mode()) {
   1251		/* Stay away from hypervisor mode for non-VHE */
   1252		access_type =  ETM_EXLEVEL_NS_HYP;
   1253		if (config->mode & ETM_MODE_EXCL_KERN)
   1254			access_type |= ETM_EXLEVEL_NS_OS;
   1255	} else if (config->mode & ETM_MODE_EXCL_KERN) {
   1256		access_type = ETM_EXLEVEL_NS_HYP;
   1257	}
   1258
   1259	if (config->mode & ETM_MODE_EXCL_USER)
   1260		access_type |= ETM_EXLEVEL_NS_APP;
   1261
   1262	return access_type;
   1263}
   1264
   1265/*
   1266 * Construct the exception level masks for a given config.
   1267 * This must be shifted to the corresponding register field
   1268 * for usage.
   1269 */
   1270static u64 etm4_get_access_type(struct etmv4_config *config)
   1271{
   1272	/* All Secure exception levels are excluded from the trace */
   1273	return etm4_get_ns_access_type(config) | (u64)config->s_ex_level;
   1274}
   1275
   1276static u64 etm4_get_comparator_access_type(struct etmv4_config *config)
   1277{
   1278	return etm4_get_access_type(config) << TRCACATR_EXLEVEL_SHIFT;
   1279}
   1280
   1281static void etm4_set_comparator_filter(struct etmv4_config *config,
   1282				       u64 start, u64 stop, int comparator)
   1283{
   1284	u64 access_type = etm4_get_comparator_access_type(config);
   1285
   1286	/* First half of default address comparator */
   1287	config->addr_val[comparator] = start;
   1288	config->addr_acc[comparator] = access_type;
   1289	config->addr_type[comparator] = ETM_ADDR_TYPE_RANGE;
   1290
   1291	/* Second half of default address comparator */
   1292	config->addr_val[comparator + 1] = stop;
   1293	config->addr_acc[comparator + 1] = access_type;
   1294	config->addr_type[comparator + 1] = ETM_ADDR_TYPE_RANGE;
   1295
   1296	/*
   1297	 * Configure the ViewInst function to include this address range
   1298	 * comparator.
   1299	 *
   1300	 * @comparator is divided by two since it is the index in the
   1301	 * etmv4_config::addr_val array but register TRCVIIECTLR deals with
   1302	 * address range comparator _pairs_.
   1303	 *
   1304	 * Therefore:
    1305	 *	index 0 -> comparator pair 0
   1306	 *	index 2 -> comparator pair 1
   1307	 *	index 4 -> comparator pair 2
   1308	 *	...
   1309	 *	index 14 -> comparator pair 7
   1310	 */
   1311	config->viiectlr |= BIT(comparator / 2);
   1312}
   1313
   1314static void etm4_set_start_stop_filter(struct etmv4_config *config,
   1315				       u64 address, int comparator,
   1316				       enum etm_addr_type type)
   1317{
   1318	int shift;
   1319	u64 access_type = etm4_get_comparator_access_type(config);
   1320
   1321	/* Configure the comparator */
   1322	config->addr_val[comparator] = address;
   1323	config->addr_acc[comparator] = access_type;
   1324	config->addr_type[comparator] = type;
   1325
   1326	/*
   1327	 * Configure ViewInst Start-Stop control register.
   1328	 * Addresses configured to start tracing go from bit 0 to n-1,
   1329	 * while those configured to stop tracing from 16 to 16 + n-1.
   1330	 */
   1331	shift = (type == ETM_ADDR_TYPE_START ? 0 : 16);
   1332	config->vissctlr |= BIT(shift + comparator);
   1333}
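/*
 * Illustrative example (not part of the driver): a start address placed
 * in comparator 1 sets bit 1 of TRCVISSCTLR, while a stop address in the
 * same comparator would set bit 17 (16 + 1) instead.
 */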
   1334
   1335static void etm4_set_default_filter(struct etmv4_config *config)
   1336{
   1337	/* Trace everything 'default' filter achieved by no filtering */
   1338	config->viiectlr = 0x0;
   1339
   1340	/*
   1341	 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
   1342	 * in the started state
   1343	 */
   1344	config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
   1345	config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
   1346
   1347	/* No start-stop filtering for ViewInst */
   1348	config->vissctlr = 0x0;
   1349}
   1350
   1351static void etm4_set_default(struct etmv4_config *config)
   1352{
   1353	if (WARN_ON_ONCE(!config))
   1354		return;
   1355
   1356	/*
   1357	 * Make default initialisation trace everything
   1358	 *
   1359	 * This is done by a minimum default config sufficient to enable
   1360	 * full instruction trace - with a default filter for trace all
   1361	 * achieved by having no filtering.
   1362	 */
   1363	etm4_set_default_config(config);
   1364	etm4_set_default_filter(config);
   1365}
   1366
   1367static int etm4_get_next_comparator(struct etmv4_drvdata *drvdata, u32 type)
   1368{
   1369	int nr_comparator, index = 0;
   1370	struct etmv4_config *config = &drvdata->config;
   1371
   1372	/*
    1373	 * nr_addr_cmp holds the number of comparator _pairs_, so times 2
   1374	 * for the total number of comparators.
   1375	 */
   1376	nr_comparator = drvdata->nr_addr_cmp * 2;
   1377
   1378	/* Go through the tally of comparators looking for a free one. */
   1379	while (index < nr_comparator) {
   1380		switch (type) {
   1381		case ETM_ADDR_TYPE_RANGE:
   1382			if (config->addr_type[index] == ETM_ADDR_TYPE_NONE &&
   1383			    config->addr_type[index + 1] == ETM_ADDR_TYPE_NONE)
   1384				return index;
   1385
   1386			/* Address range comparators go in pairs */
   1387			index += 2;
   1388			break;
   1389		case ETM_ADDR_TYPE_START:
   1390		case ETM_ADDR_TYPE_STOP:
   1391			if (config->addr_type[index] == ETM_ADDR_TYPE_NONE)
   1392				return index;
   1393
   1394			/* Start/stop address can have odd indexes */
   1395			index += 1;
   1396			break;
   1397		default:
   1398			return -EINVAL;
   1399		}
   1400	}
   1401
   1402	/* If we are here all the comparators have been used. */
   1403	return -ENOSPC;
   1404}
   1405
   1406static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
   1407				  struct perf_event *event)
   1408{
   1409	int i, comparator, ret = 0;
   1410	u64 address;
   1411	struct etmv4_config *config = &drvdata->config;
   1412	struct etm_filters *filters = event->hw.addr_filters;
   1413
   1414	if (!filters)
   1415		goto default_filter;
   1416
   1417	/* Sync events with what Perf got */
   1418	perf_event_addr_filters_sync(event);
   1419
   1420	/*
    1421	 * If there are no filters to deal with, simply go ahead with
    1422	 * the default filter, i.e. the entire address range.
   1423	 */
   1424	if (!filters->nr_filters)
   1425		goto default_filter;
   1426
   1427	for (i = 0; i < filters->nr_filters; i++) {
   1428		struct etm_filter *filter = &filters->etm_filter[i];
   1429		enum etm_addr_type type = filter->type;
   1430
   1431		/* See if a comparator is free. */
   1432		comparator = etm4_get_next_comparator(drvdata, type);
   1433		if (comparator < 0) {
   1434			ret = comparator;
   1435			goto out;
   1436		}
   1437
   1438		switch (type) {
   1439		case ETM_ADDR_TYPE_RANGE:
   1440			etm4_set_comparator_filter(config,
   1441						   filter->start_addr,
   1442						   filter->stop_addr,
   1443						   comparator);
   1444			/*
   1445			 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
   1446			 * in the started state
   1447			 */
   1448			config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
   1449
   1450			/* No start-stop filtering for ViewInst */
   1451			config->vissctlr = 0x0;
   1452			break;
   1453		case ETM_ADDR_TYPE_START:
   1454		case ETM_ADDR_TYPE_STOP:
   1455			/* Get the right start or stop address */
   1456			address = (type == ETM_ADDR_TYPE_START ?
   1457				   filter->start_addr :
   1458				   filter->stop_addr);
   1459
   1460			/* Configure comparator */
   1461			etm4_set_start_stop_filter(config, address,
   1462						   comparator, type);
   1463
   1464			/*
   1465			 * If filters::ssstatus == 1, trace acquisition was
    1466	 * started but the process was yanked away before
    1467	 * the stop address was hit.  As such the start/stop
   1468			 * logic needs to be re-started so that tracing can
   1469			 * resume where it left.
   1470			 *
   1471			 * The start/stop logic status when a process is
   1472			 * scheduled out is checked in function
   1473			 * etm4_disable_perf().
   1474			 */
   1475			if (filters->ssstatus)
   1476				config->vinst_ctrl |= TRCVICTLR_SSSTATUS;
   1477
   1478			/* No include/exclude filtering for ViewInst */
   1479			config->viiectlr = 0x0;
   1480			break;
   1481		default:
   1482			ret = -EINVAL;
   1483			goto out;
   1484		}
   1485	}
   1486
   1487	goto out;
   1488
   1489
   1490default_filter:
   1491	etm4_set_default_filter(config);
   1492
   1493out:
   1494	return ret;
   1495}
   1496
   1497void etm4_config_trace_mode(struct etmv4_config *config)
   1498{
   1499	u32 mode;
   1500
   1501	mode = config->mode;
   1502	mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
   1503
   1504	/* excluding kernel AND user space doesn't make sense */
   1505	WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER));
   1506
   1507	/* nothing to do if neither flags are set */
   1508	if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
   1509		return;
   1510
   1511	etm4_set_victlr_access(config);
   1512}
   1513
   1514static int etm4_online_cpu(unsigned int cpu)
   1515{
   1516	if (!etmdrvdata[cpu])
   1517		return 0;
   1518
   1519	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
   1520		coresight_enable(etmdrvdata[cpu]->csdev);
   1521	return 0;
   1522}
   1523
   1524static int etm4_starting_cpu(unsigned int cpu)
   1525{
   1526	if (!etmdrvdata[cpu])
   1527		return 0;
   1528
   1529	spin_lock(&etmdrvdata[cpu]->spinlock);
   1530	if (!etmdrvdata[cpu]->os_unlock)
   1531		etm4_os_unlock(etmdrvdata[cpu]);
   1532
   1533	if (local_read(&etmdrvdata[cpu]->mode))
   1534		etm4_enable_hw(etmdrvdata[cpu]);
   1535	spin_unlock(&etmdrvdata[cpu]->spinlock);
   1536	return 0;
   1537}
   1538
   1539static int etm4_dying_cpu(unsigned int cpu)
   1540{
   1541	if (!etmdrvdata[cpu])
   1542		return 0;
   1543
   1544	spin_lock(&etmdrvdata[cpu]->spinlock);
   1545	if (local_read(&etmdrvdata[cpu]->mode))
   1546		etm4_disable_hw(etmdrvdata[cpu]);
   1547	spin_unlock(&etmdrvdata[cpu]->spinlock);
   1548	return 0;
   1549}
   1550
   1551static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
   1552{
   1553	drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
   1554}
   1555
   1556static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
   1557{
   1558	int i, ret = 0;
   1559	struct etmv4_save_state *state;
   1560	struct coresight_device *csdev = drvdata->csdev;
   1561	struct csdev_access *csa;
   1562	struct device *etm_dev;
   1563
   1564	if (WARN_ON(!csdev))
   1565		return -ENODEV;
   1566
   1567	etm_dev = &csdev->dev;
   1568	csa = &csdev->access;
   1569
   1570	/*
   1571	 * As recommended by 3.4.1 ("The procedure when powering down the PE")
   1572	 * of ARM IHI 0064D
   1573	 */
   1574	dsb(sy);
   1575	isb();
   1576
   1577	etm4_cs_unlock(drvdata, csa);
   1578	/* Lock the OS lock to disable trace and external debugger access */
   1579	etm4_os_lock(drvdata);
   1580
   1581	/* wait for TRCSTATR.PMSTABLE to go up */
   1582	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) {
   1583		dev_err(etm_dev,
   1584			"timeout while waiting for PM Stable Status\n");
   1585		etm4_os_unlock(drvdata);
   1586		ret = -EBUSY;
   1587		goto out;
   1588	}
   1589
   1590	state = drvdata->save_state;
   1591
   1592	state->trcprgctlr = etm4x_read32(csa, TRCPRGCTLR);
   1593	if (drvdata->nr_pe)
   1594		state->trcprocselr = etm4x_read32(csa, TRCPROCSELR);
   1595	state->trcconfigr = etm4x_read32(csa, TRCCONFIGR);
   1596	state->trcauxctlr = etm4x_read32(csa, TRCAUXCTLR);
   1597	state->trceventctl0r = etm4x_read32(csa, TRCEVENTCTL0R);
   1598	state->trceventctl1r = etm4x_read32(csa, TRCEVENTCTL1R);
   1599	if (drvdata->stallctl)
   1600		state->trcstallctlr = etm4x_read32(csa, TRCSTALLCTLR);
   1601	state->trctsctlr = etm4x_read32(csa, TRCTSCTLR);
   1602	state->trcsyncpr = etm4x_read32(csa, TRCSYNCPR);
   1603	state->trcccctlr = etm4x_read32(csa, TRCCCCTLR);
   1604	state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR);
   1605	state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR);
   1606	state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
   1607
   1608	state->trcvictlr = etm4x_read32(csa, TRCVICTLR);
   1609	state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR);
   1610	state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR);
   1611	if (drvdata->nr_pe_cmp)
   1612		state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR);
   1613	state->trcvdctlr = etm4x_read32(csa, TRCVDCTLR);
   1614	state->trcvdsacctlr = etm4x_read32(csa, TRCVDSACCTLR);
   1615	state->trcvdarcctlr = etm4x_read32(csa, TRCVDARCCTLR);
   1616
   1617	for (i = 0; i < drvdata->nrseqstate - 1; i++)
   1618		state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
   1619
   1620	state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
   1621	state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
   1622	state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
   1623
   1624	for (i = 0; i < drvdata->nr_cntr; i++) {
   1625		state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
   1626		state->trccntctlr[i] = etm4x_read32(csa, TRCCNTCTLRn(i));
   1627		state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i));
   1628	}
   1629
   1630	for (i = 0; i < drvdata->nr_resource * 2; i++)
   1631		state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i));
   1632
   1633	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
   1634		state->trcssccr[i] = etm4x_read32(csa, TRCSSCCRn(i));
   1635		state->trcsscsr[i] = etm4x_read32(csa, TRCSSCSRn(i));
   1636		if (etm4x_sspcicrn_present(drvdata, i))
   1637			state->trcsspcicr[i] = etm4x_read32(csa, TRCSSPCICRn(i));
   1638	}
   1639
   1640	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
   1641		state->trcacvr[i] = etm4x_read64(csa, TRCACVRn(i));
   1642		state->trcacatr[i] = etm4x_read64(csa, TRCACATRn(i));
   1643	}
   1644
   1645	/*
   1646	 * The data trace stream is architecturally prohibited for A-profile
   1647	 * cores, so we don't save (or later restore) trcdvcvr and trcdvcmr,
   1648	 * as per section 1.3.4 ("Possible functional configurations of an
   1649	 * ETMv4 trace unit") of ARM IHI 0064D.
   1650	 */
   1651
   1652	for (i = 0; i < drvdata->numcidc; i++)
   1653		state->trccidcvr[i] = etm4x_read64(csa, TRCCIDCVRn(i));
   1654
   1655	for (i = 0; i < drvdata->numvmidc; i++)
   1656		state->trcvmidcvr[i] = etm4x_read64(csa, TRCVMIDCVRn(i));
   1657
   1658	state->trccidcctlr0 = etm4x_read32(csa, TRCCIDCCTLR0);
   1659	if (drvdata->numcidc > 4)
   1660		state->trccidcctlr1 = etm4x_read32(csa, TRCCIDCCTLR1);
   1661
   1662	state->trcvmidcctlr0 = etm4x_read32(csa, TRCVMIDCCTLR0);
   1663	if (drvdata->numvmidc > 4)
   1664		state->trcvmidcctlr1 = etm4x_read32(csa, TRCVMIDCCTLR1);
   1665
   1666	state->trcclaimset = etm4x_read32(csa, TRCCLAIMCLR);
   1667
   1668	if (!drvdata->skip_power_up)
   1669		state->trcpdcr = etm4x_read32(csa, TRCPDCR);
   1670
   1671	/* wait for TRCSTATR.IDLE to go up */
   1672	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
   1673		dev_err(etm_dev,
   1674			"timeout while waiting for Idle Trace Status\n");
   1675		etm4_os_unlock(drvdata);
   1676		ret = -EBUSY;
   1677		goto out;
   1678	}
   1679
   1680	drvdata->state_needs_restore = true;
   1681
   1682	/*
   1683	 * Power can be removed from the trace unit now. We do this to
   1684	 * potentially save power on systems that respect the TRCPDCR_PU
   1685	 * despite requesting software to save/restore state.
   1686	 */
   1687	if (!drvdata->skip_power_up)
   1688		etm4x_relaxed_write32(csa, (state->trcpdcr & ~TRCPDCR_PU),
   1689				      TRCPDCR);
   1690out:
   1691	etm4_cs_lock(drvdata, csa);
   1692	return ret;
   1693}
   1694
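/*
 * CPU_PM_ENTER path: always save the TRFCR, and save the trace unit
 * registers only when a session is active and a save area exists.
 */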
   1695static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
   1696{
   1697	int ret = 0;
   1698
   1699	/* Save the TRFCR irrespective of whether the ETM is ON */
   1700	if (drvdata->trfcr)
   1701		drvdata->save_trfcr = read_trfcr();
   1702	/*
   1703	 * Save and restore the ETM Trace registers only if
   1704	 * the ETM is active.
   1705	 */
   1706	if (local_read(&drvdata->mode) && drvdata->save_state)
   1707		ret = __etm4_cpu_save(drvdata);
   1708	return ret;
   1709}
   1710
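/*
 * Mirror image of __etm4_cpu_save(): re-program every saved register,
 * then release the OS lock to re-enable trace and external debugger
 * access.
 */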
   1711static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
   1712{
   1713	int i;
   1714	struct etmv4_save_state *state = drvdata->save_state;
   1715	struct csdev_access tmp_csa = CSDEV_ACCESS_IOMEM(drvdata->base);
   1716	struct csdev_access *csa = &tmp_csa;
   1717
   1718	etm4_cs_unlock(drvdata, csa);
   1719	etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
   1720
   1721	etm4x_relaxed_write32(csa, state->trcprgctlr, TRCPRGCTLR);
   1722	if (drvdata->nr_pe)
   1723		etm4x_relaxed_write32(csa, state->trcprocselr, TRCPROCSELR);
   1724	etm4x_relaxed_write32(csa, state->trcconfigr, TRCCONFIGR);
   1725	etm4x_relaxed_write32(csa, state->trcauxctlr, TRCAUXCTLR);
   1726	etm4x_relaxed_write32(csa, state->trceventctl0r, TRCEVENTCTL0R);
   1727	etm4x_relaxed_write32(csa, state->trceventctl1r, TRCEVENTCTL1R);
   1728	if (drvdata->stallctl)
   1729		etm4x_relaxed_write32(csa, state->trcstallctlr, TRCSTALLCTLR);
   1730	etm4x_relaxed_write32(csa, state->trctsctlr, TRCTSCTLR);
   1731	etm4x_relaxed_write32(csa, state->trcsyncpr, TRCSYNCPR);
   1732	etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR);
   1733	etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR);
   1734	etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR);
   1735	etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
   1736
   1737	etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR);
   1738	etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR);
   1739	etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR);
   1740	if (drvdata->nr_pe_cmp)
   1741		etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR);
   1742	etm4x_relaxed_write32(csa, state->trcvdctlr, TRCVDCTLR);
   1743	etm4x_relaxed_write32(csa, state->trcvdsacctlr, TRCVDSACCTLR);
   1744	etm4x_relaxed_write32(csa, state->trcvdarcctlr, TRCVDARCCTLR);
   1745
   1746	for (i = 0; i < drvdata->nrseqstate - 1; i++)
   1747		etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
   1748
   1749	etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
   1750	etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
   1751	etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
   1752
   1753	for (i = 0; i < drvdata->nr_cntr; i++) {
   1754		etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
   1755		etm4x_relaxed_write32(csa, state->trccntctlr[i], TRCCNTCTLRn(i));
   1756		etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i));
   1757	}
   1758
   1759	for (i = 0; i < drvdata->nr_resource * 2; i++)
   1760		etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i));
   1761
   1762	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
   1763		etm4x_relaxed_write32(csa, state->trcssccr[i], TRCSSCCRn(i));
   1764		etm4x_relaxed_write32(csa, state->trcsscsr[i], TRCSSCSRn(i));
   1765		if (etm4x_sspcicrn_present(drvdata, i))
   1766			etm4x_relaxed_write32(csa, state->trcsspcicr[i], TRCSSPCICRn(i));
   1767	}
   1768
   1769	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
   1770		etm4x_relaxed_write64(csa, state->trcacvr[i], TRCACVRn(i));
   1771		etm4x_relaxed_write64(csa, state->trcacatr[i], TRCACATRn(i));
   1772	}
   1773
   1774	for (i = 0; i < drvdata->numcidc; i++)
   1775		etm4x_relaxed_write64(csa, state->trccidcvr[i], TRCCIDCVRn(i));
   1776
   1777	for (i = 0; i < drvdata->numvmidc; i++)
   1778		etm4x_relaxed_write64(csa, state->trcvmidcvr[i], TRCVMIDCVRn(i));
   1779
   1780	etm4x_relaxed_write32(csa, state->trccidcctlr0, TRCCIDCCTLR0);
   1781	if (drvdata->numcidc > 4)
   1782		etm4x_relaxed_write32(csa, state->trccidcctlr1, TRCCIDCCTLR1);
   1783
   1784	etm4x_relaxed_write32(csa, state->trcvmidcctlr0, TRCVMIDCCTLR0);
   1785	if (drvdata->numvmidc > 4)
   1786		etm4x_relaxed_write32(csa, state->trcvmidcctlr1, TRCVMIDCCTLR1);
   1787
   1788	etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
   1789
   1790	if (!drvdata->skip_power_up)
   1791		etm4x_relaxed_write32(csa, state->trcpdcr, TRCPDCR);
   1792
   1793	drvdata->state_needs_restore = false;
   1794
   1795	/*
   1796	 * As recommended by section 4.3.7 ("Synchronization when using the
   1797	 * memory-mapped interface") of ARM IHI 0064D
   1798	 */
   1799	dsb(sy);
   1800	isb();
   1801
   1802	/* Unlock the OS lock to re-enable trace and external debug access */
   1803	etm4_os_unlock(drvdata);
   1804	etm4_cs_lock(drvdata, csa);
   1805}
   1806
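/*
 * CPU_PM_EXIT/CPU_PM_ENTER_FAILED path: restore the TRFCR and, if a
 * save was done on the way down, the trace unit registers.
 */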
   1807static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
   1808{
   1809	if (drvdata->trfcr)
   1810		write_trfcr(drvdata->save_trfcr);
   1811	if (drvdata->state_needs_restore)
   1812		__etm4_cpu_restore(drvdata);
   1813}
   1814
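/*
 * CPU PM notifier: save the trace unit state of the local CPU before a
 * low-power state is entered and restore it on the way out (or when
 * entry failed).
 */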
   1815static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
   1816			      void *v)
   1817{
   1818	struct etmv4_drvdata *drvdata;
   1819	unsigned int cpu = smp_processor_id();
   1820
   1821	if (!etmdrvdata[cpu])
   1822		return NOTIFY_OK;
   1823
   1824	drvdata = etmdrvdata[cpu];
   1825
   1826	if (WARN_ON_ONCE(drvdata->cpu != cpu))
   1827		return NOTIFY_BAD;
   1828
   1829	switch (cmd) {
   1830	case CPU_PM_ENTER:
   1831		if (etm4_cpu_save(drvdata))
   1832			return NOTIFY_BAD;
   1833		break;
   1834	case CPU_PM_EXIT:
   1835	case CPU_PM_ENTER_FAILED:
   1836		etm4_cpu_restore(drvdata);
   1837		break;
   1838	default:
   1839		return NOTIFY_DONE;
   1840	}
   1841
   1842	return NOTIFY_OK;
   1843}
   1844
   1845static struct notifier_block etm4_cpu_pm_nb = {
   1846	.notifier_call = etm4_cpu_pm_notify,
   1847};
   1848
   1849/* Set up PM: register the PM notifier and hotplug states, cleaning up on error */
   1850static int __init etm4_pm_setup(void)
   1851{
   1852	int ret;
   1853
   1854	ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb);
   1855	if (ret)
   1856		return ret;
   1857
   1858	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
   1859					"arm/coresight4:starting",
   1860					etm4_starting_cpu, etm4_dying_cpu);
   1861
   1862	if (ret)
   1863		goto unregister_notifier;
   1864
   1865	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
   1866					"arm/coresight4:online",
   1867					etm4_online_cpu, NULL);
   1868
   1869	/* HP dyn state ID returned in ret on success */
   1870	if (ret > 0) {
   1871		hp_online = ret;
   1872		return 0;
   1873	}
   1874
   1875	/* failed dyn state - remove others */
   1876	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
   1877
   1878unregister_notifier:
   1879	cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
   1880	return ret;
   1881}
   1882
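/* Undo etm4_pm_setup(): unregister the PM notifier and hotplug states. */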
   1883static void etm4_pm_clear(void)
   1884{
   1885	cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
   1886	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
   1887	if (hp_online) {
   1888		cpuhp_remove_state_nocalls(hp_online);
   1889		hp_online = 0;
   1890	}
   1891}
   1892
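/*
 * Common probe path shared by the AMBA (MMIO) and platform (system
 * instruction) devices: discover the per-CPU trace unit features,
 * allocate the save/restore area when needed and register the device
 * with the coresight core.
 */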
   1893static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
   1894{
   1895	int ret;
   1896	struct coresight_platform_data *pdata = NULL;
   1897	struct etmv4_drvdata *drvdata;
   1898	struct coresight_desc desc = { 0 };
   1899	struct etm4_init_arg init_arg = { 0 };
   1900	u8 major, minor;
   1901	char *type_name;
   1902
   1903	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
   1904	if (!drvdata)
   1905		return -ENOMEM;
   1906
   1907	dev_set_drvdata(dev, drvdata);
   1908
   1909	if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
   1910		pm_save_enable = coresight_loses_context_with_cpu(dev) ?
   1911			       PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
   1912
   1913	if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
   1914		drvdata->save_state = devm_kmalloc(dev,
   1915				sizeof(struct etmv4_save_state), GFP_KERNEL);
   1916		if (!drvdata->save_state)
   1917			return -ENOMEM;
   1918	}
   1919
   1920	drvdata->base = base;
   1921
   1922	spin_lock_init(&drvdata->spinlock);
   1923
   1924	drvdata->cpu = coresight_get_cpu(dev);
   1925	if (drvdata->cpu < 0)
   1926		return drvdata->cpu;
   1927
   1928	init_arg.drvdata = drvdata;
   1929	init_arg.csa = &desc.access;
   1930	init_arg.pid = etm_pid;
   1931
   1932	if (smp_call_function_single(drvdata->cpu,
   1933				etm4_init_arch_data,  &init_arg, 1))
   1934		dev_err(dev, "ETM arch init failed\n");
   1935
   1936	if (!drvdata->arch)
   1937		return -EINVAL;
   1938
   1939	/* TRCPDCR is not accessible with system instructions. */
   1940	if (!desc.access.io_mem ||
   1941	    fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
   1942		drvdata->skip_power_up = true;
   1943
   1944	major = ETM_ARCH_MAJOR_VERSION(drvdata->arch);
   1945	minor = ETM_ARCH_MINOR_VERSION(drvdata->arch);
   1946
   1947	if (etm4x_is_ete(drvdata)) {
   1948		type_name = "ete";
   1949		/* ETE v1 has major version == 0b101. Adjust this for logging. */
   1950		major -= 4;
   1951	} else {
   1952		type_name = "etm";
   1953	}
   1954
   1955	desc.name = devm_kasprintf(dev, GFP_KERNEL,
   1956				   "%s%d", type_name, drvdata->cpu);
   1957	if (!desc.name)
   1958		return -ENOMEM;
   1959
   1960	etm4_init_trace_id(drvdata);
   1961	etm4_set_default(&drvdata->config);
   1962
   1963	pdata = coresight_get_platform_data(dev);
   1964	if (IS_ERR(pdata))
   1965		return PTR_ERR(pdata);
   1966
   1967	dev->platform_data = pdata;
   1968
   1969	desc.type = CORESIGHT_DEV_TYPE_SOURCE;
   1970	desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
   1971	desc.ops = &etm4_cs_ops;
   1972	desc.pdata = pdata;
   1973	desc.dev = dev;
   1974	desc.groups = coresight_etmv4_groups;
   1975	drvdata->csdev = coresight_register(&desc);
   1976	if (IS_ERR(drvdata->csdev))
   1977		return PTR_ERR(drvdata->csdev);
   1978
   1979	ret = etm_perf_symlink(drvdata->csdev, true);
   1980	if (ret) {
   1981		coresight_unregister(drvdata->csdev);
   1982		return ret;
   1983	}
   1984
   1985	/* register with config infrastructure & load any current features */
   1986	ret = etm4_cscfg_register(drvdata->csdev);
   1987	if (ret) {
   1988		coresight_unregister(drvdata->csdev);
   1989		return ret;
   1990	}
   1991
   1992	etmdrvdata[drvdata->cpu] = drvdata;
   1993
   1994	dev_info(&drvdata->csdev->dev, "CPU%d: %s v%d.%d initialized\n",
   1995		 drvdata->cpu, type_name, major, minor);
   1996
   1997	if (boot_enable) {
   1998		coresight_enable(drvdata->csdev);
   1999		drvdata->boot_enable = true;
   2000	}
   2001
   2002	return 0;
   2003}
   2004
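/* AMBA probe path: map the MMIO region and hand over to the common probe. */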
   2005static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
   2006{
   2007	void __iomem *base;
   2008	struct device *dev = &adev->dev;
   2009	struct resource *res = &adev->res;
   2010	int ret;
   2011
   2012	/* Validity for the resource is already checked by the AMBA core */
   2013	base = devm_ioremap_resource(dev, res);
   2014	if (IS_ERR(base))
   2015		return PTR_ERR(base);
   2016
   2017	ret = etm4_probe(dev, base, id->id);
   2018	if (!ret)
   2019		pm_runtime_put(&adev->dev);
   2020
   2021	return ret;
   2022}
   2023
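/* System-instruction (sysreg) probe path: there is no MMIO resource to map. */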
   2024static int etm4_probe_platform_dev(struct platform_device *pdev)
   2025{
   2026	int ret;
   2027
   2028	pm_runtime_get_noresume(&pdev->dev);
   2029	pm_runtime_set_active(&pdev->dev);
   2030	pm_runtime_enable(&pdev->dev);
   2031
   2032	/*
   2033	 * System register based devices can identify the HW by
   2034	 * reading the appropriate registers directly, so the PID
   2035	 * can be skipped.
   2036	 */
   2037	ret = etm4_probe(&pdev->dev, NULL, 0);
   2038
   2039	pm_runtime_put(&pdev->dev);
   2040	return ret;
   2041}
   2042
   2043static struct amba_cs_uci_id uci_id_etm4[] = {
   2044	{
   2045		/*  ETMv4 UCI data */
   2046		.devarch	= ETM_DEVARCH_ETMv4x_ARCH,
   2047		.devarch_mask	= ETM_DEVARCH_ID_MASK,
   2048		.devtype	= 0x00000013,
   2049	}
   2050};
   2051
   2052static void clear_etmdrvdata(void *info)
   2053{
   2054	int cpu = *(int *)info;
   2055
   2056	etmdrvdata[cpu] = NULL;
   2057}
   2058
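/*
 * Common removal path: drop the perf symlink, clear the per-CPU drvdata
 * pointer on the owning CPU and unregister from the configuration and
 * coresight frameworks.
 */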
   2059static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
   2060{
   2061	etm_perf_symlink(drvdata->csdev, false);
   2062	/*
   2063	 * Take the hotplug lock here to avoid races between etm4_remove_dev()
   2064	 * and the CPU hotplug callbacks.
   2065	 */
   2066	cpus_read_lock();
   2067	/*
   2068	 * The readers of etmdrvdata[] are the CPU hotplug and PM
   2069	 * notification callbacks. Clearing etmdrvdata[i] on CPU i
   2070	 * ensures that each callback sees a consistent value for
   2071	 * the whole duration of its invocation.
   2072	 */
   2073	if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
   2074		etmdrvdata[drvdata->cpu] = NULL;
   2075
   2076	cpus_read_unlock();
   2077
   2078	cscfg_unregister_csdev(drvdata->csdev);
   2079	coresight_unregister(drvdata->csdev);
   2080
   2081	return 0;
   2082}
   2083
   2084static void __exit etm4_remove_amba(struct amba_device *adev)
   2085{
   2086	struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
   2087
   2088	if (drvdata)
   2089		etm4_remove_dev(drvdata);
   2090}
   2091
   2092static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
   2093{
   2094	int ret = 0;
   2095	struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
   2096
   2097	if (drvdata)
   2098		ret = etm4_remove_dev(drvdata);
   2099	pm_runtime_disable(&pdev->dev);
   2100	return ret;
   2101}
   2102
   2103static const struct amba_id etm4_ids[] = {
   2104	CS_AMBA_ID(0x000bb95d),			/* Cortex-A53 */
   2105	CS_AMBA_ID(0x000bb95e),			/* Cortex-A57 */
   2106	CS_AMBA_ID(0x000bb95a),			/* Cortex-A72 */
   2107	CS_AMBA_ID(0x000bb959),			/* Cortex-A73 */
   2108	CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
   2109	CS_AMBA_UCI_ID(0x000bbd05, uci_id_etm4),/* Cortex-A55 */
   2110	CS_AMBA_UCI_ID(0x000bbd0a, uci_id_etm4),/* Cortex-A75 */
   2111	CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
   2112	CS_AMBA_UCI_ID(0x000bbd41, uci_id_etm4),/* Cortex-A78 */
   2113	CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
   2114	CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
   2115	CS_AMBA_UCI_ID(0x000bb802, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A55 */
   2116	CS_AMBA_UCI_ID(0x000bb803, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A75 */
   2117	CS_AMBA_UCI_ID(0x000bb805, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A55 */
   2118	CS_AMBA_UCI_ID(0x000bb804, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A76 */
   2119	CS_AMBA_UCI_ID(0x000bbd0d, uci_id_etm4),/* Qualcomm Kryo 5XX Cortex-A77 */
   2120	CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
   2121	CS_AMBA_UCI_ID(0x000b6d01, uci_id_etm4),/* HiSilicon-Hip08 */
   2122	CS_AMBA_UCI_ID(0x000b6d02, uci_id_etm4),/* HiSilicon-Hip09 */
   2123	{},
   2124};
   2125
   2126MODULE_DEVICE_TABLE(amba, etm4_ids);
   2127
   2128static struct amba_driver etm4x_amba_driver = {
   2129	.drv = {
   2130		.name   = "coresight-etm4x",
   2131		.owner  = THIS_MODULE,
   2132		.suppress_bind_attrs = true,
   2133	},
   2134	.probe		= etm4_probe_amba,
   2135	.remove         = etm4_remove_amba,
   2136	.id_table	= etm4_ids,
   2137};
   2138
   2139static const struct of_device_id etm4_sysreg_match[] = {
   2140	{ .compatible	= "arm,coresight-etm4x-sysreg" },
   2141	{ .compatible	= "arm,embedded-trace-extension" },
   2142	{}
   2143};
   2144
   2145static struct platform_driver etm4_platform_driver = {
   2146	.probe		= etm4_probe_platform_dev,
   2147	.remove		= etm4_remove_platform_dev,
   2148	.driver			= {
   2149		.name			= "coresight-etm4x",
   2150		.of_match_table		= etm4_sysreg_match,
   2151		.suppress_bind_attrs	= true,
   2152	},
   2153};
   2154
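/*
 * Module init: set up the PM/hotplug hooks first, then register the AMBA
 * and platform drivers, unwinding in reverse order on failure.
 */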
   2155static int __init etm4x_init(void)
   2156{
   2157	int ret;
   2158
   2159	ret = etm4_pm_setup();
   2160
   2161	/* etm4_pm_setup() does its own cleanup - exit on error */
   2162	if (ret)
   2163		return ret;
   2164
   2165	ret = amba_driver_register(&etm4x_amba_driver);
   2166	if (ret) {
   2167		pr_err("Error registering etm4x AMBA driver\n");
   2168		goto clear_pm;
   2169	}
   2170
   2171	ret = platform_driver_register(&etm4_platform_driver);
   2172	if (!ret)
   2173		return 0;
   2174
   2175	pr_err("Error registering etm4x platform driver\n");
   2176	amba_driver_unregister(&etm4x_amba_driver);
   2177
   2178clear_pm:
   2179	etm4_pm_clear();
   2180	return ret;
   2181}
   2182
   2183static void __exit etm4x_exit(void)
   2184{
   2185	amba_driver_unregister(&etm4x_amba_driver);
   2186	platform_driver_unregister(&etm4_platform_driver);
   2187	etm4_pm_clear();
   2188}
   2189
   2190module_init(etm4x_init);
   2191module_exit(etm4x_exit);
   2192
   2193MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
   2194MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
   2195MODULE_DESCRIPTION("Arm CoreSight Program Flow Trace v4.x driver");
   2196MODULE_LICENSE("GPL v2");