cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

timer-ti-dm-systimer.c (22253B)


      1// SPDX-License-Identifier: GPL-2.0+
      2#include <linux/clk.h>
      3#include <linux/clocksource.h>
      4#include <linux/clockchips.h>
      5#include <linux/cpuhotplug.h>
      6#include <linux/interrupt.h>
      7#include <linux/io.h>
      8#include <linux/iopoll.h>
      9#include <linux/err.h>
     10#include <linux/of.h>
     11#include <linux/of_address.h>
     12#include <linux/of_irq.h>
     13#include <linux/sched_clock.h>
     14
     15#include <linux/clk/clk-conf.h>
     16
     17#include <clocksource/timer-ti-dm.h>
     18#include <dt-bindings/bus/ti-sysc.h>
     19
     20/* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */
     21#define DMTIMER_TYPE1_ENABLE	((1 << 9) | (SYSC_IDLE_SMART << 3) | \
     22				 SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE)
     23#define DMTIMER_TYPE1_DISABLE	(SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)
     24#define DMTIMER_TYPE2_ENABLE	(SYSC_IDLE_SMART_WKUP << 2)
     25#define DMTIMER_RESET_WAIT	100000
     26
     27#define DMTIMER_INST_DONT_CARE	~0U
     28
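       /* 0 = not yet probed, 1 = usable 32 KiHz counter found, -ENODEV = none */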
     29static int counter_32k;
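       /* Physical addresses of the dmtimers selected as clocksource and clockevent */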
     30static u32 clocksource;
     31static u32 clockevent;
     32
     33/*
     34 * Subset of the timer registers we use. Note that the register offsets
     35 * depend on the timer revision detected.
     36 */
     37struct dmtimer_systimer {
     38	void __iomem *base;
     39	u8 sysc;
     40	u8 irq_stat;
     41	u8 irq_ena;
     42	u8 pend;
     43	u8 load;
     44	u8 counter;
     45	u8 ctrl;
     46	u8 wakeup;
     47	u8 ifctrl;
     48	struct clk *fck;
     49	struct clk *ick;
     50	unsigned long rate;
     51};
     52
     53struct dmtimer_clockevent {
     54	struct clock_event_device dev;
     55	struct dmtimer_systimer t;
     56	u32 period;
     57};
     58
     59struct dmtimer_clocksource {
     60	struct clocksource dev;
     61	struct dmtimer_systimer t;
     62	unsigned int loadval;
     63};
     64
     65/* Assume v1 IP if bits [31:16] of the revision register are zero */
     66static bool dmtimer_systimer_revision1(struct dmtimer_systimer *t)
     67{
     68	u32 tidr = readl_relaxed(t->base);
     69
     70	return !(tidr >> 16);
     71}
     72
     73static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
     74{
     75	u32 val;
     76
     77	if (dmtimer_systimer_revision1(t))
     78		val = DMTIMER_TYPE1_ENABLE;
     79	else
     80		val = DMTIMER_TYPE2_ENABLE;
     81
     82	writel_relaxed(val, t->base + t->sysc);
     83}
     84
     85static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
     86{
     87	if (!dmtimer_systimer_revision1(t))
     88		return;
     89
     90	writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
     91}
     92
     93static int __init dmtimer_systimer_type1_reset(struct dmtimer_systimer *t)
     94{
     95	void __iomem *syss = t->base + OMAP_TIMER_V1_SYS_STAT_OFFSET;
     96	int ret;
     97	u32 l;
     98
     99	dmtimer_systimer_enable(t);
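       	/* Trigger a soft reset via the interface control register, then wait for SYSS */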
    100	writel_relaxed(BIT(1) | BIT(2), t->base + t->ifctrl);
    101	ret = readl_poll_timeout_atomic(syss, l, l & BIT(0), 100,
    102					DMTIMER_RESET_WAIT);
    103
    104	return ret;
    105}
    106
    107/* Note we must use io_base instead of func_base for type2 OCP regs */
    108static int __init dmtimer_systimer_type2_reset(struct dmtimer_systimer *t)
    109{
    110	void __iomem *sysc = t->base + t->sysc;
    111	u32 l;
    112
    113	dmtimer_systimer_enable(t);
    114	l = readl_relaxed(sysc);
    115	l |= BIT(0);
    116	writel_relaxed(l, sysc);
    117
    118	return readl_poll_timeout_atomic(sysc, l, !(l & BIT(0)), 100,
    119					 DMTIMER_RESET_WAIT);
    120}
    121
    122static int __init dmtimer_systimer_reset(struct dmtimer_systimer *t)
    123{
    124	int ret;
    125
    126	if (dmtimer_systimer_revision1(t))
    127		ret = dmtimer_systimer_type1_reset(t);
    128	else
    129		ret = dmtimer_systimer_type2_reset(t);
    130	if (ret < 0) {
    131		pr_err("%s failed with %i\n", __func__, ret);
    132
    133		return ret;
    134	}
    135
    136	return 0;
    137}
    138
    139static const struct of_device_id counter_match_table[] = {
    140	{ .compatible = "ti,omap-counter32k" },
    141	{ /* Sentinel */ },
    142};
    143
    144/*
    145 * Check if the SoC also has a usable working 32 KiHz counter. The 32 KiHz
    146 * counter is handled by timer-ti-32k, but we need to detect it as it
    147 * affects the preferred dmtimer system timer configuration. There is
    148 * typically no use for a dmtimer clocksource if the 32 KiHz counter is
    149 * present, except on am437x as described below.
    150 */
    151static void __init dmtimer_systimer_check_counter32k(void)
    152{
    153	struct device_node *np;
    154
    155	if (counter_32k)
    156		return;
    157
    158	np = of_find_matching_node(NULL, counter_match_table);
    159	if (!np) {
    160		counter_32k = -ENODEV;
    161
    162		return;
    163	}
    164
    165	if (of_device_is_available(np))
    166		counter_32k = 1;
    167	else
    168		counter_32k = -ENODEV;
    169
    170	of_node_put(np);
    171}
    172
    173static const struct of_device_id dmtimer_match_table[] = {
    174	{ .compatible = "ti,omap2420-timer", },
    175	{ .compatible = "ti,omap3430-timer", },
    176	{ .compatible = "ti,omap4430-timer", },
    177	{ .compatible = "ti,omap5430-timer", },
    178	{ .compatible = "ti,am335x-timer", },
    179	{ .compatible = "ti,am335x-timer-1ms", },
    180	{ .compatible = "ti,dm814-timer", },
    181	{ .compatible = "ti,dm816-timer", },
    182	{ /* Sentinel */ },
    183};
    184
    185/*
    186 * Checks that the system timers are configured not to reset and idle during
    187 * the generic timer-ti-dm device driver probe, and that the system timer
    188 * source clocks are properly configured. Also, let's not hog any DSP- and
    189 * PWM-capable timers unnecessarily as system timers.
    190 */
    191static bool __init dmtimer_is_preferred(struct device_node *np)
    192{
    193	if (!of_device_is_available(np))
    194		return false;
    195
    196	if (!of_property_read_bool(np->parent,
    197				   "ti,no-reset-on-init"))
    198		return false;
    199
    200	if (!of_property_read_bool(np->parent, "ti,no-idle"))
    201		return false;
    202
    203	/* Secure gptimer12 is always clocked with a fixed source */
    204	if (!of_property_read_bool(np, "ti,timer-secure")) {
    205		if (!of_property_read_bool(np, "assigned-clocks"))
    206			return false;
    207
    208		if (!of_property_read_bool(np, "assigned-clock-parents"))
    209			return false;
    210	}
    211
    212	if (of_property_read_bool(np, "ti,timer-dsp"))
    213		return false;
    214
    215	if (of_property_read_bool(np, "ti,timer-pwm"))
    216		return false;
    217
    218	return true;
    219}
    220
    221/*
    222 * Finds the first usable always-on timer and assigns it to either the
    223 * clockevent or the clocksource, depending on whether counter_32k is
    224 * available on the SoC.
    225 *
    226 * Some omap3 boards with an unreliable oscillator must not use the counter_32k
    227 * or dmtimer1 with a 32 KiHz source. Additionally, boards with an unreliable
    228 * oscillator should really mark counter_32k as disabled and delete the
    229 * dmtimer1 ti,always-on property, but let's not count on it. For these quirky
    230 * cases, we prefer using the always-on secure dmtimer12 with the internal
    231 * 32 KiHz clock as the clocksource, and any available dmtimer as clockevent.
    232 *
    233 * For am437x, we use an am335x style dmtimer clocksource. It is unclear
    234 * if this quirk handling is really needed, but let's change it separately
    235 * based on testing as it might cause side effects.
    236 */
    237static void __init dmtimer_systimer_assign_alwon(void)
    238{
    239	struct device_node *np;
    240	u32 pa = 0;
    241	bool quirk_unreliable_oscillator = false;
    242
    243	/* Quirk unreliable 32 KiHz oscillator with incomplete dts */
    244	if (of_machine_is_compatible("ti,omap3-beagle-ab4")) {
    245		quirk_unreliable_oscillator = true;
    246		counter_32k = -ENODEV;
    247	}
    248
    249	/* Quirk am437x using am335x style dmtimer clocksource */
    250	if (of_machine_is_compatible("ti,am43"))
    251		counter_32k = -ENODEV;
    252
    253	for_each_matching_node(np, dmtimer_match_table) {
    254		if (!dmtimer_is_preferred(np))
    255			continue;
    256
    257		if (of_property_read_bool(np, "ti,timer-alwon")) {
    258			const __be32 *addr;
    259
    260			addr = of_get_address(np, 0, NULL, NULL);
    261			pa = of_translate_address(np, addr);
    262			if (pa) {
    263				/* Quirky omap3 boards must use dmtimer12 */
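       				/* 0x48318000 is dmtimer1 on omap3 */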
    264				if (quirk_unreliable_oscillator &&
    265				    pa == 0x48318000)
    266					continue;
    267
    268				of_node_put(np);
    269				break;
    270			}
    271		}
    272	}
    273
    274	/* Usually no need for a dmtimer clocksource if we have counter_32k */
    275	if (counter_32k >= 0) {
    276		clockevent = pa;
    277		clocksource = 0;
    278	} else {
    279		clocksource = pa;
    280		clockevent = DMTIMER_INST_DONT_CARE;
    281	}
    282}
    283
    284/* Finds the first usable dmtimer, used for the don't care case */
    285static u32 __init dmtimer_systimer_find_first_available(void)
    286{
    287	struct device_node *np;
    288	const __be32 *addr;
    289	u32 pa = 0;
    290
    291	for_each_matching_node(np, dmtimer_match_table) {
    292		if (!dmtimer_is_preferred(np))
    293			continue;
    294
    295		addr = of_get_address(np, 0, NULL, NULL);
    296		pa = of_translate_address(np, addr);
    297		if (pa) {
    298			if (pa == clocksource || pa == clockevent) {
    299				pa = 0;
    300				continue;
    301			}
    302
    303			of_node_put(np);
    304			break;
    305		}
    306	}
    307
    308	return pa;
    309}
    310
    311/* Selects the best clocksource and clockevent to use */
    312static void __init dmtimer_systimer_select_best(void)
    313{
    314	dmtimer_systimer_check_counter32k();
    315	dmtimer_systimer_assign_alwon();
    316
    317	if (clockevent == DMTIMER_INST_DONT_CARE)
    318		clockevent = dmtimer_systimer_find_first_available();
    319
    320	pr_debug("%s: counter_32k: %i clocksource: %08x clockevent: %08x\n",
    321		 __func__, counter_32k, clocksource, clockevent);
    322}
    323
    324/* Interface clocks are only available on some SoC variants */
    325static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
    326					      struct device_node *np,
    327					      const char *name,
    328					      unsigned long *rate)
    329{
    330	struct clk *clock;
    331	unsigned long r;
    332	bool is_ick = false;
    333	int error;
    334
    335	is_ick = !strncmp(name, "ick", 3);
    336
    337	clock = of_clk_get_by_name(np, name);
    338	if ((PTR_ERR(clock) == -EINVAL) && is_ick)
    339		return 0;
    340	else if (IS_ERR(clock))
    341		return PTR_ERR(clock);
    342
    343	error = clk_prepare_enable(clock);
    344	if (error)
    345		return error;
    346
    347	r = clk_get_rate(clock);
    348	if (!r)
    349		return -ENODEV;
    350
    351	if (is_ick)
    352		t->ick = clock;
    353	else
    354		t->fck = clock;
    355
    356	*rate = r;
    357
    358	return 0;
    359}
    360
    361static int __init dmtimer_systimer_setup(struct device_node *np,
    362					 struct dmtimer_systimer *t)
    363{
    364	unsigned long rate;
    365	u8 regbase;
    366	int error;
    367
    368	if (!of_device_is_compatible(np->parent, "ti,sysc"))
    369		return -EINVAL;
    370
    371	t->base = of_iomap(np, 0);
    372	if (!t->base)
    373		return -ENXIO;
    374
    375	/*
    376	 * Enable optional assigned-clock-parents configured at the timer
    377	 * node level. For regular device drivers, this is done automatically
    378	 * by bus related code such as platform_drv_probe().
    379	 */
    380	error = of_clk_set_defaults(np, false);
    381	if (error < 0)
    382		pr_err("%s: clock source init failed: %i\n", __func__, error);
    383
    384	/* For ti-sysc, we have timer clocks at the parent module level */
    385	error = dmtimer_systimer_init_clock(t, np->parent, "fck", &rate);
    386	if (error)
    387		goto err_unmap;
    388
    389	t->rate = rate;
    390
    391	error = dmtimer_systimer_init_clock(t, np->parent, "ick", &rate);
    392	if (error)
    393		goto err_unmap;
    394
    395	if (dmtimer_systimer_revision1(t)) {
    396		t->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
    397		t->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
    398		t->pend = _OMAP_TIMER_WRITE_PEND_OFFSET;
    399		regbase = 0;
    400	} else {
    401		t->irq_stat = OMAP_TIMER_V2_IRQSTATUS;
    402		t->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET;
    403		regbase = OMAP_TIMER_V2_FUNC_OFFSET;
    404		t->pend = regbase + _OMAP_TIMER_WRITE_PEND_OFFSET;
    405	}
    406
    407	t->sysc = OMAP_TIMER_OCP_CFG_OFFSET;
    408	t->load = regbase + _OMAP_TIMER_LOAD_OFFSET;
    409	t->counter = regbase + _OMAP_TIMER_COUNTER_OFFSET;
    410	t->ctrl = regbase + _OMAP_TIMER_CTRL_OFFSET;
    411	t->wakeup = regbase + _OMAP_TIMER_WAKEUP_EN_OFFSET;
    412	t->ifctrl = regbase + _OMAP_TIMER_IF_CTRL_OFFSET;
    413
    414	dmtimer_systimer_reset(t);
    415	dmtimer_systimer_enable(t);
    416	pr_debug("dmtimer rev %08x sysc %08x\n", readl_relaxed(t->base),
    417		 readl_relaxed(t->base + t->sysc));
    418
    419	return 0;
    420
    421err_unmap:
    422	iounmap(t->base);
    423
    424	return error;
    425}
    426
    427/* Clockevent */
    428static struct dmtimer_clockevent *
    429to_dmtimer_clockevent(struct clock_event_device *clockevent)
    430{
    431	return container_of(clockevent, struct dmtimer_clockevent, dev);
    432}
    433
    434static irqreturn_t dmtimer_clockevent_interrupt(int irq, void *data)
    435{
    436	struct dmtimer_clockevent *clkevt = data;
    437	struct dmtimer_systimer *t = &clkevt->t;
    438
    439	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
    440	clkevt->dev.event_handler(&clkevt->dev);
    441
    442	return IRQ_HANDLED;
    443}
    444
    445static int dmtimer_set_next_event(unsigned long cycles,
    446				  struct clock_event_device *evt)
    447{
    448	struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
    449	struct dmtimer_systimer *t = &clkevt->t;
    450	void __iomem *pend = t->base + t->pend;
    451
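       	/*
       	 * The timer counts up and raises the overflow interrupt when TCRR
       	 * wraps, so load 0xffffffff - cycles. Wait for any pending posted
       	 * write to the target register (WP_* bits) before each write.
       	 */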
    452	while (readl_relaxed(pend) & WP_TCRR)
    453		cpu_relax();
    454	writel_relaxed(0xffffffff - cycles, t->base + t->counter);
    455
    456	while (readl_relaxed(pend) & WP_TCLR)
    457		cpu_relax();
    458	writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
    459
    460	return 0;
    461}
    462
    463static int dmtimer_clockevent_shutdown(struct clock_event_device *evt)
    464{
    465	struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
    466	struct dmtimer_systimer *t = &clkevt->t;
    467	void __iomem *ctrl = t->base + t->ctrl;
    468	u32 l;
    469
    470	l = readl_relaxed(ctrl);
    471	if (l & OMAP_TIMER_CTRL_ST) {
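       		/* Stop the timer by clearing the start (ST) bit */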
    472		l &= ~BIT(0);
    473		writel_relaxed(l, ctrl);
    474		/* Flush posted write */
    475		l = readl_relaxed(ctrl);
    476		/* Wait for 3.5 functional clock periods: 3500000 / rate microseconds */
    477		udelay(3500000 / t->rate + 1);
    478	}
    479	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
    480
    481	return 0;
    482}
    483
    484static int dmtimer_set_periodic(struct clock_event_device *evt)
    485{
    486	struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
    487	struct dmtimer_systimer *t = &clkevt->t;
    488	void __iomem *pend = t->base + t->pend;
    489
    490	dmtimer_clockevent_shutdown(evt);
    491
    492	/* Looks like we need to first set the load value separately */
    493	while (readl_relaxed(pend) & WP_TLDR)
    494		cpu_relax();
    495	writel_relaxed(clkevt->period, t->base + t->load);
    496
    497	while (readl_relaxed(pend) & WP_TCRR)
    498		cpu_relax();
    499	writel_relaxed(clkevt->period, t->base + t->counter);
    500
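       	/* Start the timer with auto-reload so it reloads the period on overflow */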
    501	while (readl_relaxed(pend) & WP_TCLR)
    502		cpu_relax();
    503	writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
    504		       t->base + t->ctrl);
    505
    506	return 0;
    507}
    508
    509static void omap_clockevent_idle(struct clock_event_device *evt)
    510{
    511	struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
    512	struct dmtimer_systimer *t = &clkevt->t;
    513
    514	dmtimer_systimer_disable(t);
    515	clk_disable(t->fck);
    516}
    517
    518static void omap_clockevent_unidle(struct clock_event_device *evt)
    519{
    520	struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
    521	struct dmtimer_systimer *t = &clkevt->t;
    522	int error;
    523
    524	error = clk_enable(t->fck);
    525	if (error)
    526		pr_err("could not enable timer fck on resume: %i\n", error);
    527
    528	dmtimer_systimer_enable(t);
    529	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
    530	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
    531}
    532
    533static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
    534					     struct device_node *np,
    535					     unsigned int features,
    536					     const struct cpumask *cpumask,
    537					     const char *name,
    538					     int rating)
    539{
    540	struct clock_event_device *dev;
    541	struct dmtimer_systimer *t;
    542	int error;
    543
    544	t = &clkevt->t;
    545	dev = &clkevt->dev;
    546
    547	/*
    548	 * We mostly use cpuidle_coupled with ARM local timers for runtime,
    549	 * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
    550	 */
    551	dev->features = features;
    552	dev->rating = rating;
    553	dev->set_next_event = dmtimer_set_next_event;
    554	dev->set_state_shutdown = dmtimer_clockevent_shutdown;
    555	dev->set_state_periodic = dmtimer_set_periodic;
    556	dev->set_state_oneshot = dmtimer_clockevent_shutdown;
    557	dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
    558	dev->tick_resume = dmtimer_clockevent_shutdown;
    559	dev->cpumask = cpumask;
    560
    561	dev->irq = irq_of_parse_and_map(np, 0);
    562	if (!dev->irq)
    563		return -ENXIO;
    564
    565	error = dmtimer_systimer_setup(np, &clkevt->t);
    566	if (error)
    567		return error;
    568
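       	/*
       	 * The period value is the counter load value: the timer counts up
       	 * from it and overflows after rate / HZ cycles, i.e. once per tick.
       	 */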
    569	clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
    570
    571	/*
    572	 * For clock-event timers we never read the timer counter and
    573	 * so we are not impacted by errata i103 and i767. Therefore,
    574	 * we can safely ignore this errata for clock-event timers.
    575	 */
    576	writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
    577
    578	error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
    579			    IRQF_TIMER, name, clkevt);
    580	if (error)
    581		goto err_out_unmap;
    582
    583	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
    584	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
    585
    586	pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
    587		name, of_find_property(np, "ti,timer-alwon", NULL) ?
    588		"always-on " : "", t->rate, np->parent);
    589
    590	return 0;
    591
    592err_out_unmap:
    593	iounmap(t->base);
    594
    595	return error;
    596}
    597
    598static int __init dmtimer_clockevent_init(struct device_node *np)
    599{
    600	struct dmtimer_clockevent *clkevt;
    601	int error;
    602
    603	clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
    604	if (!clkevt)
    605		return -ENOMEM;
    606
    607	error = dmtimer_clkevt_init_common(clkevt, np,
    608					   CLOCK_EVT_FEAT_PERIODIC |
    609					   CLOCK_EVT_FEAT_ONESHOT,
    610					   cpu_possible_mask, "clockevent",
    611					   300);
    612	if (error)
    613		goto err_out_free;
    614
    615	clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
    616					3, /* Timer internal resync latency */
    617					0xffffffff);
    618
    619	if (of_machine_is_compatible("ti,am33xx") ||
    620	    of_machine_is_compatible("ti,am43")) {
    621		clkevt->dev.suspend = omap_clockevent_idle;
    622		clkevt->dev.resume = omap_clockevent_unidle;
    623	}
    624
    625	return 0;
    626
    627err_out_free:
    628	kfree(clkevt);
    629
    630	return error;
    631}
    632
    633/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
    634static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
    635
    636static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
    637{
    638	struct dmtimer_clockevent *clkevt;
    639	int error;
    640
    641	if (!cpu_possible(cpu))
    642		return -EINVAL;
    643
    644	if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
    645	    !of_property_read_bool(np->parent, "ti,no-idle"))
    646		pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
    647
    648	clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
    649
    650	error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
    651					   cpumask_of(cpu), "percpu-dmtimer",
    652					   500);
    653	if (error)
    654		return error;
    655
    656	return 0;
    657}
    658
    659/* See the TRM for the timer internal resync latency */
    660static int omap_dmtimer_starting_cpu(unsigned int cpu)
    661{
    662	struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
    663	struct clock_event_device *dev = &clkevt->dev;
    664	struct dmtimer_systimer *t = &clkevt->t;
    665
    666	clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
    667	irq_force_affinity(dev->irq, cpumask_of(cpu));
    668
    669	return 0;
    670}
    671
    672static int __init dmtimer_percpu_timer_startup(void)
    673{
    674	struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
    675	struct dmtimer_systimer *t = &clkevt->t;
    676
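       	/* t->sysc is only set if dmtimer_percpu_timer_init() configured a timer */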
    677	if (t->sysc) {
    678		cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
    679				  "clockevents/omap/gptimer:starting",
    680				  omap_dmtimer_starting_cpu, NULL);
    681	}
    682
    683	return 0;
    684}
    685subsys_initcall(dmtimer_percpu_timer_startup);
    686
    687static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
    688{
    689	struct device_node *arm_timer;
    690
    691	arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
    692	if (of_device_is_available(arm_timer)) {
    693		pr_warn_once("ARM architected timer wrap issue i940 detected\n");
    694		return 0;
    695	}
    696
    697	if (pa == 0x4882c000)           /* dra7 dmtimer15 */
    698		return dmtimer_percpu_timer_init(np, 0);
    699	else if (pa == 0x4882e000)      /* dra7 dmtimer16 */
    700		return dmtimer_percpu_timer_init(np, 1);
    701
    702	return 0;
    703}
    704
    705/* Clocksource */
    706static struct dmtimer_clocksource *
    707to_dmtimer_clocksource(struct clocksource *cs)
    708{
    709	return container_of(cs, struct dmtimer_clocksource, dev);
    710}
    711
    712static u64 dmtimer_clocksource_read_cycles(struct clocksource *cs)
    713{
    714	struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
    715	struct dmtimer_systimer *t = &clksrc->t;
    716
    717	return (u64)readl_relaxed(t->base + t->counter);
    718}
    719
    720static void __iomem *dmtimer_sched_clock_counter;
    721
    722static u64 notrace dmtimer_read_sched_clock(void)
    723{
    724	return readl_relaxed(dmtimer_sched_clock_counter);
    725}
    726
    727static void dmtimer_clocksource_suspend(struct clocksource *cs)
    728{
    729	struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
    730	struct dmtimer_systimer *t = &clksrc->t;
    731
    732	clksrc->loadval = readl_relaxed(t->base + t->counter);
    733	dmtimer_systimer_disable(t);
    734	clk_disable(t->fck);
    735}
    736
    737static void dmtimer_clocksource_resume(struct clocksource *cs)
    738{
    739	struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
    740	struct dmtimer_systimer *t = &clksrc->t;
    741	int error;
    742
    743	error = clk_enable(t->fck);
    744	if (error)
    745		pr_err("could not enable timer fck on resume: %i\n", error);
    746
    747	dmtimer_systimer_enable(t);
    748	writel_relaxed(clksrc->loadval, t->base + t->counter);
    749	writel_relaxed(OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR,
    750		       t->base + t->ctrl);
    751}
    752
    753static int __init dmtimer_clocksource_init(struct device_node *np)
    754{
    755	struct dmtimer_clocksource *clksrc;
    756	struct dmtimer_systimer *t;
    757	struct clocksource *dev;
    758	int error;
    759
    760	clksrc = kzalloc(sizeof(*clksrc), GFP_KERNEL);
    761	if (!clksrc)
    762		return -ENOMEM;
    763
    764	dev = &clksrc->dev;
    765	t = &clksrc->t;
    766
    767	error = dmtimer_systimer_setup(np, t);
    768	if (error)
    769		goto err_out_free;
    770
    771	dev->name = "dmtimer";
    772	dev->rating = 300;
    773	dev->read = dmtimer_clocksource_read_cycles;
    774	dev->mask = CLOCKSOURCE_MASK(32);
    775	dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;
    776
    777	/* Unlike for clockevent, legacy code sets suspend only for am4 */
    778	if (of_machine_is_compatible("ti,am43")) {
    779		dev->suspend = dmtimer_clocksource_suspend;
    780		dev->resume = dmtimer_clocksource_resume;
    781	}
    782
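       	/* Start the counter free-running from 0 with auto-reload enabled */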
    783	writel_relaxed(0, t->base + t->counter);
    784	writel_relaxed(OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR,
    785		       t->base + t->ctrl);
    786
    787	pr_info("TI gptimer clocksource: %s%pOF\n",
    788		of_find_property(np, "ti,timer-alwon", NULL) ?
    789		"always-on " : "", np->parent);
    790
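       	/* The first registered dmtimer clocksource also provides sched_clock */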
    791	if (!dmtimer_sched_clock_counter) {
    792		dmtimer_sched_clock_counter = t->base + t->counter;
    793		sched_clock_register(dmtimer_read_sched_clock, 32, t->rate);
    794	}
    795
    796	if (clocksource_register_hz(dev, t->rate))
    797		pr_err("Could not register clocksource %pOF\n", np);
    798
    799	return 0;
    800
    801err_out_free:
    802	kfree(clksrc);
    803
    804	return -ENODEV;
    805}
    806
    807/*
    808 * To distinguish between a clocksource and a clockevent timer, we assume the
    809 * device tree has no interrupts configured for a clocksource timer.
    810 */
    811static int __init dmtimer_systimer_init(struct device_node *np)
    812{
    813	const __be32 *addr;
    814	u32 pa;
    815
    816	/* One time init for the preferred timer configuration */
    817	if (!clocksource && !clockevent)
    818		dmtimer_systimer_select_best();
    819
    820	if (!clocksource && !clockevent) {
    821		pr_err("%s: unable to detect system timers, update dtb?\n",
    822		       __func__);
    823
    824		return -EINVAL;
    825	}
    826
    827	addr = of_get_address(np, 0, NULL, NULL);
    828	pa = of_translate_address(np, addr);
    829	if (!pa)
    830		return -EINVAL;
    831
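       	/* Only use a dmtimer clocksource if no usable 32 KiHz counter was found */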
    832	if (counter_32k <= 0 && clocksource == pa)
    833		return dmtimer_clocksource_init(np);
    834
    835	if (clockevent == pa)
    836		return dmtimer_clockevent_init(np);
    837
    838	if (of_machine_is_compatible("ti,dra7"))
    839		return dmtimer_percpu_quirk_init(np, pa);
    840
    841	return 0;
    842}
    843
    844TIMER_OF_DECLARE(systimer_omap2, "ti,omap2420-timer", dmtimer_systimer_init);
    845TIMER_OF_DECLARE(systimer_omap3, "ti,omap3430-timer", dmtimer_systimer_init);
    846TIMER_OF_DECLARE(systimer_omap4, "ti,omap4430-timer", dmtimer_systimer_init);
    847TIMER_OF_DECLARE(systimer_omap5, "ti,omap5430-timer", dmtimer_systimer_init);
    848TIMER_OF_DECLARE(systimer_am33x, "ti,am335x-timer", dmtimer_systimer_init);
    849TIMER_OF_DECLARE(systimer_am3ms, "ti,am335x-timer-1ms", dmtimer_systimer_init);
    850TIMER_OF_DECLARE(systimer_dm814, "ti,dm814-timer", dmtimer_systimer_init);
    851TIMER_OF_DECLARE(systimer_dm816, "ti,dm816-timer", dmtimer_systimer_init);