cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

timer-qcom.c (6136B)
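Qualcomm MSM timer driver (drivers/clocksource/timer-qcom.c in the kernel tree): the per-CPU GPT provides the clockevent, while CPU0's DGT provides the clocksource, sched_clock and delay timer.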


// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2012,2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>

#include <asm/delay.h>

#define TIMER_MATCH_VAL			0x0000
#define TIMER_COUNT_VAL			0x0004
#define TIMER_ENABLE			0x0008
#define TIMER_ENABLE_CLR_ON_MATCH_EN	BIT(1)
#define TIMER_ENABLE_EN			BIT(0)
#define TIMER_CLEAR			0x000C
#define DGT_CLK_CTL			0x10
#define DGT_CLK_CTL_DIV_4		0x3
#define TIMER_STS_GPT0_CLR_PEND		BIT(10)

#define GPT_HZ 32768

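/*
 * event_base points into the GPT register block used for clockevents;
 * sts_base, when present, is the status register polled for a pending
 * GPT0 clear in msm_timer_set_next_event().
 */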
static void __iomem *event_base;
static void __iomem *sts_base;

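/*
 * Timer tick handler: in oneshot mode, stop the timer before calling
 * into the clockevent core; the next expiry is programmed via
 * msm_timer_set_next_event().
 */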
static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	/* Stop the timer tick */
	if (clockevent_state_oneshot(evt)) {
		u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
		ctrl &= ~TIMER_ENABLE_EN;
		writel_relaxed(ctrl, event_base + TIMER_ENABLE);
	}
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

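/*
 * Arm the next expiry: disable the timer, reset the count (the value
 * written to TIMER_CLEAR appears to be a don't-care; ctrl is simply
 * what is at hand), program the match value, then re-enable. On SoCs
 * with a status register, first wait for the GPT0 clear to take
 * effect.
 */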
static int msm_timer_set_next_event(unsigned long cycles,
				    struct clock_event_device *evt)
{
	u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);

	ctrl &= ~TIMER_ENABLE_EN;
	writel_relaxed(ctrl, event_base + TIMER_ENABLE);

	writel_relaxed(ctrl, event_base + TIMER_CLEAR);
	writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);

	if (sts_base)
		while (readl_relaxed(sts_base) & TIMER_STS_GPT0_CLR_PEND)
			cpu_relax();

	writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
	return 0;
}

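/*
 * Serves as set_state_shutdown, set_state_oneshot and tick_resume
 * alike: all three simply stop the timer; arming happens in
 * msm_timer_set_next_event().
 */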
static int msm_timer_shutdown(struct clock_event_device *evt)
{
	u32 ctrl;

	ctrl = readl_relaxed(event_base + TIMER_ENABLE);
	ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);
	writel_relaxed(ctrl, event_base + TIMER_ENABLE);
	return 0;
}

static struct clock_event_device __percpu *msm_evt;

static void __iomem *source_base;

static notrace u64 msm_read_timer_count(struct clocksource *cs)
{
	return readl_relaxed(source_base + TIMER_COUNT_VAL);
}

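/*
 * CPU0's DGT backs the clocksource; it also feeds sched_clock and the
 * timer-based delay loop below.
 */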
static struct clocksource msm_clocksource = {
	.name	= "dg_timer",
	.rating	= 300,
	.read	= msm_read_timer_count,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int msm_timer_irq;
static int msm_timer_has_ppi;

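/*
 * CPU hotplug "starting" callback: configure and register this CPU's
 * clockevent, then enable its per-CPU interrupt (PPI) or request a
 * regular interrupt kept in place via IRQF_NOBALANCING (SPI case).
 */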
static int msm_local_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
	int err;

	evt->irq = msm_timer_irq;
	evt->name = "msm_timer";
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 200;
	evt->set_state_shutdown = msm_timer_shutdown;
	evt->set_state_oneshot = msm_timer_shutdown;
	evt->tick_resume = msm_timer_shutdown;
	evt->set_next_event = msm_timer_set_next_event;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);

	if (msm_timer_has_ppi) {
		enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
	} else {
		err = request_irq(evt->irq, msm_timer_interrupt,
				IRQF_TIMER | IRQF_NOBALANCING |
				IRQF_TRIGGER_RISING, "gp_timer", evt);
		if (err)
			pr_err("request_irq failed\n");
	}

	return 0;
}

static int msm_local_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);

	evt->set_state_shutdown(evt);
	disable_percpu_irq(evt->irq);
	return 0;
}

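/*
 * notrace: sched_clock() is used by the tracing code itself, so its
 * read hook must not be traced.
 */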
static u64 notrace msm_sched_clock_read(void)
{
	return msm_clocksource.read(&msm_clocksource);
}

static unsigned long msm_read_current_timer(void)
{
	return msm_clocksource.read(&msm_clocksource);
}

static struct delay_timer msm_delay_timer = {
	.read_current_timer = msm_read_current_timer,
};

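/*
 * Common init for both DT variants: bring up the per-CPU clockevents
 * via CPU hotplug callbacks, then register the DGT as clocksource,
 * sched_clock and delay timer. Note that the err path falls through:
 * even if clockevent setup fails, the clocksource side is still
 * registered.
 */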
static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
				  bool percpu)
{
	struct clocksource *cs = &msm_clocksource;
	int res = 0;

	msm_timer_irq = irq;
	msm_timer_has_ppi = percpu;

	msm_evt = alloc_percpu(struct clock_event_device);
	if (!msm_evt) {
		pr_err("memory allocation failed for clockevents\n");
		goto err;
	}

	if (percpu)
		res = request_percpu_irq(irq, msm_timer_interrupt,
					 "gp_timer", msm_evt);

	if (res) {
		pr_err("request_percpu_irq failed\n");
	} else {
		/* Install and invoke hotplug callbacks */
		res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
					"clockevents/qcom/timer:starting",
					msm_local_timer_starting_cpu,
					msm_local_timer_dying_cpu);
		if (res) {
			free_percpu_irq(irq, msm_evt);
			goto err;
		}
	}

err:
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
	msm_delay_timer.freq = dgt_hz;
	register_current_timer_delay(&msm_delay_timer);

	return res;
}

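/*
 * DT probe: the GPT used for clockevents sits at +0x4 within the
 * mapped block and its status register at +0x88; CPU0's DGT, located
 * via the optional "cpu-offset" property, sits at +0x24. The DGT
 * input clock is divided by 4, matching DGT_CLK_CTL_DIV_4.
 */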
static int __init msm_dt_timer_init(struct device_node *np)
{
	u32 freq;
	int irq, ret;
	struct resource res;
	u32 percpu_offset;
	void __iomem *base;
	void __iomem *cpu0_base;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("Failed to map event base\n");
		return -ENXIO;
	}

	/* We use GPT0 for the clockevent */
	irq = irq_of_parse_and_map(np, 1);
	if (irq <= 0) {
		pr_err("Can't get irq\n");
		return -EINVAL;
	}

	/* We use CPU0's DGT for the clocksource */
	if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	ret = of_address_to_resource(np, 0, &res);
	if (ret) {
		pr_err("Failed to parse DGT resource\n");
		return ret;
	}

	cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
	if (!cpu0_base) {
		pr_err("Failed to map source base\n");
		return -EINVAL;
	}

	if (of_property_read_u32(np, "clock-frequency", &freq)) {
		pr_err("Unknown frequency\n");
		return -EINVAL;
	}

	event_base = base + 0x4;
	sts_base = base + 0x88;
	source_base = cpu0_base + 0x24;
	freq /= 4;
	writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);

	return msm_timer_init(freq, 32, irq, !!percpu_offset);
}
TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);