cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sh_mtu2.c (12439B)


// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - MTU2
 *
 *  Copyright (C) 2009 Magnus Damm
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif

struct sh_mtu2_device;

struct sh_mtu2_channel {
	struct sh_mtu2_device *mtu;
	unsigned int index;

	void __iomem *base;

	struct clock_event_device ced;
};

struct sh_mtu2_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	raw_spinlock_t lock; /* Protect the shared registers */

	struct sh_mtu2_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
};

#define TSTR -1 /* shared register */
#define TCR  0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR  4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR  6 /* channel register */

#define TCR_CCLR_NONE		(0 << 5)
#define TCR_CCLR_TGRA		(1 << 5)
#define TCR_CCLR_TGRB		(2 << 5)
#define TCR_CCLR_SYNC		(3 << 5)
#define TCR_CCLR_TGRC		(5 << 5)
#define TCR_CCLR_TGRD		(6 << 5)
#define TCR_CCLR_MASK		(7 << 5)
#define TCR_CKEG_RISING		(0 << 3)
#define TCR_CKEG_FALLING	(1 << 3)
#define TCR_CKEG_BOTH		(2 << 3)
#define TCR_CKEG_MASK		(3 << 3)
/* Values 4 to 7 are channel-dependent */
#define TCR_TPSC_P1		(0 << 0)
#define TCR_TPSC_P4		(1 << 0)
#define TCR_TPSC_P16		(2 << 0)
#define TCR_TPSC_P64		(3 << 0)
#define TCR_TPSC_CH0_TCLKA	(4 << 0)
#define TCR_TPSC_CH0_TCLKB	(5 << 0)
#define TCR_TPSC_CH0_TCLKC	(6 << 0)
#define TCR_TPSC_CH0_TCLKD	(7 << 0)
#define TCR_TPSC_CH1_TCLKA	(4 << 0)
#define TCR_TPSC_CH1_TCLKB	(5 << 0)
#define TCR_TPSC_CH1_P256	(6 << 0)
#define TCR_TPSC_CH1_TCNT2	(7 << 0)
#define TCR_TPSC_CH2_TCLKA	(4 << 0)
#define TCR_TPSC_CH2_TCLKB	(5 << 0)
#define TCR_TPSC_CH2_TCLKC	(6 << 0)
#define TCR_TPSC_CH2_P1024	(7 << 0)
#define TCR_TPSC_CH34_P256	(4 << 0)
#define TCR_TPSC_CH34_P1024	(5 << 0)
#define TCR_TPSC_CH34_TCLKA	(6 << 0)
#define TCR_TPSC_CH34_TCLKB	(7 << 0)
#define TCR_TPSC_MASK		(7 << 0)

#define TMDR_BFE		(1 << 6)
#define TMDR_BFB		(1 << 5)
#define TMDR_BFA		(1 << 4)
#define TMDR_MD_NORMAL		(0 << 0)
#define TMDR_MD_PWM_1		(2 << 0)
#define TMDR_MD_PWM_2		(3 << 0)
#define TMDR_MD_PHASE_1		(4 << 0)
#define TMDR_MD_PHASE_2		(5 << 0)
#define TMDR_MD_PHASE_3		(6 << 0)
#define TMDR_MD_PHASE_4		(7 << 0)
#define TMDR_MD_PWM_SYNC	(8 << 0)
#define TMDR_MD_PWM_COMP_CREST	(13 << 0)
#define TMDR_MD_PWM_COMP_TROUGH	(14 << 0)
#define TMDR_MD_PWM_COMP_BOTH	(15 << 0)
#define TMDR_MD_MASK		(15 << 0)

#define TIOC_IOCH(n)		((n) << 4)
#define TIOC_IOCL(n)		((n) << 0)
#define TIOR_OC_RETAIN		(0 << 0)
#define TIOR_OC_0_CLEAR		(1 << 0)
#define TIOR_OC_0_SET		(2 << 0)
#define TIOR_OC_0_TOGGLE	(3 << 0)
#define TIOR_OC_1_CLEAR		(5 << 0)
#define TIOR_OC_1_SET		(6 << 0)
#define TIOR_OC_1_TOGGLE	(7 << 0)
#define TIOR_IC_RISING		(8 << 0)
#define TIOR_IC_FALLING		(9 << 0)
#define TIOR_IC_BOTH		(10 << 0)
#define TIOR_IC_TCNT		(12 << 0)
#define TIOR_MASK		(15 << 0)

#define TIER_TTGE		(1 << 7)
#define TIER_TTGE2		(1 << 6)
#define TIER_TCIEU		(1 << 5)
#define TIER_TCIEV		(1 << 4)
#define TIER_TGIED		(1 << 3)
#define TIER_TGIEC		(1 << 2)
#define TIER_TGIEB		(1 << 1)
#define TIER_TGIEA		(1 << 0)

#define TSR_TCFD		(1 << 7)
#define TSR_TCFU		(1 << 5)
#define TSR_TCFV		(1 << 4)
#define TSR_TGFD		(1 << 3)
#define TSR_TGFC		(1 << 2)
#define TSR_TGFB		(1 << 1)
#define TSR_TGFA		(1 << 0)

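/*
 * Byte offsets of the per-channel registers relative to the channel base.
 * TCNT and TGR are accessed as 16-bit quantities, all other registers as
 * 8-bit (see sh_mtu2_read()/sh_mtu2_write() below).
 */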
static unsigned long mtu2_reg_offs[] = {
	[TCR] = 0,
	[TMDR] = 1,
	[TIOR] = 2,
	[TIER] = 4,
	[TSR] = 5,
	[TCNT] = 6,
	[TGR] = 8,
};

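/*
 * Register accessors: the shared TSTR register lives at a fixed offset
 * (0x280) from the device base, while all other registers are addressed
 * relative to the channel base using mtu2_reg_offs[].
 */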
static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR)
		return ioread8(ch->mtu->mapbase + 0x280);

	offs = mtu2_reg_offs[reg_nr];

	if ((reg_nr == TCNT) || (reg_nr == TGR))
		return ioread16(ch->base + offs);
	else
		return ioread8(ch->base + offs);
}

static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR)
		return iowrite8(value, ch->mtu->mapbase + 0x280);

	offs = mtu2_reg_offs[reg_nr];

	if ((reg_nr == TCNT) || (reg_nr == TGR))
		iowrite16(value, ch->base + offs);
	else
		iowrite8(value, ch->base + offs);
}

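/*
 * TSTR holds one start/stop bit per channel and is shared by all channels,
 * so it is updated with a read-modify-write sequence under the device lock.
 */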
static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->mtu->lock, flags);
	value = sh_mtu2_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_mtu2_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->mtu->lock, flags);
}

static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
{
	unsigned long periodic;
	unsigned long rate;
	int ret;

	pm_runtime_get_sync(&ch->mtu->pdev->dev);
	dev_pm_syscore_device(&ch->mtu->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->mtu->clk);
	if (ret) {
		dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_mtu2_start_stop_ch(ch, 0);

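	/*
	 * With the /64 prescaler selected below, one TGRA compare match per
	 * tick means TGRA = rate / HZ; the + HZ/2 rounds to the nearest
	 * integer.
	 */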
	rate = clk_get_rate(ch->mtu->clk) / 64;
	periodic = (rate + HZ/2) / HZ;

	/*
	 * "Periodic Counter Operation"
	 * Clear on TGRA compare match, divide clock by 64.
	 */
	sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
	sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
		      TIOC_IOCL(TIOR_OC_0_CLEAR));
	sh_mtu2_write(ch, TGR, periodic);
	sh_mtu2_write(ch, TCNT, 0);
	sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
	sh_mtu2_write(ch, TIER, TIER_TGIEA);

	/* enable channel */
	sh_mtu2_start_stop_ch(ch, 1);

	return 0;
}

static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
{
	/* disable channel */
	sh_mtu2_start_stop_ch(ch, 0);

	/* stop clock */
	clk_disable(ch->mtu->clk);

	dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
	pm_runtime_put(&ch->mtu->pdev->dev);
}

static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
	struct sh_mtu2_channel *ch = dev_id;

    261	/* acknowledge interrupt */
    262	sh_mtu2_read(ch, TSR);
    263	sh_mtu2_write(ch, TSR, ~TSR_TGFA);
    264
    265	/* notify clockevent layer */
    266	ch->ced.event_handler(&ch->ced);
    267	return IRQ_HANDLED;
    268}
    269
    270static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
    271{
    272	return container_of(ced, struct sh_mtu2_channel, ced);
    273}
    274
    275static int sh_mtu2_clock_event_shutdown(struct clock_event_device *ced)
    276{
    277	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
    278
    279	if (clockevent_state_periodic(ced))
    280		sh_mtu2_disable(ch);
    281
    282	return 0;
    283}
    284
    285static int sh_mtu2_clock_event_set_periodic(struct clock_event_device *ced)
    286{
    287	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
    288
    289	if (clockevent_state_periodic(ced))
    290		sh_mtu2_disable(ch);
    291
    292	dev_info(&ch->mtu->pdev->dev, "ch%u: used for periodic clock events\n",
    293		 ch->index);
    294	sh_mtu2_enable(ch);
    295	return 0;
    296}
    297
    298static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
    299{
    300	dev_pm_genpd_suspend(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
    301}
    302
    303static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
    304{
    305	dev_pm_genpd_resume(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
    306}
    307
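/*
 * The clockevent is registered as periodic-only: no set_next_event()
 * callback is provided, so the device is never used in one-shot mode.
 */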
static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
					const char *name)
{
	struct clock_event_device *ced = &ch->ced;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_state_shutdown = sh_mtu2_clock_event_shutdown;
	ced->set_state_periodic = sh_mtu2_clock_event_set_periodic;
	ced->suspend = sh_mtu2_clock_event_suspend;
	ced->resume = sh_mtu2_clock_event_resume;

	dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);
}

static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
{
	ch->mtu->has_clockevent = true;
	sh_mtu2_register_clockevent(ch, name);

	return 0;
}

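/*
 * Channel bases relative to mapbase, indexed by channel number: channel 2
 * sits below channels 0 and 1 in the MTU2 address map.
 */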
static const unsigned int sh_mtu2_channel_offsets[] = {
	0x300, 0x380, 0x000,
};

static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
				 struct sh_mtu2_device *mtu)
{
	char name[6];
	int irq;
	int ret;

	ch->mtu = mtu;

	sprintf(name, "tgi%ua", index);
	irq = platform_get_irq_byname(mtu->pdev, name);
	if (irq < 0) {
		/* Skip channels with no declared interrupt. */
		return 0;
	}

	ret = request_irq(irq, sh_mtu2_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->mtu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
			index, irq);
		return ret;
	}

	ch->base = mtu->mapbase + sh_mtu2_channel_offsets[index];
	ch->index = index;

	return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
}

static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
{
	struct resource *res;

	res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	mtu->mapbase = ioremap(res->start, resource_size(res));
	if (mtu->mapbase == NULL)
		return -ENXIO;

	return 0;
}

static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
			 struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	mtu->pdev = pdev;

	raw_spin_lock_init(&mtu->lock);

	/* Get hold of clock. */
	mtu->clk = clk_get(&mtu->pdev->dev, "fck");
	if (IS_ERR(mtu->clk)) {
		dev_err(&mtu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(mtu->clk);
	}

	ret = clk_prepare(mtu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_mtu2_map_memory(mtu);
	if (ret < 0) {
		dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	ret = platform_irq_count(pdev);
	if (ret < 0)
		goto err_unmap;

	mtu->num_channels = min_t(unsigned int, ret,
				  ARRAY_SIZE(sh_mtu2_channel_offsets));

	mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
				GFP_KERNEL);
	if (mtu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	for (i = 0; i < mtu->num_channels; ++i) {
		ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, mtu);

	return 0;

err_unmap:
	kfree(mtu->channels);
	iounmap(mtu->mapbase);
err_clk_unprepare:
	clk_unprepare(mtu->clk);
err_clk_put:
	clk_put(mtu->clk);
	return ret;
}

static int sh_mtu2_probe(struct platform_device *pdev)
{
	struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
	int ret;

	if (!is_sh_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

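	/*
	 * Non-NULL drvdata means sh_mtu2_setup() already ran for this
	 * device during the early platform probe; keep that instance.
	 */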
	if (mtu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
	if (mtu == NULL)
		return -ENOMEM;

	ret = sh_mtu2_setup(mtu, pdev);
	if (ret) {
		kfree(mtu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_sh_early_platform_device(pdev))
		return 0;

 out:
	if (mtu->has_clockevent)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_mtu2_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent */
}

static const struct platform_device_id sh_mtu2_id_table[] = {
	{ "sh-mtu2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);

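/*
 * DT match table. A matching node is expected to provide the "fck"
 * functional clock and per-channel "tgi<n>a" interrupts (see
 * sh_mtu2_setup() and sh_mtu2_setup_channel() above).
 */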
static const struct of_device_id sh_mtu2_of_table[] __maybe_unused = {
	{ .compatible = "renesas,mtu2" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mtu2_of_table);

static struct platform_driver sh_mtu2_device_driver = {
	.probe		= sh_mtu2_probe,
	.remove		= sh_mtu2_remove,
	.driver		= {
		.name	= "sh_mtu2",
		.of_match_table = of_match_ptr(sh_mtu2_of_table),
	},
	.id_table	= sh_mtu2_id_table,
};

static int __init sh_mtu2_init(void)
{
	return platform_driver_register(&sh_mtu2_device_driver);
}

static void __exit sh_mtu2_exit(void)
{
	platform_driver_unregister(&sh_mtu2_device_driver);
}

#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_mtu2_device_driver);
#endif

subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");