cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ravb_ptp.c (8820B)


// SPDX-License-Identifier: GPL-2.0+
/* PTP 1588 clock using the Renesas Ethernet AVB
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
 */

#include "ravb.h"

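/* Issue a timer control request via GCCR.TCR: wait until no request is
 * pending, set the request bits, then wait for the hardware to complete it.
 */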
static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
{
	struct net_device *ndev = priv->ndev;
	int error;

	error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
	if (error)
		return error;

	ravb_modify(ndev, GCCR, request, request);
	return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
}

/* Caller must hold the lock */
static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts)
{
	struct net_device *ndev = priv->ndev;
	int error;

	error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE);
	if (error)
		return error;

	ts->tv_nsec = ravb_read(ndev, GCT0);
	ts->tv_sec  = ravb_read(ndev, GCT1) |
		((s64)ravb_read(ndev, GCT2) << 32);

	return 0;
}

/* Caller must hold the lock */
static int ravb_ptp_time_write(struct ravb_private *priv,
				const struct timespec64 *ts)
{
	struct net_device *ndev = priv->ndev;
	int error;
	u32 gccr;

	error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET);
	if (error)
		return error;

	gccr = ravb_read(ndev, GCCR);
	if (gccr & GCCR_LTO)
		return -EBUSY;
	ravb_write(ndev, ts->tv_nsec, GTO0);
	ravb_write(ndev, ts->tv_sec,  GTO1);
	ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2);
	ravb_write(ndev, gccr | GCCR_LTO, GCCR);

	return 0;
}

/* Caller must hold the lock */
static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns)
{
	struct net_device *ndev = priv->ndev;
	/* When the comparison value (GPTC.PTCV) is in range of
	 * [x-1 to x+1] (x is the configured increment value in
	 * GTI.TIV), it may happen that a comparison match is
	 * not detected when the timer wraps around.
	 */
	u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1;
	u32 gccr;

	if (ns < gti_ns_plus_1)
		ns = gti_ns_plus_1;
	else if (ns > 0 - gti_ns_plus_1)
		ns = 0 - gti_ns_plus_1;

	gccr = ravb_read(ndev, GCCR);
	if (gccr & GCCR_LPTC)
		return -EBUSY;
	ravb_write(ndev, ns, GPTC);
	ravb_write(ndev, gccr | GCCR_LPTC, GCCR);

	return 0;
}

/* PTP clock operations */
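/* Scale the default timer increment (addend) by ppb parts per billion and
 * load the result into GTI via the GCCR.LTI request.
 */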
static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	struct net_device *ndev = priv->ndev;
	unsigned long flags;
	u32 diff, addend;
	bool neg_adj = false;
	u32 gccr;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}
	addend = priv->ptp.default_addend;
	diff = div_u64((u64)addend * ppb, NSEC_PER_SEC);

	addend = neg_adj ? addend - diff : addend + diff;

	spin_lock_irqsave(&priv->lock, flags);

	priv->ptp.current_addend = addend;

	gccr = ravb_read(ndev, GCCR);
	if (gccr & GCCR_LTI) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return -EBUSY;
	}
	ravb_write(ndev, addend & GTI_TIV, GTI);
	ravb_write(ndev, gccr | GCCR_LTI, GCCR);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

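/* Shift the clock by delta nanoseconds: capture the current time, add the
 * offset, and load the result back with a timer reset request.
 */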
static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	struct timespec64 ts;
	unsigned long flags;
	int error;

	spin_lock_irqsave(&priv->lock, flags);
	error = ravb_ptp_time_read(priv, &ts);
	if (!error) {
		u64 now = ktime_to_ns(timespec64_to_ktime(ts));

		ts = ns_to_timespec64(now + delta);
		error = ravb_ptp_time_write(priv, &ts);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

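/* Read the current gPTP time under the driver lock */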
static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	unsigned long flags;
	int error;

	spin_lock_irqsave(&priv->lock, flags);
	error = ravb_ptp_time_read(priv, ts);
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

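/* Set the gPTP time to an absolute value under the driver lock */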
static int ravb_ptp_settime64(struct ptp_clock_info *ptp,
			      const struct timespec64 *ts)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	unsigned long flags;
	int error;

	spin_lock_irqsave(&priv->lock, flags);
	error = ravb_ptp_time_write(priv, ts);
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

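/* Enable or disable external timestamp capture by toggling the capture
 * interrupt; only channel index 0 is supported.
 */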
static int ravb_ptp_extts(struct ptp_clock_info *ptp,
			  struct ptp_extts_request *req, int on)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	const struct ravb_hw_info *info = priv->info;
	struct net_device *ndev = priv->ndev;
	unsigned long flags;

	/* Reject requests with unsupported flags */
	if (req->flags & ~(PTP_ENABLE_FEATURE |
			   PTP_RISING_EDGE |
			   PTP_FALLING_EDGE |
			   PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	if (req->index)
		return -EINVAL;

	if (priv->ptp.extts[req->index] == on)
		return 0;
	priv->ptp.extts[req->index] = on;

	spin_lock_irqsave(&priv->lock, flags);
	if (!info->irq_en_dis)
		ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
	else if (on)
		ravb_write(ndev, GIE_PTCS, GIE);
	else
		ravb_write(ndev, GID_PTCD, GID);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

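/* Program the single periodic output: the first match value goes to GPTC via
 * the GCCR.LPTC request and the period is reloaded from the interrupt handler
 * on every match; disabling clears the period and masks the match interrupt.
 */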
static int ravb_ptp_perout(struct ptp_clock_info *ptp,
			   struct ptp_perout_request *req, int on)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	const struct ravb_hw_info *info = priv->info;
	struct net_device *ndev = priv->ndev;
	struct ravb_ptp_perout *perout;
	unsigned long flags;
	int error = 0;

	/* Reject requests with unsupported flags */
	if (req->flags)
		return -EOPNOTSUPP;

	if (req->index)
		return -EINVAL;

	if (on) {
		u64 start_ns;
		u64 period_ns;

		start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec;
		period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec;

		if (start_ns > U32_MAX) {
			netdev_warn(ndev,
				    "ptp: start value (nsec) is over limit. Maximum size of start is only 32 bits\n");
			return -ERANGE;
		}

		if (period_ns > U32_MAX) {
			netdev_warn(ndev,
				    "ptp: period value (nsec) is over limit. Maximum size of period is only 32 bits\n");
			return -ERANGE;
		}

		spin_lock_irqsave(&priv->lock, flags);

		perout = &priv->ptp.perout[req->index];
		perout->target = (u32)start_ns;
		perout->period = (u32)period_ns;
		error = ravb_ptp_update_compare(priv, (u32)start_ns);
		if (!error) {
			/* Unmask interrupt */
			if (!info->irq_en_dis)
				ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
			else
				ravb_write(ndev, GIE_PTMS0, GIE);
		}
	} else {
		spin_lock_irqsave(&priv->lock, flags);

		perout = &priv->ptp.perout[req->index];
		perout->period = 0;

		/* Mask interrupt */
		if (!info->irq_en_dis)
			ravb_modify(ndev, GIC, GIC_PTME, 0);
		else
			ravb_write(ndev, GID_PTMD0, GID);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

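/* Dispatch PTP feature requests from the core to the extts/perout handlers */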
static int ravb_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *req, int on)
{
	switch (req->type) {
	case PTP_CLK_REQ_EXTTS:
		return ravb_ptp_extts(ptp, &req->extts, on);
	case PTP_CLK_REQ_PEROUT:
		return ravb_ptp_perout(ptp, &req->perout, on);
	default:
		return -EOPNOTSUPP;
	}
}

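/* Clock capabilities and callbacks registered with the PTP core; the request
 * handlers above only accept channel index 0.
 */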
static const struct ptp_clock_info ravb_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "ravb clock",
	.max_adj	= 50000000,
	.n_ext_ts	= N_EXT_TS,
	.n_per_out	= N_PER_OUT,
	.adjfreq	= ravb_ptp_adjfreq,
	.adjtime	= ravb_ptp_adjtime,
	.gettime64	= ravb_ptp_gettime64,
	.settime64	= ravb_ptp_settime64,
	.enable		= ravb_ptp_enable,
};

/* Caller must hold the lock */
void ravb_ptp_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 gis = ravb_read(ndev, GIS);

	gis &= ravb_read(ndev, GIC);
	if (gis & GIS_PTCF) {
		struct ptp_clock_event event;

		event.type = PTP_CLOCK_EXTTS;
		event.index = 0;
		event.timestamp = ravb_read(ndev, GCPT);
		ptp_clock_event(priv->ptp.clock, &event);
	}
	if (gis & GIS_PTMF) {
		struct ravb_ptp_perout *perout = priv->ptp.perout;

		if (perout->period) {
			perout->target += perout->period;
			ravb_ptp_update_compare(priv, perout->target);
		}
	}

	ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
}

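/* Set up the gPTP timer and register the PTP clock: take the default addend
 * from GTI, select the adjustable gPTP clock source (GCCR.TCSS), and hand the
 * clock to the PTP core.
 */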
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;

	priv->ptp.info = ravb_ptp_info;

	priv->ptp.default_addend = ravb_read(ndev, GTI);
	priv->ptp.current_addend = priv->ptp.default_addend;

	spin_lock_irqsave(&priv->lock, flags);
	ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
	ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
	spin_unlock_irqrestore(&priv->lock, flags);

	priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
}

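/* Mask and clear all gPTP interrupts, then unregister the PTP clock */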
void ravb_ptp_stop(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ravb_write(ndev, 0, GIC);
	ravb_write(ndev, 0, GIS);

	ptp_clock_unregister(priv->ptp.clock);
}