cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cpts.c (19707B)
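
For orientation, a minimal sketch of how a MAC driver might drive the API exported below (hypothetical caller; dev, regs, node, skb and error handling are assumed and trimmed):

	struct cpts *cpts;
	int err;

	cpts = cpts_create(dev, regs, node, 0);	/* map regs, parse DT, prepare refclk */
	if (IS_ERR(cpts))
		return PTR_ERR(cpts);
	err = cpts_register(cpts);		/* enable hw, register the PTP clock */

	/* TX path, for skbs with SKBTX_IN_PROGRESS set: */
	cpts_tx_timestamp(cpts, skb);		/* queued; PTP worker delivers the TS */

	/* RX path, before eth_type_trans(): */
	cpts_rx_timestamp(cpts, skb);		/* fills skb_hwtstamps() on a match */

	/* teardown */
	cpts_unregister(cpts);
	cpts_release(cpts);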


// SPDX-License-Identifier: GPL-2.0+
/*
 * TI Common Platform Time Sync
 *
 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
 *
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/if.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include "cpts.h"

#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
#define CPTS_SKB_RX_TX_TMO 100 /* ms */
#define CPTS_EVENT_RX_TX_TIMEOUT (100) /* ms */

struct cpts_skb_cb_data {
	u32 skb_mtype_seqid;
	unsigned long tmo;
};

#define cpts_read32(c, r)	readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r)	writel_relaxed(v, &c->reg->r)

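/*
 * Hardware events arrive as two 32-bit words: event_low carries the raw
 * counter timestamp, while event_high packs the port number, the event
 * type and, for RX/TX events, the PTP message type and sequence id.
 * The *_SHIFT/*_MASK definitions used by the accessors below come from
 * cpts.h.
 */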
static int cpts_event_port(struct cpts_event *event)
{
	return (event->high >> PORT_NUMBER_SHIFT) & PORT_NUMBER_MASK;
}

static int event_expired(struct cpts_event *event)
{
	return time_after(jiffies, event->tmo);
}

static int event_type(struct cpts_event *event)
{
	return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}

static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
{
	u32 r = cpts_read32(cpts, intstat_raw);

	if (r & TS_PEND_RAW) {
		*high = cpts_read32(cpts, event_high);
		*low  = cpts_read32(cpts, event_low);
		cpts_write32(cpts, EVENT_POP, event_pop);
		return 0;
	}
	return -1;
}

static int cpts_purge_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	int removed = 0;

	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "cpts: event pool cleaned up %d\n", removed);
	return removed ? 0 : -1;
}

static void cpts_purge_txq(struct cpts *cpts)
{
	struct cpts_skb_cb_data *skb_cb;
	struct sk_buff *skb, *tmp;
	int removed = 0;

	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
		skb_cb = (struct cpts_skb_cb_data *)skb->cb;
		if (time_after(jiffies, skb_cb->tmo)) {
			__skb_unlink(skb, &cpts->txq);
			dev_consume_skb_any(skb);
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
}

/*
 * Drain the hardware event FIFO into the software event list.  PUSH and
 * HW events are handled on the spot, ROLL/HALF are ignored, and RX/TX
 * events are queued for later matching against skbs.
 * Returns zero if an event of the requested type was found.
 */
static int cpts_fifo_read(struct cpts *cpts, int match)
{
	struct ptp_clock_event pevent;
	bool need_schedule = false;
	struct cpts_event *event;
	unsigned long flags;
	int i, type = -1;
	u32 hi, lo;

	spin_lock_irqsave(&cpts->lock, flags);

	for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
		if (cpts_fifo_pop(cpts, &hi, &lo))
			break;

		if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
			dev_warn(cpts->dev, "cpts: event pool empty\n");
			break;
		}

		event = list_first_entry(&cpts->pool, struct cpts_event, list);
		event->high = hi;
		event->low = lo;
		event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
		type = event_type(event);

		dev_dbg(cpts->dev, "CPTS_EV: %d high:%08X low:%08x\n",
			type, event->high, event->low);
		switch (type) {
		case CPTS_EV_PUSH:
			WRITE_ONCE(cpts->cur_timestamp, lo);
			timecounter_read(&cpts->tc);
			if (cpts->mult_new) {
				cpts->cc.mult = cpts->mult_new;
				cpts->mult_new = 0;
			}
			if (!cpts->irq_poll)
				complete(&cpts->ts_push_complete);
			break;
		case CPTS_EV_TX:
		case CPTS_EV_RX:
			event->tmo = jiffies +
				msecs_to_jiffies(CPTS_EVENT_RX_TX_TIMEOUT);

			list_del_init(&event->list);
			list_add_tail(&event->list, &cpts->events);
			need_schedule = true;
			break;
		case CPTS_EV_ROLL:
		case CPTS_EV_HALF:
			break;
		case CPTS_EV_HW:
			pevent.timestamp = event->timestamp;
			pevent.type = PTP_CLOCK_EXTTS;
			pevent.index = cpts_event_port(event) - 1;
			ptp_clock_event(cpts->clock, &pevent);
			break;
		default:
			dev_err(cpts->dev, "cpts: unknown event type\n");
			break;
		}
		if (type == match)
			break;
	}

	spin_unlock_irqrestore(&cpts->lock, flags);

	if (!cpts->irq_poll && need_schedule)
		ptp_schedule_worker(cpts->clock, 0);

	return type == match ? 0 : -1;
}

void cpts_misc_interrupt(struct cpts *cpts)
{
	cpts_fifo_read(cpts, -1);
}
EXPORT_SYMBOL_GPL(cpts_misc_interrupt);

static u64 cpts_systim_read(const struct cyclecounter *cc)
{
	struct cpts *cpts = container_of(cc, struct cpts, cc);

	return READ_ONCE(cpts->cur_timestamp);
}

static void cpts_update_cur_time(struct cpts *cpts, int match,
				 struct ptp_system_timestamp *sts)
{
	unsigned long flags;

	reinit_completion(&cpts->ts_push_complete);

	/* use spin_lock_irqsave() here as it has to run very fast */
	spin_lock_irqsave(&cpts->lock, flags);
	ptp_read_system_prets(sts);
	cpts_write32(cpts, TS_PUSH, ts_push);
	cpts_read32(cpts, ts_push);
	ptp_read_system_postts(sts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	if (cpts->irq_poll && cpts_fifo_read(cpts, match) && match != -1)
		dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");

	if (!cpts->irq_poll &&
	    !wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
		dev_err(cpts->dev, "cpts: timed out waiting for a time stamp\n");
}

/* PTP clock operations */

static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	int neg_adj = 0;
	u32 diff, mult;
	u64 adj;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	mult = cpts->cc_mult;
	adj = mult;
	adj *= ppb;
	diff = div_u64(adj, 1000000000ULL);
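	/*
	 * Illustrative arithmetic: diff = cc_mult * |ppb| / 1e9.  With
	 * cc_mult = 0x80000000 (2147483648) and ppb = 1000, diff = 2147,
	 * so mult_new below shifts the clock rate by roughly 1 ppm.
	 */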

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts->mult_new = neg_adj ? mult - diff : mult + diff;

	cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);

	mutex_unlock(&cpts->ptp_clk_mutex);
	return 0;
}

static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	mutex_lock(&cpts->ptp_clk_mutex);
	timecounter_adjtime(&cpts->tc, delta);
	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_ptp_gettimeex(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	u64 ns;

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts_update_cur_time(cpts, CPTS_EV_PUSH, sts);

	ns = timecounter_read(&cpts->tc);
	mutex_unlock(&cpts->ptp_clk_mutex);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int cpts_ptp_settime(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	u64 ns;

	ns = timespec64_to_ns(ts);

	mutex_lock(&cpts->ptp_clk_mutex);
	timecounter_init(&cpts->tc, &cpts->cc, ns);
	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_extts_enable(struct cpts *cpts, u32 index, int on)
{
	u32 v;

	if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
		return 0;

	mutex_lock(&cpts->ptp_clk_mutex);

	v = cpts_read32(cpts, control);
	if (on) {
		v |= BIT(8 + index);
		cpts->hw_ts_enable |= BIT(index);
	} else {
		v &= ~BIT(8 + index);
		cpts->hw_ts_enable &= ~BIT(index);
	}
	cpts_write32(cpts, v, control);

	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq, int on)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return cpts_extts_enable(cpts, rq->extts.index, on);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

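/*
 * Try to pair a TX event with a queued skb.  The txq is spliced onto a
 * private list so the walk can run without holding the queue lock; the
 * matching skb gets its timestamp via skb_tstamp_tx(), expired skbs are
 * dropped along the way, and anything left unmatched is spliced back
 * for the next pass.
 */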
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
	struct sk_buff_head txq_list;
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	bool found = false;
	u32 mtype_seqid;

	mtype_seqid = event->high &
		      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
		       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
		       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

	__skb_queue_head_init(&txq_list);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice_init(&cpts->txq, &txq_list);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	skb_queue_walk_safe(&txq_list, skb, tmp) {
		struct skb_shared_hwtstamps ssh;
		struct cpts_skb_cb_data *skb_cb =
					(struct cpts_skb_cb_data *)skb->cb;

		if (mtype_seqid == skb_cb->skb_mtype_seqid) {
			memset(&ssh, 0, sizeof(ssh));
			ssh.hwtstamp = ns_to_ktime(event->timestamp);
			skb_tstamp_tx(skb, &ssh);
			found = true;
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
			dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			break;
		}

		if (time_after(jiffies, skb_cb->tmo)) {
			/* drop skbs that have waited longer than CPTS_SKB_RX_TX_TMO */
			dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
		}
	}

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice(&txq_list, &cpts->txq);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return found;
}

static void cpts_process_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	LIST_HEAD(events_free);
	unsigned long flags;
	LIST_HEAD(events);

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_init(&cpts->events, &events);
	spin_unlock_irqrestore(&cpts->lock, flags);

	list_for_each_safe(this, next, &events) {
		event = list_entry(this, struct cpts_event, list);
		if (cpts_match_tx_ts(cpts, event) ||
		    time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &events_free);
		}
	}

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_tail(&events, &cpts->events);
	list_splice_tail(&events_free, &cpts->pool);
	spin_unlock_irqrestore(&cpts->lock, flags);
}

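/*
 * do_aux_work callback, run from the PTP kernel worker.  Pushing a fresh
 * timestamp and calling timecounter_read() here keeps the 32-bit counter
 * from wrapping unobserved; deferred TX timestamps are matched in the
 * same pass.  The return value is the delay in jiffies until the next
 * invocation.
 */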
static long cpts_overflow_check(struct ptp_clock_info *ptp)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	unsigned long delay = cpts->ov_check_period;
	unsigned long flags;
	u64 ns;

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts_update_cur_time(cpts, -1, NULL);
	ns = timecounter_read(&cpts->tc);

	cpts_process_events(cpts);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	if (!skb_queue_empty(&cpts->txq)) {
		cpts_purge_txq(cpts);
		if (!skb_queue_empty(&cpts->txq))
			delay = CPTS_SKB_TX_WORK_TIMEOUT;
	}
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
	mutex_unlock(&cpts->ptp_clk_mutex);
	return (long)delay;
}

static const struct ptp_clock_info cpts_info = {
	.owner		= THIS_MODULE,
	.name		= "CPTS timer",
	.max_adj	= 1000000,
	.n_ext_ts	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= cpts_ptp_adjfreq,
	.adjtime	= cpts_ptp_adjtime,
	.gettimex64	= cpts_ptp_gettimeex,
	.settime64	= cpts_ptp_settime,
	.enable		= cpts_ptp_enable,
	.do_aux_work	= cpts_overflow_check,
};

static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
	unsigned int ptp_class = ptp_classify_raw(skb);
	struct ptp_header *hdr;
	u8 msgtype;
	u16 seqid;

	if (ptp_class == PTP_CLASS_NONE)
		return 0;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return 0;

	msgtype = ptp_get_msgtype(hdr, ptp_class);
	seqid	= ntohs(hdr->sequence_id);

	*mtype_seqid  = (msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
	*mtype_seqid |= (seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;

	return 1;
}

static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
			int ev_type, u32 skb_mtype_seqid)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	unsigned long flags;
	u32 mtype_seqid;
	u64 ns = 0;

	cpts_fifo_read(cpts, -1);
	spin_lock_irqsave(&cpts->lock, flags);
	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			continue;
		}

		mtype_seqid = event->high &
			      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
			       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
			       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

		if (mtype_seqid == skb_mtype_seqid) {
			ns = event->timestamp;
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			break;
		}
	}
	spin_unlock_irqrestore(&cpts->lock, flags);

	return ns;
}

void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
	struct skb_shared_hwtstamps *ssh;
	int ret;
	u64 ns;

    512	/* cpts_rx_timestamp() is called before eth_type_trans(), so
    513	 * skb MAC Hdr properties are not configured yet. Hence need to
    514	 * reset skb MAC header here
    515	 */
	skb_reset_mac_header(skb);
	ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;

	skb_cb->skb_mtype_seqid |= (CPTS_EV_RX << EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
		__func__, skb_cb->skb_mtype_seqid);

	ns = cpts_find_ts(cpts, skb, CPTS_EV_RX, skb_cb->skb_mtype_seqid);
	if (!ns)
		return;
	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL_GPL(cpts_rx_timestamp);

void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
	int ret;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;

	skb_cb->skb_mtype_seqid |= (CPTS_EV_TX << EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
		__func__, skb_cb->skb_mtype_seqid);

	/* Always defer TX TS processing to PTP worker */
	skb_get(skb);
	/* get the timestamp for timeouts */
	skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
	skb_queue_tail(&cpts->txq, skb);
	ptp_schedule_worker(cpts->clock, 0);
}
EXPORT_SYMBOL_GPL(cpts_tx_timestamp);

int cpts_register(struct cpts *cpts)
{
	int err, i;

	skb_queue_head_init(&cpts->txq);
	INIT_LIST_HEAD(&cpts->events);
	INIT_LIST_HEAD(&cpts->pool);
	for (i = 0; i < CPTS_MAX_EVENTS; i++)
		list_add(&cpts->pool_data[i].list, &cpts->pool);

	err = clk_enable(cpts->refclk);
	if (err)
		return err;

	cpts_write32(cpts, CPTS_EN, control);
	cpts_write32(cpts, TS_PEND_EN, int_enable);

	timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns());

	cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
	if (IS_ERR(cpts->clock)) {
		err = PTR_ERR(cpts->clock);
		cpts->clock = NULL;
		goto err_ptp;
	}
	cpts->phc_index = ptp_clock_index(cpts->clock);

	ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
	return 0;

err_ptp:
	clk_disable(cpts->refclk);
	return err;
}
EXPORT_SYMBOL_GPL(cpts_register);

void cpts_unregister(struct cpts *cpts)
{
	if (WARN_ON(!cpts->clock))
		return;

	ptp_clock_unregister(cpts->clock);
	cpts->clock = NULL;
	cpts->phc_index = -1;

	cpts_write32(cpts, 0, int_enable);
	cpts_write32(cpts, 0, control);

	/* Drop all packets */
	skb_queue_purge(&cpts->txq);

	clk_disable(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_unregister);

static void cpts_calc_mult_shift(struct cpts *cpts)
{
	u64 frac, maxsec, ns;
	u32 freq;

	freq = clk_get_rate(cpts->refclk);

	/* Calculate the maximum number of seconds we can count before
	 * the cycle counter wraps around.
	 */
	maxsec = cpts->cc.mask;
	do_div(maxsec, freq);
	/* limit the conversion range to 10 s, as larger values produce
	 * too small a mult factor and so reduce the conversion accuracy
	 */
	if (maxsec > 10)
		maxsec = 10;

	/* Calc overflow check period (maxsec / 2) */
	cpts->ov_check_period = (HZ * maxsec) / 2;
	dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
		 cpts->ov_check_period);

	if (cpts->cc.mult || cpts->cc.shift)
		return;

	clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
			       freq, NSEC_PER_SEC, maxsec);
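	/*
	 * The cyclecounter converts as ns = (cycles * mult) >> shift.
	 * For a 250 MHz refclk (4 ns per cycle), for instance,
	 * mult = 0x80000000 with shift = 29 would be an exact fit,
	 * since 2^31 >> 29 == 4.
	 */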

	frac = 0;
	ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);

	dev_info(cpts->dev,
		 "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
		 freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}

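/*
 * Optional refclk mux setup.  If the DT node carries a "cpts-refclk-mux"
 * child, it is registered as a mux clock over reg->rftclk_sel.  A sketch
 * of such a child node (parent clock names illustrative):
 *
 *	cpts-refclk-mux {
 *		#clock-cells = <0>;
 *		clocks = <&refclk0>, <&refclk1>;
 *		ti,mux-tbl = <0x0 0x1>;
 *	};
 */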
static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
{
	struct device_node *refclk_np;
	const char **parent_names;
	unsigned int num_parents;
	struct clk_hw *clk_hw;
	int ret = -EINVAL;
	u32 *mux_table;

	refclk_np = of_get_child_by_name(node, "cpts-refclk-mux");
	if (!refclk_np)
		/* refclk selection is not supported on all SoCs */
		return 0;

	num_parents = of_clk_get_parent_count(refclk_np);
	if (num_parents < 1) {
		dev_err(cpts->dev, "mux-clock %s must have parents\n",
			refclk_np->name);
		goto mux_fail;
	}

	parent_names = devm_kcalloc(cpts->dev, num_parents,
				    sizeof(*parent_names), GFP_KERNEL);

	mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table),
				 GFP_KERNEL);
	if (!mux_table || !parent_names) {
		ret = -ENOMEM;
		goto mux_fail;
	}

	of_clk_parent_fill(refclk_np, parent_names, num_parents);

	ret = of_property_read_variable_u32_array(refclk_np, "ti,mux-tbl",
						  mux_table,
						  num_parents, num_parents);
	if (ret < 0)
		goto mux_fail;

	clk_hw = clk_hw_register_mux_table(cpts->dev, refclk_np->name,
					   parent_names, num_parents,
					   0,
					   &cpts->reg->rftclk_sel, 0, 0x1F,
					   0, mux_table, NULL);
	if (IS_ERR(clk_hw)) {
		ret = PTR_ERR(clk_hw);
		goto mux_fail;
	}

	ret = devm_add_action_or_reset(cpts->dev,
				       (void(*)(void *))clk_hw_unregister_mux,
				       clk_hw);
	if (ret) {
		dev_err(cpts->dev, "add clkmux unreg action %d", ret);
		goto mux_fail;
	}

	ret = of_clk_add_hw_provider(refclk_np, of_clk_hw_simple_get, clk_hw);
	if (ret)
		goto mux_fail;

	ret = devm_add_action_or_reset(cpts->dev,
				       (void(*)(void *))of_clk_del_provider,
				       refclk_np);
	if (ret) {
		dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
		goto mux_fail;
	}

	return ret;

mux_fail:
	of_node_put(refclk_np);
	return ret;
}

static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
{
	int ret = -EINVAL;
	u32 prop;

	if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
		cpts->cc.mult = prop;

	if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
		cpts->cc.shift = prop;

	if ((cpts->cc.mult && !cpts->cc.shift) ||
	    (!cpts->cc.mult && cpts->cc.shift))
		goto of_error;

	return cpts_of_mux_clk_setup(cpts, node);

of_error:
	dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
	return ret;
}

struct cpts *cpts_create(struct device *dev, void __iomem *regs,
			 struct device_node *node, u32 n_ext_ts)
{
	struct cpts *cpts;
	int ret;

	cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
	if (!cpts)
		return ERR_PTR(-ENOMEM);

	cpts->dev = dev;
	cpts->reg = (struct cpsw_cpts __iomem *)regs;
	cpts->irq_poll = true;
	spin_lock_init(&cpts->lock);
	mutex_init(&cpts->ptp_clk_mutex);
	init_completion(&cpts->ts_push_complete);

	ret = cpts_of_parse(cpts, node);
	if (ret)
		return ERR_PTR(ret);

	cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
	if (IS_ERR(cpts->refclk))
		/* try to get the clock from the dev node for compatibility */
		cpts->refclk = devm_clk_get(dev, "cpts");

	if (IS_ERR(cpts->refclk)) {
		dev_err(dev, "Failed to get cpts refclk %ld\n",
			PTR_ERR(cpts->refclk));
		return ERR_CAST(cpts->refclk);
	}

	ret = clk_prepare(cpts->refclk);
	if (ret)
		return ERR_PTR(ret);

	cpts->cc.read = cpts_systim_read;
	cpts->cc.mask = CLOCKSOURCE_MASK(32);
	cpts->info = cpts_info;
	cpts->phc_index = -1;

	if (n_ext_ts)
		cpts->info.n_ext_ts = n_ext_ts;

	cpts_calc_mult_shift(cpts);
	/* save the original cc.mult value, as it can be modified
	 * by cpts_ptp_adjfreq().
	 */
	cpts->cc_mult = cpts->cc.mult;

	return cpts;
}
EXPORT_SYMBOL_GPL(cpts_create);

void cpts_release(struct cpts *cpts)
{
	if (!cpts)
		return;

	if (WARN_ON(!cpts->refclk))
		return;

	clk_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_release);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI CPTS driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");