cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

m_can.c (51843B)


      1// SPDX-License-Identifier: GPL-2.0
      2// CAN bus driver for Bosch M_CAN controller
      3// Copyright (C) 2014 Freescale Semiconductor, Inc.
      4//      Dong Aisheng <b29396@freescale.com>
      5// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
      6
      7/* Bosch M_CAN user manual can be obtained from:
      8 * https://github.com/linux-can/can-doc/tree/master/m_can
      9 */
     10
     11#include <linux/bitfield.h>
     12#include <linux/interrupt.h>
     13#include <linux/io.h>
     14#include <linux/kernel.h>
     15#include <linux/module.h>
     16#include <linux/netdevice.h>
     17#include <linux/of.h>
     18#include <linux/of_device.h>
     19#include <linux/platform_device.h>
     20#include <linux/pm_runtime.h>
     21#include <linux/iopoll.h>
     22#include <linux/can/dev.h>
     23#include <linux/pinctrl/consumer.h>
     24#include <linux/phy/phy.h>
     25
     26#include "m_can.h"
     27
     28/* registers definition */
enum m_can_reg {
	/* Register offsets from the peripheral base address, as laid out in
	 * the Bosch M_CAN user manual (see link in the file header). Gaps
	 * between consecutive offsets are reserved registers this driver
	 * does not touch.
	 */
	M_CAN_CREL	= 0x0,
	M_CAN_ENDN	= 0x4,
	M_CAN_CUST	= 0x8,
	M_CAN_DBTP	= 0xc,
	M_CAN_TEST	= 0x10,
	M_CAN_RWD	= 0x14,
	M_CAN_CCCR	= 0x18,
	M_CAN_NBTP	= 0x1c,
	M_CAN_TSCC	= 0x20,
	M_CAN_TSCV	= 0x24,
	M_CAN_TOCC	= 0x28,
	M_CAN_TOCV	= 0x2c,
	M_CAN_ECR	= 0x40,
	M_CAN_PSR	= 0x44,
	/* TDCR Register only available for version >=3.1.x */
	M_CAN_TDCR	= 0x48,
	M_CAN_IR	= 0x50,
	M_CAN_IE	= 0x54,
	M_CAN_ILS	= 0x58,
	M_CAN_ILE	= 0x5c,
	M_CAN_GFC	= 0x80,
	M_CAN_SIDFC	= 0x84,
	M_CAN_XIDFC	= 0x88,
	M_CAN_XIDAM	= 0x90,
	M_CAN_HPMS	= 0x94,
	M_CAN_NDAT1	= 0x98,
	M_CAN_NDAT2	= 0x9c,
	M_CAN_RXF0C	= 0xa0,
	M_CAN_RXF0S	= 0xa4,
	M_CAN_RXF0A	= 0xa8,
	M_CAN_RXBC	= 0xac,
	M_CAN_RXF1C	= 0xb0,
	M_CAN_RXF1S	= 0xb4,
	M_CAN_RXF1A	= 0xb8,
	M_CAN_RXESC	= 0xbc,
	M_CAN_TXBC	= 0xc0,
	M_CAN_TXFQS	= 0xc4,
	M_CAN_TXESC	= 0xc8,
	M_CAN_TXBRP	= 0xcc,
	M_CAN_TXBAR	= 0xd0,
	M_CAN_TXBCR	= 0xd4,
	M_CAN_TXBTO	= 0xd8,
	M_CAN_TXBCF	= 0xdc,
	M_CAN_TXBTIE	= 0xe0,
	M_CAN_TXBCIE	= 0xe4,
	M_CAN_TXEFC	= 0xf0,
	M_CAN_TXEFS	= 0xf4,
	M_CAN_TXEFA	= 0xf8,
};
     79
     80/* message ram configuration data length */
     81#define MRAM_CFG_LEN	8
     82
     83/* Core Release Register (CREL) */
     84#define CREL_REL_MASK		GENMASK(31, 28)
     85#define CREL_STEP_MASK		GENMASK(27, 24)
     86#define CREL_SUBSTEP_MASK	GENMASK(23, 20)
     87
     88/* Data Bit Timing & Prescaler Register (DBTP) */
     89#define DBTP_TDC		BIT(23)
     90#define DBTP_DBRP_MASK		GENMASK(20, 16)
     91#define DBTP_DTSEG1_MASK	GENMASK(12, 8)
     92#define DBTP_DTSEG2_MASK	GENMASK(7, 4)
     93#define DBTP_DSJW_MASK		GENMASK(3, 0)
     94
     95/* Transmitter Delay Compensation Register (TDCR) */
     96#define TDCR_TDCO_MASK		GENMASK(14, 8)
     97#define TDCR_TDCF_MASK		GENMASK(6, 0)
     98
     99/* Test Register (TEST) */
    100#define TEST_LBCK		BIT(4)
    101
    102/* CC Control Register (CCCR) */
    103#define CCCR_TXP		BIT(14)
    104#define CCCR_TEST		BIT(7)
    105#define CCCR_DAR		BIT(6)
    106#define CCCR_MON		BIT(5)
    107#define CCCR_CSR		BIT(4)
    108#define CCCR_CSA		BIT(3)
    109#define CCCR_ASM		BIT(2)
    110#define CCCR_CCE		BIT(1)
    111#define CCCR_INIT		BIT(0)
    112/* for version 3.0.x */
    113#define CCCR_CMR_MASK		GENMASK(11, 10)
    114#define CCCR_CMR_CANFD		0x1
    115#define CCCR_CMR_CANFD_BRS	0x2
    116#define CCCR_CMR_CAN		0x3
    117#define CCCR_CME_MASK		GENMASK(9, 8)
    118#define CCCR_CME_CAN		0
    119#define CCCR_CME_CANFD		0x1
    120#define CCCR_CME_CANFD_BRS	0x2
    121/* for version >=3.1.x */
    122#define CCCR_EFBI		BIT(13)
    123#define CCCR_PXHD		BIT(12)
    124#define CCCR_BRSE		BIT(9)
    125#define CCCR_FDOE		BIT(8)
    126/* for version >=3.2.x */
    127#define CCCR_NISO		BIT(15)
    128/* for version >=3.3.x */
    129#define CCCR_WMM		BIT(11)
    130#define CCCR_UTSU		BIT(10)
    131
    132/* Nominal Bit Timing & Prescaler Register (NBTP) */
    133#define NBTP_NSJW_MASK		GENMASK(31, 25)
    134#define NBTP_NBRP_MASK		GENMASK(24, 16)
    135#define NBTP_NTSEG1_MASK	GENMASK(15, 8)
    136#define NBTP_NTSEG2_MASK	GENMASK(6, 0)
    137
    138/* Timestamp Counter Configuration Register (TSCC) */
    139#define TSCC_TCP_MASK		GENMASK(19, 16)
    140#define TSCC_TSS_MASK		GENMASK(1, 0)
    141#define TSCC_TSS_DISABLE	0x0
    142#define TSCC_TSS_INTERNAL	0x1
    143#define TSCC_TSS_EXTERNAL	0x2
    144
    145/* Timestamp Counter Value Register (TSCV) */
    146#define TSCV_TSC_MASK		GENMASK(15, 0)
    147
    148/* Error Counter Register (ECR) */
    149#define ECR_RP			BIT(15)
    150#define ECR_REC_MASK		GENMASK(14, 8)
    151#define ECR_TEC_MASK		GENMASK(7, 0)
    152
    153/* Protocol Status Register (PSR) */
    154#define PSR_BO		BIT(7)
    155#define PSR_EW		BIT(6)
    156#define PSR_EP		BIT(5)
    157#define PSR_LEC_MASK	GENMASK(2, 0)
    158
    159/* Interrupt Register (IR) */
    160#define IR_ALL_INT	0xffffffff
    161
    162/* Renamed bits for versions > 3.1.x */
    163#define IR_ARA		BIT(29)
    164#define IR_PED		BIT(28)
    165#define IR_PEA		BIT(27)
    166
    167/* Bits for version 3.0.x */
    168#define IR_STE		BIT(31)
    169#define IR_FOE		BIT(30)
    170#define IR_ACKE		BIT(29)
    171#define IR_BE		BIT(28)
    172#define IR_CRCE		BIT(27)
    173#define IR_WDI		BIT(26)
    174#define IR_BO		BIT(25)
    175#define IR_EW		BIT(24)
    176#define IR_EP		BIT(23)
    177#define IR_ELO		BIT(22)
    178#define IR_BEU		BIT(21)
    179#define IR_BEC		BIT(20)
    180#define IR_DRX		BIT(19)
    181#define IR_TOO		BIT(18)
    182#define IR_MRAF		BIT(17)
    183#define IR_TSW		BIT(16)
    184#define IR_TEFL		BIT(15)
    185#define IR_TEFF		BIT(14)
    186#define IR_TEFW		BIT(13)
    187#define IR_TEFN		BIT(12)
    188#define IR_TFE		BIT(11)
    189#define IR_TCF		BIT(10)
    190#define IR_TC		BIT(9)
    191#define IR_HPM		BIT(8)
    192#define IR_RF1L		BIT(7)
    193#define IR_RF1F		BIT(6)
    194#define IR_RF1W		BIT(5)
    195#define IR_RF1N		BIT(4)
    196#define IR_RF0L		BIT(3)
    197#define IR_RF0F		BIT(2)
    198#define IR_RF0W		BIT(1)
    199#define IR_RF0N		BIT(0)
    200#define IR_ERR_STATE	(IR_BO | IR_EW | IR_EP)
    201
    202/* Interrupts for version 3.0.x */
    203#define IR_ERR_LEC_30X	(IR_STE	| IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
    204#define IR_ERR_BUS_30X	(IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
    205			 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
    206			 IR_RF0L)
    207#define IR_ERR_ALL_30X	(IR_ERR_STATE | IR_ERR_BUS_30X)
    208
    209/* Interrupts for version >= 3.1.x */
    210#define IR_ERR_LEC_31X	(IR_PED | IR_PEA)
    211#define IR_ERR_BUS_31X      (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
    212			 IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
    213			 IR_RF0L)
    214#define IR_ERR_ALL_31X	(IR_ERR_STATE | IR_ERR_BUS_31X)
    215
    216/* Interrupt Line Select (ILS) */
    217#define ILS_ALL_INT0	0x0
    218#define ILS_ALL_INT1	0xFFFFFFFF
    219
    220/* Interrupt Line Enable (ILE) */
    221#define ILE_EINT1	BIT(1)
    222#define ILE_EINT0	BIT(0)
    223
    224/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
    225#define RXFC_FWM_MASK	GENMASK(30, 24)
    226#define RXFC_FS_MASK	GENMASK(22, 16)
    227
    228/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
    229#define RXFS_RFL	BIT(25)
    230#define RXFS_FF		BIT(24)
    231#define RXFS_FPI_MASK	GENMASK(21, 16)
    232#define RXFS_FGI_MASK	GENMASK(13, 8)
    233#define RXFS_FFL_MASK	GENMASK(6, 0)
    234
    235/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
    236#define RXESC_RBDS_MASK		GENMASK(10, 8)
    237#define RXESC_F1DS_MASK		GENMASK(6, 4)
    238#define RXESC_F0DS_MASK		GENMASK(2, 0)
    239#define RXESC_64B		0x7
    240
    241/* Tx Buffer Configuration (TXBC) */
    242#define TXBC_TFQS_MASK		GENMASK(29, 24)
    243#define TXBC_NDTB_MASK		GENMASK(21, 16)
    244
    245/* Tx FIFO/Queue Status (TXFQS) */
    246#define TXFQS_TFQF		BIT(21)
    247#define TXFQS_TFQPI_MASK	GENMASK(20, 16)
    248#define TXFQS_TFGI_MASK		GENMASK(12, 8)
    249#define TXFQS_TFFL_MASK		GENMASK(5, 0)
    250
    251/* Tx Buffer Element Size Configuration (TXESC) */
    252#define TXESC_TBDS_MASK		GENMASK(2, 0)
    253#define TXESC_TBDS_64B		0x7
    254
    255/* Tx Event FIFO Configuration (TXEFC) */
    256#define TXEFC_EFS_MASK		GENMASK(21, 16)
    257
    258/* Tx Event FIFO Status (TXEFS) */
    259#define TXEFS_TEFL		BIT(25)
    260#define TXEFS_EFF		BIT(24)
    261#define TXEFS_EFGI_MASK		GENMASK(12, 8)
    262#define TXEFS_EFFL_MASK		GENMASK(5, 0)
    263
    264/* Tx Event FIFO Acknowledge (TXEFA) */
    265#define TXEFA_EFAI_MASK		GENMASK(4, 0)
    266
    267/* Message RAM Configuration (in bytes) */
    268#define SIDF_ELEMENT_SIZE	4
    269#define XIDF_ELEMENT_SIZE	8
    270#define RXF0_ELEMENT_SIZE	72
    271#define RXF1_ELEMENT_SIZE	72
    272#define RXB_ELEMENT_SIZE	72
    273#define TXE_ELEMENT_SIZE	8
    274#define TXB_ELEMENT_SIZE	72
    275
    276/* Message RAM Elements */
    277#define M_CAN_FIFO_ID		0x0
    278#define M_CAN_FIFO_DLC		0x4
    279#define M_CAN_FIFO_DATA		0x8
    280
    281/* Rx Buffer Element */
    282/* R0 */
    283#define RX_BUF_ESI		BIT(31)
    284#define RX_BUF_XTD		BIT(30)
    285#define RX_BUF_RTR		BIT(29)
    286/* R1 */
    287#define RX_BUF_ANMF		BIT(31)
    288#define RX_BUF_FDF		BIT(21)
    289#define RX_BUF_BRS		BIT(20)
    290#define RX_BUF_RXTS_MASK	GENMASK(15, 0)
    291
    292/* Tx Buffer Element */
    293/* T0 */
    294#define TX_BUF_ESI		BIT(31)
    295#define TX_BUF_XTD		BIT(30)
    296#define TX_BUF_RTR		BIT(29)
    297/* T1 */
    298#define TX_BUF_EFC		BIT(23)
    299#define TX_BUF_FDF		BIT(21)
    300#define TX_BUF_BRS		BIT(20)
    301#define TX_BUF_MM_MASK		GENMASK(31, 24)
    302#define TX_BUF_DLC_MASK		GENMASK(19, 16)
    303
    304/* Tx event FIFO Element */
    305/* E1 */
    306#define TX_EVENT_MM_MASK	GENMASK(31, 24)
    307#define TX_EVENT_TXTS_MASK	GENMASK(15, 0)
    308
/* The ID and DLC registers are adjacent in M_CAN FIFO memory,
 * and we can save a (potentially slow) bus round trip by combining
 * reads and writes to them.
 */
struct id_and_dlc {
	u32 id;		/* R0/T0 word: identifier plus ESI/XTD/RTR flags */
	u32 dlc;	/* R1/T1 word: DLC, FDF/BRS flags and rx timestamp */
};
    317
/* Read one 32-bit M_CAN core register through the bus-specific accessor. */
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
	return cdev->ops->read_reg(cdev, reg);
}
    322
/* Write one 32-bit M_CAN core register through the bus-specific accessor. */
static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg,
			       u32 val)
{
	cdev->ops->write_reg(cdev, reg, val);
}
    328
    329static int
    330m_can_fifo_read(struct m_can_classdev *cdev,
    331		u32 fgi, unsigned int offset, void *val, size_t val_count)
    332{
    333	u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
    334		offset;
    335
    336	if (val_count == 0)
    337		return 0;
    338
    339	return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
    340}
    341
    342static int
    343m_can_fifo_write(struct m_can_classdev *cdev,
    344		 u32 fpi, unsigned int offset, const void *val, size_t val_count)
    345{
    346	u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
    347		offset;
    348
    349	if (val_count == 0)
    350		return 0;
    351
    352	return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
    353}
    354
/* Write a single word to message RAM. Unlike m_can_fifo_write(), no
 * MRAM_TXB base or element stride is applied -- fpi is passed straight
 * through as the raw address offset.
 */
static inline int m_can_fifo_write_no_off(struct m_can_classdev *cdev,
					  u32 fpi, u32 val)
{
	return cdev->ops->write_fifo(cdev, fpi, &val, 1);
}
    360
    361static int
    362m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
    363{
    364	u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE +
    365		offset;
    366
    367	return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
    368}
    369
    370static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
    371{
    372	return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF);
    373}
    374
/* Enter (enable=true) or leave (enable=false) configuration mode.
 *
 * When entering, CCCR.INIT is set before CCCR.CCE because CCE may only be
 * changed while INIT is already '1'. The tail loop busy-waits (up to ~10us)
 * until the hardware reflects the requested INIT/CCE state; on timeout it
 * only warns, leaving the controller in whatever state it reached.
 */
static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
	u32 cccr = m_can_read(cdev, M_CAN_CCCR);
	u32 timeout = 10;
	u32 val = 0;

	/* Clear the Clock stop request if it was set */
	if (cccr & CCCR_CSR)
		cccr &= ~CCCR_CSR;

	if (enable) {
		/* enable m_can configuration */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
		udelay(5);
		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
		m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
	} else {
		m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
	}

	/* there's a delay for module initialization */
	if (enable)
		val = CCCR_INIT | CCCR_CCE;

	/* poll until both bits match the requested state */
	while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
		if (timeout == 0) {
			netdev_warn(cdev->net, "Failed to init module\n");
			return;
		}
		timeout--;
		udelay(1);
	}
}
    408
/* Route enabled interrupts to the host by asserting interrupt line 0. */
static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
	/* Only interrupt line 0 is used in this driver */
	m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}
    414
/* Mask both interrupt lines; the individual IE enable bits are untouched. */
static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
	m_can_write(cdev, M_CAN_ILE, 0x0);
}
    419
    420/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
    421 * width.
    422 */
    423static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
    424{
    425	u32 tscv;
    426	u32 tsc;
    427
    428	tscv = m_can_read(cdev, M_CAN_TSCV);
    429	tsc = FIELD_GET(TSCV_TSC_MASK, tscv);
    430
    431	return (tsc << 16);
    432}
    433
/* Discard a pending tx skb, accounting it as a tx error and releasing the
 * echo skb slot it occupies.
 */
static void m_can_clean(struct net_device *net)
{
	struct m_can_classdev *cdev = netdev_priv(net);

	if (cdev->tx_skb) {
		int putidx = 0;

		net->stats.tx_errors++;
		/* IP cores > 3.0 use the Tx FIFO/queue put index as the echo
		 * slot; on older cores putidx stays 0 (presumably a single
		 * dedicated tx buffer -- the tx path is outside this view).
		 */
		if (cdev->version > 30)
			putidx = FIELD_GET(TXFQS_TFQPI_MASK,
					   m_can_read(cdev, M_CAN_TXFQS));

		can_free_echo_skb(cdev->net, putidx, NULL);
		cdev->tx_skb = NULL;
	}
}
    450
    451/* For peripherals, pass skb to rx-offload, which will push skb from
    452 * napi. For non-peripherals, RX is done in napi already, so push
    453 * directly. timestamp is used to ensure good skb ordering in
    454 * rx-offload and is ignored for non-peripherals.
    455 */
    456static void m_can_receive_skb(struct m_can_classdev *cdev,
    457			      struct sk_buff *skb,
    458			      u32 timestamp)
    459{
    460	if (cdev->is_peripheral) {
    461		struct net_device_stats *stats = &cdev->net->stats;
    462		int err;
    463
    464		err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
    465						  timestamp);
    466		if (err)
    467			stats->rx_fifo_errors++;
    468	} else {
    469		netif_receive_skb(skb);
    470	}
    471}
    472
/* Pull one frame out of rx FIFO 0 and hand it to the stack.
 *
 * The ID and DLC words are fetched in a single combined read (see
 * struct id_and_dlc); the element is only acknowledged via RXF0A after
 * all reads from it have succeeded. Returns 0 on success (including the
 * skb-allocation-failure case, which is accounted as rx_dropped) or a
 * negative FIFO read error.
 */
static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct canfd_frame *cf;
	struct sk_buff *skb;
	struct id_and_dlc fifo_header;
	u32 fgi;
	u32 timestamp = 0;
	int err;

	/* calculate the fifo get index for where to read data */
	fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
	err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2);
	if (err)
		goto out_fail;

	/* FDF selects a CAN FD vs classic CAN skb and DLC decoding */
	if (fifo_header.dlc & RX_BUF_FDF)
		skb = alloc_canfd_skb(dev, &cf);
	else
		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
	if (!skb) {
		stats->rx_dropped++;
		return 0;
	}

	if (fifo_header.dlc & RX_BUF_FDF)
		cf->len = can_fd_dlc2len((fifo_header.dlc >> 16) & 0x0F);
	else
		cf->len = can_cc_dlc2len((fifo_header.dlc >> 16) & 0x0F);

	/* extended IDs use the full 29 bits; standard IDs sit in bits 28:18 */
	if (fifo_header.id & RX_BUF_XTD)
		cf->can_id = (fifo_header.id & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (fifo_header.id >> 18) & CAN_SFF_MASK;

	if (fifo_header.id & RX_BUF_ESI) {
		cf->flags |= CANFD_ESI;
		netdev_dbg(dev, "ESI Error\n");
	}

	/* RTR frames (classic CAN only) carry no payload */
	if (!(fifo_header.dlc & RX_BUF_FDF) && (fifo_header.id & RX_BUF_RTR)) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		if (fifo_header.dlc & RX_BUF_BRS)
			cf->flags |= CANFD_BRS;

		err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
				      cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_free_skb;

		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	/* acknowledge rx fifo 0 */
	m_can_write(cdev, M_CAN_RXF0A, fgi);

	/* hardware timestamp from R1, scaled to the 32-bit domain */
	timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;

	m_can_receive_skb(cdev, skb, timestamp);

	return 0;

out_free_skb:
	kfree_skb(skb);
out_fail:
	netdev_err(dev, "FIFO read returned %d\n", err);
	return err;
}
    544
    545static int m_can_do_rx_poll(struct net_device *dev, int quota)
    546{
    547	struct m_can_classdev *cdev = netdev_priv(dev);
    548	u32 pkts = 0;
    549	u32 rxfs;
    550	int err;
    551
    552	rxfs = m_can_read(cdev, M_CAN_RXF0S);
    553	if (!(rxfs & RXFS_FFL_MASK)) {
    554		netdev_dbg(dev, "no messages in fifo0\n");
    555		return 0;
    556	}
    557
    558	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
    559		err = m_can_read_fifo(dev, rxfs);
    560		if (err)
    561			return err;
    562
    563		quota--;
    564		pkts++;
    565		rxfs = m_can_read(cdev, M_CAN_RXF0S);
    566	}
    567
    568	return pkts;
    569}
    570
    571static int m_can_handle_lost_msg(struct net_device *dev)
    572{
    573	struct m_can_classdev *cdev = netdev_priv(dev);
    574	struct net_device_stats *stats = &dev->stats;
    575	struct sk_buff *skb;
    576	struct can_frame *frame;
    577	u32 timestamp = 0;
    578
    579	netdev_err(dev, "msg lost in rxf0\n");
    580
    581	stats->rx_errors++;
    582	stats->rx_over_errors++;
    583
    584	skb = alloc_can_err_skb(dev, &frame);
    585	if (unlikely(!skb))
    586		return 0;
    587
    588	frame->can_id |= CAN_ERR_CRTL;
    589	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
    590
    591	if (cdev->is_peripheral)
    592		timestamp = m_can_get_timestamp(cdev);
    593
    594	m_can_receive_skb(cdev, skb, timestamp);
    595
    596	return 1;
    597}
    598
    599static int m_can_handle_lec_err(struct net_device *dev,
    600				enum m_can_lec_type lec_type)
    601{
    602	struct m_can_classdev *cdev = netdev_priv(dev);
    603	struct net_device_stats *stats = &dev->stats;
    604	struct can_frame *cf;
    605	struct sk_buff *skb;
    606	u32 timestamp = 0;
    607
    608	cdev->can.can_stats.bus_error++;
    609	stats->rx_errors++;
    610
    611	/* propagate the error condition to the CAN stack */
    612	skb = alloc_can_err_skb(dev, &cf);
    613	if (unlikely(!skb))
    614		return 0;
    615
    616	/* check for 'last error code' which tells us the
    617	 * type of the last error to occur on the CAN bus
    618	 */
    619	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
    620
    621	switch (lec_type) {
    622	case LEC_STUFF_ERROR:
    623		netdev_dbg(dev, "stuff error\n");
    624		cf->data[2] |= CAN_ERR_PROT_STUFF;
    625		break;
    626	case LEC_FORM_ERROR:
    627		netdev_dbg(dev, "form error\n");
    628		cf->data[2] |= CAN_ERR_PROT_FORM;
    629		break;
    630	case LEC_ACK_ERROR:
    631		netdev_dbg(dev, "ack error\n");
    632		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
    633		break;
    634	case LEC_BIT1_ERROR:
    635		netdev_dbg(dev, "bit1 error\n");
    636		cf->data[2] |= CAN_ERR_PROT_BIT1;
    637		break;
    638	case LEC_BIT0_ERROR:
    639		netdev_dbg(dev, "bit0 error\n");
    640		cf->data[2] |= CAN_ERR_PROT_BIT0;
    641		break;
    642	case LEC_CRC_ERROR:
    643		netdev_dbg(dev, "CRC error\n");
    644		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
    645		break;
    646	default:
    647		break;
    648	}
    649
    650	if (cdev->is_peripheral)
    651		timestamp = m_can_get_timestamp(cdev);
    652
    653	m_can_receive_skb(cdev, skb, timestamp);
    654
    655	return 1;
    656}
    657
    658static int __m_can_get_berr_counter(const struct net_device *dev,
    659				    struct can_berr_counter *bec)
    660{
    661	struct m_can_classdev *cdev = netdev_priv(dev);
    662	unsigned int ecr;
    663
    664	ecr = m_can_read(cdev, M_CAN_ECR);
    665	bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr);
    666	bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr);
    667
    668	return 0;
    669}
    670
    671static int m_can_clk_start(struct m_can_classdev *cdev)
    672{
    673	if (cdev->pm_clock_support == 0)
    674		return 0;
    675
    676	return pm_runtime_resume_and_get(cdev->dev);
    677}
    678
    679static void m_can_clk_stop(struct m_can_classdev *cdev)
    680{
    681	if (cdev->pm_clock_support)
    682		pm_runtime_put_sync(cdev->dev);
    683}
    684
    685static int m_can_get_berr_counter(const struct net_device *dev,
    686				  struct can_berr_counter *bec)
    687{
    688	struct m_can_classdev *cdev = netdev_priv(dev);
    689	int err;
    690
    691	err = m_can_clk_start(cdev);
    692	if (err)
    693		return err;
    694
    695	__m_can_get_berr_counter(dev, bec);
    696
    697	m_can_clk_stop(cdev);
    698
    699	return 0;
    700}
    701
/* Record a transition to new_state and queue a matching CAN error frame.
 *
 * The first switch updates driver state and statistics (and for bus-off
 * also disables interrupts and notifies the CAN core); the second fills
 * in the error frame details. Returns 1 if an error frame was queued,
 * 0 if the skb allocation failed (state was still updated).
 */
static int m_can_handle_state_change(struct net_device *dev,
				     enum can_state new_state)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;
	unsigned int ecr;
	u32 timestamp = 0;

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		/* error warning state */
		cdev->can.can_stats.error_warning++;
		cdev->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cdev->can.can_stats.error_passive++;
		cdev->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cdev->can.state = CAN_STATE_BUS_OFF;
		m_can_disable_all_interrupts(cdev);
		cdev->can.can_stats.bus_off++;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__m_can_get_berr_counter(dev, &bec);

	switch (new_state) {
	case CAN_STATE_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		/* ECR.RP distinguishes receive-passive from transmit-passive */
		ecr = m_can_read(cdev, M_CAN_ECR);
		if (ecr & ECR_RP)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		break;
	default:
		break;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}
    777
    778static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
    779{
    780	struct m_can_classdev *cdev = netdev_priv(dev);
    781	int work_done = 0;
    782
    783	if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
    784		netdev_dbg(dev, "entered error warning state\n");
    785		work_done += m_can_handle_state_change(dev,
    786						       CAN_STATE_ERROR_WARNING);
    787	}
    788
    789	if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
    790		netdev_dbg(dev, "entered error passive state\n");
    791		work_done += m_can_handle_state_change(dev,
    792						       CAN_STATE_ERROR_PASSIVE);
    793	}
    794
    795	if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
    796		netdev_dbg(dev, "entered error bus off state\n");
    797		work_done += m_can_handle_state_change(dev,
    798						       CAN_STATE_BUS_OFF);
    799	}
    800
    801	return work_done;
    802}
    803
    804static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
    805{
    806	if (irqstatus & IR_WDI)
    807		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
    808	if (irqstatus & IR_BEU)
    809		netdev_err(dev, "Bit Error Uncorrected\n");
    810	if (irqstatus & IR_BEC)
    811		netdev_err(dev, "Bit Error Corrected\n");
    812	if (irqstatus & IR_TOO)
    813		netdev_err(dev, "Timeout reached\n");
    814	if (irqstatus & IR_MRAF)
    815		netdev_err(dev, "Message RAM access failure occurred\n");
    816}
    817
    818static inline bool is_lec_err(u32 psr)
    819{
    820	psr &= LEC_UNUSED;
    821
    822	return psr && (psr != LEC_UNUSED);
    823}
    824
    825static inline bool m_can_is_protocol_err(u32 irqstatus)
    826{
    827	return irqstatus & IR_ERR_LEC_31X;
    828}
    829
/* Report a protocol error interrupt (IR_PEA/IR_PED) to the CAN stack.
 *
 * tx_errors (and, for arbitration errors, arbitration_lost) are counted
 * even when the error-skb allocation fails -- that is why the !skb check
 * deliberately comes after the accounting. Returns 1 if an error frame
 * was queued, 0 otherwise.
 */
static int m_can_handle_protocol_error(struct net_device *dev, u32 irqstatus)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_classdev *cdev = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp = 0;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);

	/* update tx error stats since there is protocol error */
	stats->tx_errors++;

	/* update arbitration lost status */
	if (cdev->version >= 31 && (irqstatus & IR_PEA)) {
		netdev_dbg(dev, "Protocol error in Arbitration fail\n");
		cdev->can.can_stats.arbitration_lost++;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
		}
	}

	if (unlikely(!skb)) {
		netdev_dbg(dev, "allocation of skb failed\n");
		return 0;
	}

	if (cdev->is_peripheral)
		timestamp = m_can_get_timestamp(cdev);

	m_can_receive_skb(cdev, skb, timestamp);

	return 1;
}
    866
    867static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
    868				   u32 psr)
    869{
    870	struct m_can_classdev *cdev = netdev_priv(dev);
    871	int work_done = 0;
    872
    873	if (irqstatus & IR_RF0L)
    874		work_done += m_can_handle_lost_msg(dev);
    875
    876	/* handle lec errors on the bus */
    877	if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
    878	    is_lec_err(psr))
    879		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
    880
    881	/* handle protocol errors in arbitration phase */
    882	if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
    883	    m_can_is_protocol_err(irqstatus))
    884		work_done += m_can_handle_protocol_error(dev, irqstatus);
    885
    886	/* other unproccessed error interrupts */
    887	m_can_handle_other_err(dev, irqstatus);
    888
    889	return work_done;
    890}
    891
/* Core rx work: handle state-change and bus-error interrupts, then drain
 * rx FIFO 0 up to the remaining quota. Returns the number of packets
 * processed, or a negative error propagated from the FIFO poll.
 */
static int m_can_rx_handler(struct net_device *dev, int quota)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int rx_work_or_err;
	int work_done = 0;
	u32 irqstatus, psr;

	/* combine the cached status (presumably saved by the ISR -- the
	 * store is outside this view) with the live IR register
	 */
	irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
	if (!irqstatus)
		goto end;

	/* Errata workaround for issue "Needless activation of MRAF irq"
	 * During frame reception while the MCAN is in Error Passive state
	 * and the Receive Error Counter has the value MCAN_ECR.REC = 127,
	 * it may happen that MCAN_IR.MRAF is set although there was no
	 * Message RAM access failure.
	 * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated
	 * The Message RAM Access Failure interrupt routine needs to check
	 * whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127.
	 * In this case, reset MCAN_IR.MRAF. No further action is required.
	 */
	if (cdev->version <= 31 && irqstatus & IR_MRAF &&
	    m_can_read(cdev, M_CAN_ECR) & ECR_RP) {
		struct can_berr_counter bec;

		__m_can_get_berr_counter(dev, &bec);
		if (bec.rxerr == 127) {
			m_can_write(cdev, M_CAN_IR, IR_MRAF);
			irqstatus &= ~IR_MRAF;
		}
	}

	psr = m_can_read(cdev, M_CAN_PSR);

	if (irqstatus & IR_ERR_STATE)
		work_done += m_can_handle_state_errors(dev, psr);

	if (irqstatus & IR_ERR_BUS_30X)
		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);

	if (irqstatus & IR_RF0N) {
		rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done));
		if (rx_work_or_err < 0)
			return rx_work_or_err;

		work_done += rx_work_or_err;
	}
end:
	return work_done;
}
    942
    943static int m_can_rx_peripheral(struct net_device *dev)
    944{
    945	struct m_can_classdev *cdev = netdev_priv(dev);
    946	int work_done;
    947
    948	work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT);
    949
    950	/* Don't re-enable interrupts if the driver had a fatal error
    951	 * (e.g., FIFO read failure).
    952	 */
    953	if (work_done >= 0)
    954		m_can_enable_all_interrupts(cdev);
    955
    956	return work_done;
    957}
    958
    959static int m_can_poll(struct napi_struct *napi, int quota)
    960{
    961	struct net_device *dev = napi->dev;
    962	struct m_can_classdev *cdev = netdev_priv(dev);
    963	int work_done;
    964
    965	work_done = m_can_rx_handler(dev, quota);
    966
    967	/* Don't re-enable interrupts if the driver had a fatal error
    968	 * (e.g., FIFO read failure).
    969	 */
    970	if (work_done >= 0 && work_done < quota) {
    971		napi_complete_done(napi, work_done);
    972		m_can_enable_all_interrupts(cdev);
    973	}
    974
    975	return work_done;
    976}
    977
    978/* Echo tx skb and update net stats. Peripherals use rx-offload for
    979 * echo. timestamp is used for peripherals to ensure correct ordering
    980 * by rx-offload, and is ignored for non-peripherals.
    981 */
    982static void m_can_tx_update_stats(struct m_can_classdev *cdev,
    983				  unsigned int msg_mark,
    984				  u32 timestamp)
    985{
    986	struct net_device *dev = cdev->net;
    987	struct net_device_stats *stats = &dev->stats;
    988
    989	if (cdev->is_peripheral)
    990		stats->tx_bytes +=
    991			can_rx_offload_get_echo_skb(&cdev->offload,
    992						    msg_mark,
    993						    timestamp,
    994						    NULL);
    995	else
    996		stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
    997
    998	stats->tx_packets++;
    999}
   1000
/* Drain the TX Event FIFO: for each completed transmission, echo the
 * matching skb (identified by the message marker) and update TX stats.
 * Returns 0 on success or a negative errno if a FIFO read failed.
 */
static int m_can_echo_tx_event(struct net_device *dev)
{
	u32 txe_count = 0;
	u32 m_can_txefs;
	u32 fgi = 0;
	int i = 0;
	unsigned int msg_mark;

	struct m_can_classdev *cdev = netdev_priv(dev);

	/* read tx event fifo status */
	m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);

	/* Get Tx Event fifo element count */
	txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);

	/* Get and process all sent elements */
	for (i = 0; i < txe_count; i++) {
		u32 txe, timestamp = 0;
		int err;

		/* retrieve get index (re-read TXEFS: the get index advances
		 * as elements are acknowledged below)
		 */
		fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_read(cdev, M_CAN_TXEFS));

		/* get message marker, timestamp */
		err = m_can_txe_fifo_read(cdev, fgi, 4, &txe);
		if (err) {
			netdev_err(dev, "TXE FIFO read returned %d\n", err);
			return err;
		}

		msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
		/* shift the 16-bit hardware timestamp into the upper half
		 * for rx-offload ordering
		 */
		timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;

		/* ack txe element */
		m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
							  fgi));

		/* update stats */
		m_can_tx_update_stats(cdev, msg_mark, timestamp);
	}

	return 0;
}
   1045
/* Interrupt handler (threaded for peripheral devices): acknowledge all
 * pending flags, dispatch RX/error work to NAPI (or handle it inline for
 * peripherals) and complete TX echo handling depending on the IP version.
 */
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 ir;

	/* While runtime-suspended the core is not clocked; the interrupt
	 * cannot be ours.
	 */
	if (pm_runtime_suspended(cdev->dev))
		return IRQ_NONE;
	ir = m_can_read(cdev, M_CAN_IR);
	if (!ir)
		return IRQ_NONE;

	/* ACK all irqs */
	if (ir & IR_ALL_INT)
		m_can_write(cdev, M_CAN_IR, ir);

	if (cdev->ops->clear_interrupts)
		cdev->ops->clear_interrupts(cdev);

	/* schedule NAPI in case of
	 * - rx IRQ
	 * - state change IRQ
	 * - bus error IRQ and bus error reporting
	 */
	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
		cdev->irqstatus = ir;
		m_can_disable_all_interrupts(cdev);
		if (!cdev->is_peripheral)
			napi_schedule(&cdev->napi);
		else if (m_can_rx_peripheral(dev) < 0)
			goto out_fail;
	}

	if (cdev->version == 30) {
		if (ir & IR_TC) {
			/* Transmission Complete Interrupt*/
			u32 timestamp = 0;

			if (cdev->is_peripheral)
				timestamp = m_can_get_timestamp(cdev);
			/* v3.0.x has a single TX buffer, so echo index 0 */
			m_can_tx_update_stats(cdev, 0, timestamp);
			netif_wake_queue(dev);
		}
	} else  {
		if (ir & IR_TEFN) {
			/* New TX FIFO Element arrived */
			if (m_can_echo_tx_event(dev) != 0)
				goto out_fail;

			if (netif_queue_stopped(dev) &&
			    !m_can_tx_fifo_full(cdev))
				netif_wake_queue(dev);
		}
	}

	if (cdev->is_peripheral)
		can_rx_offload_threaded_irq_finish(&cdev->offload);

	return IRQ_HANDLED;

out_fail:
	/* Fatal error: leave interrupts disabled until the next restart */
	m_can_disable_all_interrupts(cdev);
	return IRQ_HANDLED;
}
   1110
/* Nominal (arbitration phase) bit timing limits for M_CAN IP v3.0.x */
static const struct can_bittiming_const m_can_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 64,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};
   1122
/* CAN FD data phase bit timing limits for M_CAN IP v3.0.x */
static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};
   1134
/* Nominal (arbitration phase) bit timing limits for M_CAN IP >= v3.1.x */
static const struct can_bittiming_const m_can_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 256,
	.tseg2_min = 2,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 512,
	.brp_inc = 1,
};
   1146
/* CAN FD data phase bit timing limits for M_CAN IP >= v3.1.x */
static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 1,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 32,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};
   1158
/* Program the nominal (NBTP) and, for CAN FD, the data phase (DBTP)
 * bit timing registers from the values computed by the CAN core.
 * All register fields hold "value - 1", hence the decrements below.
 */
static int m_can_set_bittiming(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	const struct can_bittiming *bt = &cdev->can.bittiming;
	const struct can_bittiming *dbt = &cdev->can.data_bittiming;
	u16 brp, sjw, tseg1, tseg2;
	u32 reg_btp;

	brp = bt->brp - 1;
	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
		  FIELD_PREP(NBTP_NSJW_MASK, sjw) |
		  FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
		  FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
	m_can_write(cdev, M_CAN_NBTP, reg_btp);

	if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
		reg_btp = 0;
		brp = dbt->brp - 1;
		sjw = dbt->sjw - 1;
		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
		tseg2 = dbt->phase_seg2 - 1;

		/* TDC is only needed for bitrates beyond 2.5 MBit/s.
		 * This is mentioned in the "Bit Time Requirements for CAN FD"
		 * paper presented at the International CAN Conference 2013
		 */
		if (dbt->bitrate > 2500000) {
			u32 tdco, ssp;

			/* Use the same value of secondary sampling point
			 * as the data sampling point
			 */
			ssp = dbt->sample_point;

			/* Equation based on Bosch's M_CAN User Manual's
			 * Transmitter Delay Compensation Section
			 */
			tdco = (cdev->can.clock.freq / 1000) *
				ssp / dbt->bitrate;

			/* Max valid TDCO value is 127 */
			if (tdco > 127) {
				netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
					    tdco);
				tdco = 127;
			}

			reg_btp |= DBTP_TDC;
			m_can_write(cdev, M_CAN_TDCR,
				    FIELD_PREP(TDCR_TDCO_MASK, tdco));
		}

		reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
			FIELD_PREP(DBTP_DSJW_MASK, sjw) |
			FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
			FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);

		m_can_write(cdev, M_CAN_DBTP, reg_btp);
	}

	return 0;
}
   1224
/* Configure M_CAN chip:
 * - set rx buffer/fifo element size
 * - configure rx fifo
 * - accept non-matching frame into fifo 0
 * - configure tx buffer
 *		- >= v3.1.x: TX FIFO is used
 * - configure mode
 * - setup bittiming
 * - configure timestamp generation
 * Must run between config-enable and config-disable (init mode).
 */
static void m_can_chip_config(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	u32 cccr, test;

	m_can_config_endisable(cdev, true);

	/* RX Buffer/FIFO Element Size 64 bytes data field */
	m_can_write(cdev, M_CAN_RXESC,
		    FIELD_PREP(RXESC_RBDS_MASK, RXESC_64B) |
		    FIELD_PREP(RXESC_F1DS_MASK, RXESC_64B) |
		    FIELD_PREP(RXESC_F0DS_MASK, RXESC_64B));

	/* Accept Non-matching Frames Into FIFO 0 */
	m_can_write(cdev, M_CAN_GFC, 0x0);

	if (cdev->version == 30) {
		/* only support one Tx Buffer currently */
		m_can_write(cdev, M_CAN_TXBC, FIELD_PREP(TXBC_NDTB_MASK, 1) |
			    cdev->mcfg[MRAM_TXB].off);
	} else {
		/* TX FIFO is used for newer IP Core versions */
		m_can_write(cdev, M_CAN_TXBC,
			    FIELD_PREP(TXBC_TFQS_MASK,
				       cdev->mcfg[MRAM_TXB].num) |
			    cdev->mcfg[MRAM_TXB].off);
	}

	/* support 64 bytes payload */
	m_can_write(cdev, M_CAN_TXESC,
		    FIELD_PREP(TXESC_TBDS_MASK, TXESC_TBDS_64B));

	/* TX Event FIFO */
	if (cdev->version == 30) {
		m_can_write(cdev, M_CAN_TXEFC,
			    FIELD_PREP(TXEFC_EFS_MASK, 1) |
			    cdev->mcfg[MRAM_TXE].off);
	} else {
		/* Full TX Event FIFO is used */
		m_can_write(cdev, M_CAN_TXEFC,
			    FIELD_PREP(TXEFC_EFS_MASK,
				       cdev->mcfg[MRAM_TXE].num) |
			    cdev->mcfg[MRAM_TXE].off);
	}

	/* rx fifo configuration, blocking mode, fifo size 1 */
	m_can_write(cdev, M_CAN_RXF0C,
		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
		    cdev->mcfg[MRAM_RXF0].off);

	m_can_write(cdev, M_CAN_RXF1C,
		    FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) |
		    cdev->mcfg[MRAM_RXF1].off);

	/* Build CCCR/TEST from the requested ctrlmode flags */
	cccr = m_can_read(cdev, M_CAN_CCCR);
	test = m_can_read(cdev, M_CAN_TEST);
	test &= ~TEST_LBCK;
	if (cdev->version == 30) {
		/* Version 3.0.x */

		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
			  FIELD_PREP(CCCR_CMR_MASK, FIELD_MAX(CCCR_CMR_MASK)) |
			  FIELD_PREP(CCCR_CME_MASK, FIELD_MAX(CCCR_CME_MASK)));

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= FIELD_PREP(CCCR_CME_MASK, CCCR_CME_CANFD_BRS);

	} else {
		/* Version 3.1.x or 3.2.x */
		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
			  CCCR_NISO | CCCR_DAR);

		/* Only 3.2.x has NISO Bit implemented */
		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			cccr |= CCCR_NISO;

		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= (CCCR_BRSE | CCCR_FDOE);
	}

	/* Loopback Mode */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		cccr |= CCCR_TEST | CCCR_MON;
		test |= TEST_LBCK;
	}

	/* Enable Monitoring (all versions) */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		cccr |= CCCR_MON;

	/* Disable Auto Retransmission (all versions) */
	if (cdev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		cccr |= CCCR_DAR;

	/* Write config */
	m_can_write(cdev, M_CAN_CCCR, cccr);
	m_can_write(cdev, M_CAN_TEST, test);

	/* Enable interrupts; mask LEC (last error code) interrupts unless
	 * bus error reporting was requested
	 */
	m_can_write(cdev, M_CAN_IR, IR_ALL_INT);
	if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		if (cdev->version == 30)
			m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_30X));
		else
			m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_31X));
	else
		m_can_write(cdev, M_CAN_IE, IR_ALL_INT);

	/* route all interrupts to INT0 */
	m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);

	/* set bittiming params */
	m_can_set_bittiming(dev);

	/* enable internal timestamp generation, with a prescalar of 16. The
	 * prescalar is applied to the nominal bit timing
	 */
	m_can_write(cdev, M_CAN_TSCC,
		    FIELD_PREP(TSCC_TCP_MASK, 0xf) |
		    FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));

	m_can_config_endisable(cdev, false);

	/* device-specific post-configuration hook */
	if (cdev->ops->init)
		cdev->ops->init(cdev);
}
   1363
/* Bring the controller to the active state: apply the base chip
 * configuration, mark the CAN state machine error-active, then unmask
 * interrupts (in this order).
 */
static void m_can_start(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	/* basic m_can configuration */
	m_can_chip_config(dev);

	cdev->can.state = CAN_STATE_ERROR_ACTIVE;

	m_can_enable_all_interrupts(cdev);
}
   1375
   1376static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
   1377{
   1378	switch (mode) {
   1379	case CAN_MODE_START:
   1380		m_can_clean(dev);
   1381		m_can_start(dev);
   1382		netif_wake_queue(dev);
   1383		break;
   1384	default:
   1385		return -EOPNOTSUPP;
   1386	}
   1387
   1388	return 0;
   1389}
   1390
   1391/* Checks core release number of M_CAN
   1392 * returns 0 if an unsupported device is detected
   1393 * else it returns the release and step coded as:
   1394 * return value = 10 * <release> + 1 * <step>
   1395 */
   1396static int m_can_check_core_release(struct m_can_classdev *cdev)
   1397{
   1398	u32 crel_reg;
   1399	u8 rel;
   1400	u8 step;
   1401	int res;
   1402
   1403	/* Read Core Release Version and split into version number
   1404	 * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
   1405	 */
   1406	crel_reg = m_can_read(cdev, M_CAN_CREL);
   1407	rel = (u8)FIELD_GET(CREL_REL_MASK, crel_reg);
   1408	step = (u8)FIELD_GET(CREL_STEP_MASK, crel_reg);
   1409
   1410	if (rel == 3) {
   1411		/* M_CAN v3.x.y: create return value */
   1412		res = 30 + step;
   1413	} else {
   1414		/* Unsupported M_CAN version */
   1415		res = 0;
   1416	}
   1417
   1418	return res;
   1419}
   1420
   1421/* Selectable Non ISO support only in version 3.2.x
   1422 * This function checks if the bit is writable.
   1423 */
   1424static bool m_can_niso_supported(struct m_can_classdev *cdev)
   1425{
   1426	u32 cccr_reg, cccr_poll = 0;
   1427	int niso_timeout = -ETIMEDOUT;
   1428	int i;
   1429
   1430	m_can_config_endisable(cdev, true);
   1431	cccr_reg = m_can_read(cdev, M_CAN_CCCR);
   1432	cccr_reg |= CCCR_NISO;
   1433	m_can_write(cdev, M_CAN_CCCR, cccr_reg);
   1434
   1435	for (i = 0; i <= 10; i++) {
   1436		cccr_poll = m_can_read(cdev, M_CAN_CCCR);
   1437		if (cccr_poll == cccr_reg) {
   1438			niso_timeout = 0;
   1439			break;
   1440		}
   1441
   1442		usleep_range(1, 5);
   1443	}
   1444
   1445	/* Clear NISO */
   1446	cccr_reg &= ~(CCCR_NISO);
   1447	m_can_write(cdev, M_CAN_CCCR, cccr_reg);
   1448
   1449	m_can_config_endisable(cdev, false);
   1450
   1451	/* return false if time out (-ETIMEDOUT), else return true */
   1452	return !niso_timeout;
   1453}
   1454
/* Probe-time setup: detect the IP version, register NAPI for
 * memory-mapped devices and install the version-dependent bit timing
 * constants and ctrlmode capabilities.  Returns 0 or a negative errno.
 */
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
	struct net_device *dev = cdev->net;
	int m_can_version, err;

	m_can_version = m_can_check_core_release(cdev);
	/* return if unsupported version */
	if (!m_can_version) {
		dev_err(cdev->dev, "Unsupported version number: %2d",
			m_can_version);
		return -EINVAL;
	}

	/* Peripheral devices use rx-offload instead of NAPI */
	if (!cdev->is_peripheral)
		netif_napi_add(dev, &cdev->napi,
			       m_can_poll, NAPI_POLL_WEIGHT);

	/* Shared properties of all M_CAN versions */
	cdev->version = m_can_version;
	cdev->can.do_set_mode = m_can_set_mode;
	cdev->can.do_get_berr_counter = m_can_get_berr_counter;

	/* Set M_CAN supported operations */
	cdev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
		CAN_CTRLMODE_LISTENONLY |
		CAN_CTRLMODE_BERR_REPORTING |
		CAN_CTRLMODE_FD |
		CAN_CTRLMODE_ONE_SHOT;

	/* Set properties depending on M_CAN version */
	switch (cdev->version) {
	case 30:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = &m_can_bittiming_const_30X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
		break;
	case 31:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		if (err)
			return err;
		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
		break;
	case 32:
	case 33:
		/* Support both MCAN version v3.2.x and v3.3.0 */
		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;

		/* NISO is selectable only if the bit is writable */
		cdev->can.ctrlmode_supported |=
			(m_can_niso_supported(cdev) ?
			 CAN_CTRLMODE_FD_NON_ISO : 0);
		break;
	default:
		dev_err(cdev->dev, "Unsupported version number: %2d",
			cdev->version);
		return -EINVAL;
	}

	/* device-specific init hook */
	if (cdev->ops->init)
		cdev->ops->init(cdev);

	return 0;
}
   1523
/* Stop the controller: mask interrupts, enter init mode to disengage
 * from the bus, and mark the CAN state machine stopped.
 */
static void m_can_stop(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	/* disable all interrupts */
	m_can_disable_all_interrupts(cdev);

	/* Set init mode to disengage from the network */
	m_can_config_endisable(cdev, true);

	/* set the state as STOPPED */
	cdev->can.state = CAN_STATE_STOPPED;
}
   1537
   1538static int m_can_close(struct net_device *dev)
   1539{
   1540	struct m_can_classdev *cdev = netdev_priv(dev);
   1541
   1542	netif_stop_queue(dev);
   1543
   1544	if (!cdev->is_peripheral)
   1545		napi_disable(&cdev->napi);
   1546
   1547	m_can_stop(dev);
   1548	m_can_clk_stop(cdev);
   1549	free_irq(dev->irq, dev);
   1550
   1551	if (cdev->is_peripheral) {
   1552		cdev->tx_skb = NULL;
   1553		destroy_workqueue(cdev->tx_wq);
   1554		cdev->tx_wq = NULL;
   1555	}
   1556
   1557	if (cdev->is_peripheral)
   1558		can_rx_offload_disable(&cdev->offload);
   1559
   1560	close_candev(dev);
   1561
   1562	phy_power_off(cdev->transceiver);
   1563
   1564	return 0;
   1565}
   1566
   1567static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
   1568{
   1569	struct m_can_classdev *cdev = netdev_priv(dev);
   1570	/*get wrap around for loopback skb index */
   1571	unsigned int wrap = cdev->can.echo_skb_max;
   1572	int next_idx;
   1573
   1574	/* calculate next index */
   1575	next_idx = (++putidx >= wrap ? 0 : putidx);
   1576
   1577	/* check if occupied */
   1578	return !!cdev->can.echo_skb[next_idx];
   1579}
   1580
/* Write cdev->tx_skb into the controller's TX buffer (v3.0.x) or TX FIFO
 * (>= v3.1.x) and queue the echo skb.  Runs in xmit context for
 * memory-mapped devices and from the TX workqueue for peripherals.
 */
static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
{
	struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
	struct net_device *dev = cdev->net;
	struct sk_buff *skb = cdev->tx_skb;
	struct id_and_dlc fifo_header;
	u32 cccr, fdflags;
	int err;
	int putidx;

	/* claim the skb: the classdev slot is free for the next frame */
	cdev->tx_skb = NULL;

	/* Generate ID field for TX buffer Element */
	/* Common to all supported M_CAN versions */
	if (cf->can_id & CAN_EFF_FLAG) {
		fifo_header.id = cf->can_id & CAN_EFF_MASK;
		fifo_header.id |= TX_BUF_XTD;
	} else {
		/* standard ID occupies the upper 11 bits of the ID field */
		fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
	}

	if (cf->can_id & CAN_RTR_FLAG)
		fifo_header.id |= TX_BUF_RTR;

	if (cdev->version == 30) {
		/* v3.0.x has a single TX buffer: stop the queue until the
		 * TC interrupt signals completion
		 */
		netif_stop_queue(dev);

		fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;

		/* Write the frame ID, DLC, and payload to the FIFO element. */
		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
		if (err)
			goto out_fail;

		err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
				       cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_fail;

		/* v3.0.x selects CAN FD / BRS per frame via CCCR.CMR */
		if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
			cccr = m_can_read(cdev, M_CAN_CCCR);
			cccr &= ~CCCR_CMR_MASK;
			if (can_is_canfd_skb(skb)) {
				if (cf->flags & CANFD_BRS)
					cccr |= FIELD_PREP(CCCR_CMR_MASK,
							   CCCR_CMR_CANFD_BRS);
				else
					cccr |= FIELD_PREP(CCCR_CMR_MASK,
							   CCCR_CMR_CANFD);
			} else {
				cccr |= FIELD_PREP(CCCR_CMR_MASK, CCCR_CMR_CAN);
			}
			m_can_write(cdev, M_CAN_CCCR, cccr);
		}
		m_can_write(cdev, M_CAN_TXBTIE, 0x1);

		can_put_echo_skb(skb, dev, 0, 0);

		/* request transmission of buffer 0 */
		m_can_write(cdev, M_CAN_TXBAR, 0x1);
		/* End of xmit function for version 3.0.x */
	} else {
		/* Transmit routine for version >= v3.1.x */

		/* Check if FIFO full */
		if (m_can_tx_fifo_full(cdev)) {
			/* This shouldn't happen */
			netif_stop_queue(dev);
			netdev_warn(dev,
				    "TX queue active although FIFO is full.");

			if (cdev->is_peripheral) {
				kfree_skb(skb);
				dev->stats.tx_dropped++;
				return NETDEV_TX_OK;
			} else {
				return NETDEV_TX_BUSY;
			}
		}

		/* get put index for frame */
		putidx = FIELD_GET(TXFQS_TFQPI_MASK,
				   m_can_read(cdev, M_CAN_TXFQS));

		/* Construct DLC Field, with CAN-FD configuration.
		 * Use the put index of the fifo as the message marker,
		 * used in the TX interrupt for sending the correct echo frame.
		 */

		/* get CAN FD configuration of frame */
		fdflags = 0;
		if (can_is_canfd_skb(skb)) {
			fdflags |= TX_BUF_FDF;
			if (cf->flags & CANFD_BRS)
				fdflags |= TX_BUF_BRS;
		}

		fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
			FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
			fdflags | TX_BUF_EFC;
		err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
		if (err)
			goto out_fail;

		err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
				       cf->data, DIV_ROUND_UP(cf->len, 4));
		if (err)
			goto out_fail;

		/* Push loopback echo.
		 * Will be looped back on TX interrupt based on message marker
		 */
		can_put_echo_skb(skb, dev, putidx, 0);

		/* Enable TX FIFO element to start transfer */
		m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));

		/* stop network queue if fifo full */
		if (m_can_tx_fifo_full(cdev) ||
		    m_can_next_echo_skb_occupied(dev, putidx))
			netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;

out_fail:
	netdev_err(dev, "FIFO write returned %d\n", err);
	m_can_disable_all_interrupts(cdev);
	return NETDEV_TX_BUSY;
}
   1710
   1711static void m_can_tx_work_queue(struct work_struct *ws)
   1712{
   1713	struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
   1714						   tx_work);
   1715
   1716	m_can_tx_handler(cdev);
   1717}
   1718
/* ndo_start_xmit callback.  Memory-mapped devices transmit directly;
 * peripheral devices hand the skb to a workqueue because bus access
 * may sleep.
 */
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	if (cdev->is_peripheral) {
		/* tx_skb still set means the previous frame has not been
		 * handed to the hardware yet
		 */
		if (cdev->tx_skb) {
			netdev_err(dev, "hard_xmit called while tx busy\n");
			return NETDEV_TX_BUSY;
		}

		if (cdev->can.state == CAN_STATE_BUS_OFF) {
			m_can_clean(dev);
		} else {
			/* Need to stop the queue to avoid numerous requests
			 * from being sent.  Suggested improvement is to create
			 * a queueing mechanism that will queue the skbs and
			 * process them in order.
			 */
			cdev->tx_skb = skb;
			netif_stop_queue(cdev->net);
			queue_work(cdev->tx_wq, &cdev->tx_work);
		}
	} else {
		cdev->tx_skb = skb;
		return m_can_tx_handler(cdev);
	}

	return NETDEV_TX_OK;
}
   1752
/* ndo_open callback: power the transceiver, start clocks, open the
 * candev, set up rx-offload / TX workqueue (peripheral only), request
 * the IRQ and start the controller.  Unwinds via goto chain on failure.
 */
static int m_can_open(struct net_device *dev)
{
	struct m_can_classdev *cdev = netdev_priv(dev);
	int err;

	err = phy_power_on(cdev->transceiver);
	if (err)
		return err;

	err = m_can_clk_start(cdev);
	if (err)
		goto out_phy_power_off;

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_disable_clks;
	}

	if (cdev->is_peripheral)
		can_rx_offload_enable(&cdev->offload);

	/* register interrupt handler */
	if (cdev->is_peripheral) {
		/* peripherals need a sleepable (threaded) handler plus a
		 * workqueue for TX
		 */
		cdev->tx_skb = NULL;
		cdev->tx_wq = alloc_workqueue("mcan_wq",
					      WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
		if (!cdev->tx_wq) {
			err = -ENOMEM;
			goto out_wq_fail;
		}

		INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);

		err = request_threaded_irq(dev->irq, NULL, m_can_isr,
					   IRQF_ONESHOT,
					   dev->name, dev);
	} else {
		err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
				  dev);
	}

	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the m_can controller */
	m_can_start(dev);

	if (!cdev->is_peripheral)
		napi_enable(&cdev->napi);

	netif_start_queue(dev);

	return 0;

exit_irq_fail:
	if (cdev->is_peripheral)
		destroy_workqueue(cdev->tx_wq);
out_wq_fail:
	if (cdev->is_peripheral)
		can_rx_offload_disable(&cdev->offload);
	close_candev(dev);
exit_disable_clks:
	m_can_clk_stop(cdev);
out_phy_power_off:
	phy_power_off(cdev->transceiver);
	return err;
}
   1824
/* netdev callbacks for the M_CAN network interface */
static const struct net_device_ops m_can_netdev_ops = {
	.ndo_open = m_can_open,
	.ndo_stop = m_can_close,
	.ndo_start_xmit = m_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
   1831
/* Install netdev ops and register the device with the CAN core.
 * Returns the register_candev() result.
 */
static int register_m_can_dev(struct net_device *dev)
{
	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &m_can_netdev_ops;

	return register_candev(dev);
}
   1839
/* Fill cdev->mcfg from the 8-word "bosch,mram-cfg" firmware property:
 * word 0 is the base offset of the SIDF section, words 1..7 are element
 * counts.  Each section is laid out contiguously after the previous one,
 * so every .off is derived from the preceding section's end.
 */
static void m_can_of_parse_mram(struct m_can_classdev *cdev,
				const u32 *mram_config_vals)
{
	cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0];
	cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1];
	cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off +
		cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
	cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2];
	cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
		cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
	/* FIFO sizes are clamped to what the register field can hold */
	cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
		FIELD_MAX(RXFC_FS_MASK);
	cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
		cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
		FIELD_MAX(RXFC_FS_MASK);
	cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
		cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
	cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
	cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off +
		cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
	cdev->mcfg[MRAM_TXE].num = mram_config_vals[6];
	cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
		cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
	cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
		FIELD_MAX(TXBC_NDTB_MASK);

	dev_dbg(cdev->dev,
		"sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
		cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
		cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
		cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
		cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
		cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
		cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
		cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}
   1877
   1878int m_can_init_ram(struct m_can_classdev *cdev)
   1879{
   1880	int end, i, start;
   1881	int err = 0;
   1882
   1883	/* initialize the entire Message RAM in use to avoid possible
   1884	 * ECC/parity checksum errors when reading an uninitialized buffer
   1885	 */
   1886	start = cdev->mcfg[MRAM_SIDF].off;
   1887	end = cdev->mcfg[MRAM_TXB].off +
   1888		cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
   1889
   1890	for (i = start; i < end; i += 4) {
   1891		err = m_can_fifo_write_no_off(cdev, i, 0x0);
   1892		if (err)
   1893			break;
   1894	}
   1895
   1896	return err;
   1897}
   1898EXPORT_SYMBOL_GPL(m_can_init_ram);
   1899
   1900int m_can_class_get_clocks(struct m_can_classdev *cdev)
   1901{
   1902	int ret = 0;
   1903
   1904	cdev->hclk = devm_clk_get(cdev->dev, "hclk");
   1905	cdev->cclk = devm_clk_get(cdev->dev, "cclk");
   1906
   1907	if (IS_ERR(cdev->cclk)) {
   1908		dev_err(cdev->dev, "no clock found\n");
   1909		ret = -ENODEV;
   1910	}
   1911
   1912	return ret;
   1913}
   1914EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
   1915
/* Allocate a candev (with @sizeof_priv driver-private bytes) sized for
 * the TX FIFO described by the "bosch,mram-cfg" firmware property, and
 * parse that property into the classdev's MRAM layout.
 * Returns the classdev or NULL on failure.
 */
struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
						int sizeof_priv)
{
	struct m_can_classdev *class_dev = NULL;
	u32 mram_config_vals[MRAM_CFG_LEN];
	struct net_device *net_dev;
	u32 tx_fifo_size;
	int ret;

	ret = fwnode_property_read_u32_array(dev_fwnode(dev),
					     "bosch,mram-cfg",
					     mram_config_vals,
					     sizeof(mram_config_vals) / 4);
	if (ret) {
		dev_err(dev, "Could not get Message RAM configuration.");
		goto out;
	}

	/* Get TX FIFO size
	 * Defines the total amount of echo buffers for loopback
	 */
	tx_fifo_size = mram_config_vals[7];

	/* allocate the m_can device */
	net_dev = alloc_candev(sizeof_priv, tx_fifo_size);
	if (!net_dev) {
		dev_err(dev, "Failed to allocate CAN device");
		goto out;
	}

	class_dev = netdev_priv(net_dev);
	class_dev->net = net_dev;
	class_dev->dev = dev;
	SET_NETDEV_DEV(net_dev, dev);

	m_can_of_parse_mram(class_dev, mram_config_vals);
out:
	return class_dev;
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
   1956
/* Release a netdev obtained from m_can_class_allocate_dev(). */
void m_can_class_free_dev(struct net_device *net)
{
	free_candev(net);
}
EXPORT_SYMBOL_GPL(m_can_class_free_dev);
   1962
/* Final probe step for M_CAN bus drivers: start clocks, set up
 * rx-offload for peripherals, detect the core and register the candev.
 * Clocks are stopped again before returning; they are restarted on open.
 * Returns 0 or a negative errno (resources unwound via goto chain).
 */
int m_can_class_register(struct m_can_classdev *cdev)
{
	int ret;

	if (cdev->pm_clock_support) {
		ret = m_can_clk_start(cdev);
		if (ret)
			return ret;
	}

	if (cdev->is_peripheral) {
		ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
						NAPI_POLL_WEIGHT);
		if (ret)
			goto clk_disable;
	}

	ret = m_can_dev_setup(cdev);
	if (ret)
		goto rx_offload_del;

	ret = register_m_can_dev(cdev->net);
	if (ret) {
		dev_err(cdev->dev, "registering %s failed (err=%d)\n",
			cdev->net->name, ret);
		goto rx_offload_del;
	}

	of_can_transceiver(cdev->net);

	dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
		 KBUILD_MODNAME, cdev->net->irq, cdev->version);

	/* Probe finished
	 * Stop clocks. They will be reactivated once the M_CAN device is opened
	 */
	m_can_clk_stop(cdev);

	return 0;

rx_offload_del:
	if (cdev->is_peripheral)
		can_rx_offload_del(&cdev->offload);
clk_disable:
	m_can_clk_stop(cdev);

	return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_register);
   2012
   2013void m_can_class_unregister(struct m_can_classdev *cdev)
   2014{
   2015	if (cdev->is_peripheral)
   2016		can_rx_offload_del(&cdev->offload);
   2017	unregister_candev(cdev->net);
   2018}
   2019EXPORT_SYMBOL_GPL(m_can_class_unregister);
   2020
/* System-PM suspend hook: quiesce a running interface, gate the clocks,
 * move the pins to their sleep state and mark the CAN state as sleeping.
 *
 * Always returns 0.
 */
int m_can_class_suspend(struct device *dev)
{
	struct m_can_classdev *cdev = dev_get_drvdata(dev);
	struct net_device *ndev = cdev->net;

	if (netif_running(ndev)) {
		/* Order matters: stop TX and hide the device from the
		 * networking stack before halting the controller, and only
		 * then turn the clocks off.
		 */
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		m_can_stop(ndev);
		m_can_clk_stop(cdev);
	}

	pinctrl_pm_select_sleep_state(dev);

	cdev->can.state = CAN_STATE_SLEEPING;

	return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);
   2040
/* System-PM resume hook: mirror of m_can_class_suspend().
 *
 * Restores the default pin state, and if the interface was running,
 * re-enables the clocks, re-initializes the Message RAM (its contents are
 * not preserved across suspend), restarts the controller and reattaches
 * the device to the networking stack.
 *
 * Returns 0 on success or the error from m_can_clk_start().
 */
int m_can_class_resume(struct device *dev)
{
	struct m_can_classdev *cdev = dev_get_drvdata(dev);
	struct net_device *ndev = cdev->net;

	pinctrl_pm_select_default_state(dev);

	cdev->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(ndev)) {
		int ret;

		ret = m_can_clk_start(cdev);
		if (ret)
			return ret;

		m_can_init_ram(cdev);
		m_can_start(ndev);
		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
   2066
/* Module metadata */
MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");