cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kvaser_pciefd.c (53468B)


      1// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
      2/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
      3 * Parts of this driver are based on the following:
      4 *  - Kvaser linux pciefd driver (version 5.25)
      5 *  - PEAK linux canfd driver
      6 *  - Altera Avalon EPCS flash controller driver
      7 */
      8
      9#include <linux/kernel.h>
     10#include <linux/module.h>
     11#include <linux/device.h>
     12#include <linux/pci.h>
     13#include <linux/can/dev.h>
     14#include <linux/timer.h>
     15#include <linux/netdevice.h>
     16#include <linux/crc32.h>
     17#include <linux/iopoll.h>
     18
     19MODULE_LICENSE("Dual BSD/GPL");
     20MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
     21MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
     22
     23#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"
     24
     25#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
     26#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
     27#define KVASER_PCIEFD_MAX_ERR_REP 256
     28#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
     29#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
     30#define KVASER_PCIEFD_DMA_COUNT 2
     31
     32#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
     33#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)
     34
     35#define KVASER_PCIEFD_VENDOR 0x1a07
     36#define KVASER_PCIEFD_4HS_ID 0x0d
     37#define KVASER_PCIEFD_2HS_ID 0x0e
     38#define KVASER_PCIEFD_HS_ID 0x0f
     39#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
     40#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11
     41
     42/* PCIe IRQ registers */
     43#define KVASER_PCIEFD_IRQ_REG 0x40
     44#define KVASER_PCIEFD_IEN_REG 0x50
     45/* DMA map */
     46#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
     47/* Kvaser KCAN CAN controller registers */
     48#define KVASER_PCIEFD_KCAN0_BASE 0x10000
     49#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
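        /* Each channel's registers occupy a 0x1000-byte block: channel i sits
         * at KVASER_PCIEFD_KCAN0_BASE + i * KVASER_PCIEFD_KCAN_BASE_OFFSET
         */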
     50#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
     51#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
     52#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
     53#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
     54#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
     55#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
     56#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
     57#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
     58#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
     59#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
     60#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
     61#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
     62#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
     63/* Loopback control register */
     64#define KVASER_PCIEFD_LOOP_REG 0x1f000
     65/* System identification and information registers */
     66#define KVASER_PCIEFD_SYSID_BASE 0x1f020
     67#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
     68#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
     69#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
     70#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
     71/* Shared receive buffer registers */
     72#define KVASER_PCIEFD_SRB_BASE 0x1f200
     73#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
     74#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
     75#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
     76#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
     77#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
     78/* EPCS flash controller registers */
     79#define KVASER_PCIEFD_SPI_BASE 0x1fc00
     80#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
     81#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
     82#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
     83#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
     84#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)
     85
     86#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
     87#define KVASER_PCIEFD_IRQ_SRB BIT(4)
     88
     89#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
     90#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
     91#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1
     92
     93/* Reset DMA buffer 0, 1 and FIFO offset */
     94#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
     95#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
     96#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)
     97
     98/* DMA packet done, buffer 0 and 1 */
     99#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
    100#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
    101/* DMA overflow, buffer 0 and 1 */
    102#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
    103#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
    104/* DMA underflow, buffer 0 and 1 */
    105#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
    106#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
    107
    108/* DMA idle */
    109#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
    110/* DMA support */
    111#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
    112
    113/* DMA Enable */
    114#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
    115
    116/* EPCS flash controller definitions */
    117#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
    118#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
    119#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
    120#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
    121#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
    122#define KVASER_PCIEFD_CFG_SYS_VER 1
    123#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
    124#define KVASER_PCIEFD_SPI_TMT BIT(5)
    125#define KVASER_PCIEFD_SPI_TRDY BIT(6)
    126#define KVASER_PCIEFD_SPI_RRDY BIT(7)
    127#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
    128/* Commands for controlling the onboard flash */
    129#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
    130#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
    131#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5
    132
    133/* Kvaser KCAN definitions */
    134#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
    135#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)
    136
    137#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
    138/* Request status packet */
    139#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
    140/* Abort, flush and reset */
    141#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
    142
    143/* Tx FIFO unaligned read */
    144#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
    145/* Tx FIFO unaligned end */
    146#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
    147/* Bus parameter protection error */
    148#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
    149/* FDF bit when controller is in classic mode */
    150#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
    151/* Rx FIFO overflow */
    152#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
    153/* Abort done */
    154#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
    155/* Tx buffer flush done */
    156#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
    157/* Tx FIFO overflow */
    158#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
    159/* Tx FIFO empty */
    160#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
    161/* Transmitter unaligned */
    162#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
    163
    164#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16
    165
    166#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
    167/* Abort request */
    168#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
    169/* Idle state. Controller in reset mode and no abort or flush pending */
    170#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
    171/* Bus off */
    172#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
    173/* Reset mode request */
    174#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
    175/* Controller in reset mode */
    176#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
     177/* Controller has one-shot capability */
     178#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
     179/* Controller has CAN FD capability */
    180#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
    181#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
    182	KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
    183	KVASER_PCIEFD_KCAN_STAT_IRM)
    184
    185/* Reset mode */
    186#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
    187/* Listen only mode */
    188#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
    189/* Error packet enable */
    190#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
    191/* CAN FD non-ISO */
    192#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
    193/* Acknowledgment packet type */
    194#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
    195/* Active error flag enable. Clear to force error passive */
    196#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
    197/* Classic CAN mode */
    198#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
    199
    200#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
    201#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
    202#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26
    203
    204#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16
    205
    206/* Kvaser KCAN packet types */
    207#define KVASER_PCIEFD_PACK_TYPE_DATA 0
    208#define KVASER_PCIEFD_PACK_TYPE_ACK 1
    209#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
    210#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
    211#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
    212#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
    213#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
    214#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
    215#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9
    216
    217/* Kvaser KCAN packet common definitions */
    218#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
    219#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
    220#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28
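        /* The channel id (bits 27:25) and packet type (bits 31:28) always sit
         * in the second header word; where the sequence number sits depends on
         * the packet type
         */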
    221
    222/* Kvaser KCAN TDATA and RDATA first word */
    223#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
    224#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
    225/* Kvaser KCAN TDATA and RDATA second word */
    226#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
    227#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
    228#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
    229#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
    230/* Kvaser KCAN TDATA second word */
    231#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
    232#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
    233
    234/* Kvaser KCAN APACKET */
    235#define KVASER_PCIEFD_APACKET_FLU BIT(8)
    236#define KVASER_PCIEFD_APACKET_CT BIT(9)
    237#define KVASER_PCIEFD_APACKET_ABL BIT(10)
    238#define KVASER_PCIEFD_APACKET_NACK BIT(11)
    239
    240/* Kvaser KCAN SPACK first word */
    241#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
    242#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
    243#define KVASER_PCIEFD_SPACK_IDET BIT(20)
    244#define KVASER_PCIEFD_SPACK_IRM BIT(21)
    245#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
    246/* Kvaser KCAN SPACK second word */
    247#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
    248#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
    249#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
    250
    251/* Kvaser KCAN_EPACK second word */
    252#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
    253
    254struct kvaser_pciefd;
    255
    256struct kvaser_pciefd_can {
    257	struct can_priv can;
    258	struct kvaser_pciefd *kv_pcie;
    259	void __iomem *reg_base;
    260	struct can_berr_counter bec;
    261	u8 cmd_seq;
    262	int err_rep_cnt;
    263	int echo_idx;
    264	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
    265	spinlock_t echo_lock; /* Locks the message echo buffer */
    266	struct timer_list bec_poll_timer;
    267	struct completion start_comp, flush_comp;
    268};
    269
    270struct kvaser_pciefd {
    271	struct pci_dev *pci;
    272	void __iomem *reg_base;
    273	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
    274	void *dma_data[KVASER_PCIEFD_DMA_COUNT];
    275	u8 nr_channels;
    276	u32 bus_freq;
    277	u32 freq;
    278	u32 freq_to_ticks_div;
    279};
    280
    281struct kvaser_pciefd_rx_packet {
    282	u32 header[2];
    283	u64 timestamp;
    284};
    285
    286struct kvaser_pciefd_tx_packet {
    287	u32 header[2];
    288	u8 data[64];
    289};
    290
    291static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
    292	.name = KVASER_PCIEFD_DRV_NAME,
    293	.tseg1_min = 1,
    294	.tseg1_max = 512,
    295	.tseg2_min = 1,
    296	.tseg2_max = 32,
    297	.sjw_max = 16,
    298	.brp_min = 1,
    299	.brp_max = 8192,
    300	.brp_inc = 1,
    301};
    302
    303struct kvaser_pciefd_cfg_param {
    304	__le32 magic;
    305	__le32 nr;
    306	__le32 len;
    307	u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
    308};
    309
    310struct kvaser_pciefd_cfg_img {
    311	__le32 version;
    312	__le32 magic;
    313	__le32 crc;
    314	struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
    315};
    316
    317static struct pci_device_id kvaser_pciefd_id_table[] = {
    318	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
    319	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
    320	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
    321	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
    322	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
    323	{ 0,},
    324};
    325MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
    326
    327/* Onboard flash memory functions */
    328static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
    329{
    330	u32 res;
    331	int ret;
    332
    333	ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
    334				 res, res & msk, 0, 10);
    335
    336	return ret;
    337}
    338
    339static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
    340				 u32 tx_len, u8 *rx, u32 rx_len)
    341{
    342	int c;
    343
    344	iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
    345	iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
    346	ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
    347
    348	c = tx_len;
    349	while (c--) {
    350		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
    351			return -EIO;
    352
    353		iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
    354
    355		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
    356			return -EIO;
    357
    358		ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
    359	}
    360
    361	c = rx_len;
    362	while (c-- > 0) {
    363		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
    364			return -EIO;
    365
    366		iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
    367
    368		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
    369			return -EIO;
    370
    371		*rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
    372	}
    373
    374	if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
    375		return -EIO;
    376
    377	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
    378
    379	if (c != -1) {
    380		dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
    381		return -EIO;
    382	}
    383
    384	return 0;
    385}
    386
    387static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
    388					     struct kvaser_pciefd_cfg_img *img)
    389{
    390	int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
    391	int res, crc;
    392	u8 *crc_buff;
    393
    394	u8 cmd[] = {
    395		KVASER_PCIEFD_FLASH_READ_CMD,
    396		(u8)((offset >> 16) & 0xff),
    397		(u8)((offset >> 8) & 0xff),
    398		(u8)(offset & 0xff)
    399	};
    400
    401	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
    402				    KVASER_PCIEFD_CFG_IMG_SZ);
    403	if (res)
    404		return res;
    405
    406	crc_buff = (u8 *)img->params;
    407
    408	if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
    409		dev_err(&pcie->pci->dev,
    410			"Config flash corrupted, version number is wrong\n");
    411		return -ENODEV;
    412	}
    413
    414	if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
    415		dev_err(&pcie->pci->dev,
    416			"Config flash corrupted, magic number is wrong\n");
    417		return -ENODEV;
    418	}
    419
    420	crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
    421	if (le32_to_cpu(img->crc) != crc) {
    422		dev_err(&pcie->pci->dev,
    423			"Stored CRC does not match flash image contents\n");
    424		return -EIO;
    425	}
    426
    427	return 0;
    428}
    429
    430static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
    431					  struct kvaser_pciefd_cfg_img *img)
    432{
    433	struct kvaser_pciefd_cfg_param *param;
    434
    435	param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
    436	memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
    437}
    438
    439static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
    440{
    441	int res;
    442	struct kvaser_pciefd_cfg_img *img;
    443
    444	/* Read electronic signature */
    445	u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};
    446
    447	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
    448	if (res)
    449		return -EIO;
    450
    451	img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
    452	if (!img)
    453		return -ENOMEM;
    454
    455	if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
    456		dev_err(&pcie->pci->dev,
    457			"Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
    458			cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);
    459
    460		res = -ENODEV;
    461		goto image_free;
    462	}
    463
    464	cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
    465	res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
    466	if (res) {
    467		goto image_free;
    468	} else if (cmd[0] & 1) {
    469		res = -EIO;
     470		/* No write is ever done, so the WIP bit should never be set */
    471		dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
    472		goto image_free;
    473	}
    474
    475	res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
    476	if (res) {
    477		res = -EIO;
    478		goto image_free;
    479	}
    480
    481	kvaser_pciefd_cfg_read_params(pcie, img);
    482
    483image_free:
    484	kfree(img);
    485	return res;
    486}
    487
    488static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
    489{
    490	u32 cmd;
    491
    492	cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
    493	cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
    494	iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
    495}
    496
    497static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
    498{
    499	u32 mode;
    500	unsigned long irq;
    501
    502	spin_lock_irqsave(&can->lock, irq);
    503	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    504	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
    505		mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
    506		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    507	}
    508	spin_unlock_irqrestore(&can->lock, irq);
    509}
    510
    511static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
    512{
    513	u32 mode;
    514	unsigned long irq;
    515
    516	spin_lock_irqsave(&can->lock, irq);
    517	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    518	mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
    519	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    520	spin_unlock_irqrestore(&can->lock, irq);
    521}
    522
    523static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
    524{
    525	u32 msk;
    526
    527	msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
    528	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
    529	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
    530	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
    531	      KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
    532
    533	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
    534
    535	return 0;
    536}
    537
    538static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
    539{
    540	u32 mode;
    541	unsigned long irq;
    542
    543	spin_lock_irqsave(&can->lock, irq);
    544
    545	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    546	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
    547		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
    548		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
    549			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
    550		else
    551			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
    552	} else {
    553		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
    554		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
    555	}
    556
    557	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
    558		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
    559
    560	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
    561	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
    562	/* Use ACK packet type */
    563	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
    564	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
    565	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    566
    567	spin_unlock_irqrestore(&can->lock, irq);
    568}
    569
    570static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
    571{
    572	u32 status;
    573	unsigned long irq;
    574
    575	spin_lock_irqsave(&can->lock, irq);
    576	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
    577	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
    578		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
    579
    580	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
    581	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
    582		u32 cmd;
    583
    584		/* If controller is already idle, run abort, flush and reset */
    585		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
    586		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
    587		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
    588	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
    589		u32 mode;
    590
    591		/* Put controller in reset mode */
    592		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    593		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
    594		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    595	}
    596
    597	spin_unlock_irqrestore(&can->lock, irq);
    598}
    599
    600static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
    601{
    602	u32 mode;
    603	unsigned long irq;
    604
    605	del_timer(&can->bec_poll_timer);
    606
    607	if (!completion_done(&can->flush_comp))
    608		kvaser_pciefd_start_controller_flush(can);
    609
    610	if (!wait_for_completion_timeout(&can->flush_comp,
    611					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
    612		netdev_err(can->can.dev, "Timeout during bus on flush\n");
    613		return -ETIMEDOUT;
    614	}
    615
    616	spin_lock_irqsave(&can->lock, irq);
    617	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
    618	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
    619
    620	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
    621		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
    622
    623	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    624	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
    625	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    626	spin_unlock_irqrestore(&can->lock, irq);
    627
    628	if (!wait_for_completion_timeout(&can->start_comp,
    629					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
    630		netdev_err(can->can.dev, "Timeout during bus on reset\n");
    631		return -ETIMEDOUT;
    632	}
    633	/* Reset interrupt handling */
    634	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
    635	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
    636
    637	kvaser_pciefd_set_tx_irq(can);
    638	kvaser_pciefd_setup_controller(can);
    639
    640	can->can.state = CAN_STATE_ERROR_ACTIVE;
    641	netif_wake_queue(can->can.dev);
    642	can->bec.txerr = 0;
    643	can->bec.rxerr = 0;
    644	can->err_rep_cnt = 0;
    645
    646	return 0;
    647}
    648
    649static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
    650{
    651	u8 top;
    652	u32 pwm_ctrl;
    653	unsigned long irq;
    654
    655	spin_lock_irqsave(&can->lock, irq);
    656	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
    657	top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;
    658
    659	/* Set duty cycle to zero */
    660	pwm_ctrl |= top;
    661	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
    662	spin_unlock_irqrestore(&can->lock, irq);
    663}
    664
    665static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
    666{
    667	int top, trigger;
    668	u32 pwm_ctrl;
    669	unsigned long irq;
    670
    671	kvaser_pciefd_pwm_stop(can);
    672	spin_lock_irqsave(&can->lock, irq);
    673
     674	/* Set PWM frequency to 500 kHz */
    675	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;
    676
    677	pwm_ctrl = top & 0xff;
    678	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
    679	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
    680
     681	/* Set duty cycle to 95% */
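        	/* i.e. trigger = top - 0.95 * (top + 1), rounded to the nearest integer */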
    682	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
    683	pwm_ctrl = trigger & 0xff;
    684	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
    685	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
    686	spin_unlock_irqrestore(&can->lock, irq);
    687}
    688
    689static int kvaser_pciefd_open(struct net_device *netdev)
    690{
    691	int err;
    692	struct kvaser_pciefd_can *can = netdev_priv(netdev);
    693
    694	err = open_candev(netdev);
    695	if (err)
    696		return err;
    697
    698	err = kvaser_pciefd_bus_on(can);
    699	if (err) {
    700		close_candev(netdev);
    701		return err;
    702	}
    703
    704	return 0;
    705}
    706
    707static int kvaser_pciefd_stop(struct net_device *netdev)
    708{
    709	struct kvaser_pciefd_can *can = netdev_priv(netdev);
    710	int ret = 0;
    711
    712	/* Don't interrupt ongoing flush */
    713	if (!completion_done(&can->flush_comp))
    714		kvaser_pciefd_start_controller_flush(can);
    715
    716	if (!wait_for_completion_timeout(&can->flush_comp,
    717					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
    718		netdev_err(can->can.dev, "Timeout during stop\n");
    719		ret = -ETIMEDOUT;
    720	} else {
    721		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
    722		del_timer(&can->bec_poll_timer);
    723	}
    724	close_candev(netdev);
    725
    726	return ret;
    727}
    728
    729static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
    730					   struct kvaser_pciefd_can *can,
    731					   struct sk_buff *skb)
    732{
    733	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
    734	int packet_size;
    735	int seq = can->echo_idx;
    736
    737	memset(p, 0, sizeof(*p));
    738
    739	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
    740		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
    741
    742	if (cf->can_id & CAN_RTR_FLAG)
    743		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;
    744
    745	if (cf->can_id & CAN_EFF_FLAG)
    746		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;
    747
    748	p->header[0] |= cf->can_id & CAN_EFF_MASK;
    749	p->header[1] |= can_fd_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
    750	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;
    751
    752	if (can_is_canfd_skb(skb)) {
    753		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
    754		if (cf->flags & CANFD_BRS)
    755			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
    756		if (cf->flags & CANFD_ESI)
    757			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
    758	}
    759
    760	p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;
    761
    762	packet_size = cf->len;
    763	memcpy(p->data, cf->data, packet_size);
    764
    765	return DIV_ROUND_UP(packet_size, 4);
    766}
    767
    768static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
    769					    struct net_device *netdev)
    770{
    771	struct kvaser_pciefd_can *can = netdev_priv(netdev);
    772	unsigned long irq_flags;
    773	struct kvaser_pciefd_tx_packet packet;
    774	int nwords;
    775	u8 count;
    776
    777	if (can_dropped_invalid_skb(netdev, skb))
    778		return NETDEV_TX_OK;
    779
    780	nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
    781
    782	spin_lock_irqsave(&can->echo_lock, irq_flags);
    783
    784	/* Prepare and save echo skb in internal slot */
    785	can_put_echo_skb(skb, netdev, can->echo_idx, 0);
    786
    787	/* Move echo index to the next slot */
    788	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
    789
    790	/* Write header to fifo */
    791	iowrite32(packet.header[0],
    792		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
    793	iowrite32(packet.header[1],
    794		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
    795
    796	if (nwords) {
    797		u32 data_last = ((u32 *)packet.data)[nwords - 1];
    798
    799		/* Write data to fifo, except last word */
    800		iowrite32_rep(can->reg_base +
    801			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
    802			      nwords - 1);
    803		/* Write last word to end of fifo */
    804		__raw_writel(data_last, can->reg_base +
    805			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
    806	} else {
    807		/* Complete write to fifo */
    808		__raw_writel(0, can->reg_base +
    809			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
    810	}
    811
    812	count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
    813	/* No room for a new message, stop the queue until at least one
    814	 * successful transmit
    815	 */
    816	if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
    817	    can->can.echo_skb[can->echo_idx])
    818		netif_stop_queue(netdev);
    819
    820	spin_unlock_irqrestore(&can->echo_lock, irq_flags);
    821
    822	return NETDEV_TX_OK;
    823}
    824
    825static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
    826{
    827	u32 mode, test, btrn;
    828	unsigned long irq_flags;
    829	int ret;
    830	struct can_bittiming *bt;
    831
    832	if (data)
    833		bt = &can->can.data_bittiming;
    834	else
    835		bt = &can->can.bittiming;
    836
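        	/* Pack the timing into the BTRN/BTRD layout:
        	 * bits 12:0 BRP - 1, 16:13 SJW - 1, 25:17 TSEG1 - 1, 30:26 TSEG2 - 1
        	 */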
    837	btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
    838	       KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
    839	       (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
    840	       KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
    841	       ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
    842	       ((bt->brp - 1) & 0x1fff);
    843
    844	spin_lock_irqsave(&can->lock, irq_flags);
    845	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    846
    847	/* Put the circuit in reset mode */
    848	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
    849		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    850
    851	/* Can only set bittiming if in reset mode */
    852	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
    853				 test, test & KVASER_PCIEFD_KCAN_MODE_RM,
    854				 0, 10);
    855
    856	if (ret) {
    857		spin_unlock_irqrestore(&can->lock, irq_flags);
    858		return -EBUSY;
    859	}
    860
    861	if (data)
    862		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
    863	else
    864		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
    865
    866	/* Restore previous reset mode status */
    867	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
    868
    869	spin_unlock_irqrestore(&can->lock, irq_flags);
    870	return 0;
    871}
    872
    873static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
    874{
    875	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
    876}
    877
    878static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
    879{
    880	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
    881}
    882
    883static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
    884{
    885	struct kvaser_pciefd_can *can = netdev_priv(ndev);
    886	int ret = 0;
    887
    888	switch (mode) {
    889	case CAN_MODE_START:
    890		if (!can->can.restart_ms)
    891			ret = kvaser_pciefd_bus_on(can);
    892		break;
    893	default:
    894		return -EOPNOTSUPP;
    895	}
    896
    897	return ret;
    898}
    899
    900static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
    901					  struct can_berr_counter *bec)
    902{
    903	struct kvaser_pciefd_can *can = netdev_priv(ndev);
    904
    905	bec->rxerr = can->bec.rxerr;
    906	bec->txerr = can->bec.txerr;
    907	return 0;
    908}
    909
    910static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
    911{
    912	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);
    913
    914	kvaser_pciefd_enable_err_gen(can);
    915	kvaser_pciefd_request_status(can);
    916	can->err_rep_cnt = 0;
    917}
    918
    919static const struct net_device_ops kvaser_pciefd_netdev_ops = {
    920	.ndo_open = kvaser_pciefd_open,
    921	.ndo_stop = kvaser_pciefd_stop,
    922	.ndo_start_xmit = kvaser_pciefd_start_xmit,
    923	.ndo_change_mtu = can_change_mtu,
    924};
    925
    926static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
    927{
    928	int i;
    929
    930	for (i = 0; i < pcie->nr_channels; i++) {
    931		struct net_device *netdev;
    932		struct kvaser_pciefd_can *can;
    933		u32 status, tx_npackets;
    934
    935		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
    936				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
    937		if (!netdev)
    938			return -ENOMEM;
    939
    940		can = netdev_priv(netdev);
    941		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
    942		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
    943				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
    944
    945		can->kv_pcie = pcie;
    946		can->cmd_seq = 0;
    947		can->err_rep_cnt = 0;
    948		can->bec.txerr = 0;
    949		can->bec.rxerr = 0;
    950
    951		init_completion(&can->start_comp);
    952		init_completion(&can->flush_comp);
    953		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
    954			    0);
    955
     956		/* Disable bus load reporting */
    957		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
    958
    959		tx_npackets = ioread32(can->reg_base +
    960				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
    961		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
    962		      0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
    963			dev_err(&pcie->pci->dev,
    964				"Max Tx count is smaller than expected\n");
    965
    966			free_candev(netdev);
    967			return -ENODEV;
    968		}
    969
    970		can->can.clock.freq = pcie->freq;
    971		can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
    972		can->echo_idx = 0;
    973		spin_lock_init(&can->echo_lock);
    974		spin_lock_init(&can->lock);
    975		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
    976		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
    977
    978		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
    979		can->can.do_set_data_bittiming =
    980			kvaser_pciefd_set_data_bittiming;
    981
    982		can->can.do_set_mode = kvaser_pciefd_set_mode;
    983		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
    984
    985		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
    986					      CAN_CTRLMODE_FD |
    987					      CAN_CTRLMODE_FD_NON_ISO;
    988
    989		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
    990		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
    991			dev_err(&pcie->pci->dev,
    992				"CAN FD not supported as expected %d\n", i);
    993
    994			free_candev(netdev);
    995			return -ENODEV;
    996		}
    997
    998		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
    999			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
   1000
   1001		netdev->flags |= IFF_ECHO;
   1002
   1003		SET_NETDEV_DEV(netdev, &pcie->pci->dev);
   1004
   1005		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
   1006		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
   1007			  KVASER_PCIEFD_KCAN_IRQ_TFD,
   1008			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
   1009
   1010		pcie->can[i] = can;
   1011		kvaser_pciefd_pwm_start(can);
   1012	}
   1013
   1014	return 0;
   1015}
   1016
   1017static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
   1018{
   1019	int i;
   1020
   1021	for (i = 0; i < pcie->nr_channels; i++) {
   1022		int err = register_candev(pcie->can[i]->can.dev);
   1023
   1024		if (err) {
   1025			int j;
   1026
   1027			/* Unregister all successfully registered devices. */
   1028			for (j = 0; j < i; j++)
   1029				unregister_candev(pcie->can[j]->can.dev);
   1030			return err;
   1031		}
   1032	}
   1033
   1034	return 0;
   1035}
   1036
   1037static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
   1038					dma_addr_t addr, int offset)
   1039{
   1040	u32 word1, word2;
   1041
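        	/* A DMA map entry is two 32-bit words: the low address word, with
        	 * bit 0 flagging 64-bit addressing, followed by the high address word
        	 */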
   1042#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
   1043	word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
   1044	word2 = addr >> 32;
   1045#else
   1046	word1 = addr;
   1047	word2 = 0;
   1048#endif
   1049	iowrite32(word1, pcie->reg_base + offset);
   1050	iowrite32(word2, pcie->reg_base + offset + 4);
   1051}
   1052
   1053static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
   1054{
   1055	int i;
   1056	u32 srb_status;
   1057	dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
   1058
   1059	/* Disable the DMA */
   1060	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
   1061	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
   1062		unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;
   1063
   1064		pcie->dma_data[i] =
   1065			dmam_alloc_coherent(&pcie->pci->dev,
   1066					    KVASER_PCIEFD_DMA_SIZE,
   1067					    &dma_addr[i],
   1068					    GFP_KERNEL);
   1069
   1070		if (!pcie->dma_data[i] || !dma_addr[i]) {
   1071			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
   1072				KVASER_PCIEFD_DMA_SIZE);
   1073			return -ENOMEM;
   1074		}
   1075
   1076		kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
   1077	}
   1078
   1079	/* Reset Rx FIFO, and both DMA buffers */
   1080	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
   1081		  KVASER_PCIEFD_SRB_CMD_RDB1,
   1082		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
   1083
   1084	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
   1085	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
   1086		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
   1087		return -EIO;
   1088	}
   1089
   1090	/* Enable the DMA */
   1091	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
   1092		  pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
   1093
   1094	return 0;
   1095}
   1096
   1097static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
   1098{
   1099	u32 sysid, srb_status, build;
   1100	u8 sysid_nr_chan;
   1101	int ret;
   1102
   1103	ret = kvaser_pciefd_read_cfg(pcie);
   1104	if (ret)
   1105		return ret;
   1106
   1107	sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
   1108	sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
   1109	if (pcie->nr_channels != sysid_nr_chan) {
   1110		dev_err(&pcie->pci->dev,
   1111			"Number of channels does not match: %u vs %u\n",
   1112			pcie->nr_channels,
   1113			sysid_nr_chan);
   1114		return -ENODEV;
   1115	}
   1116
   1117	if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
   1118		pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;
   1119
   1120	build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
   1121	dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
   1122		(sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
   1123		sysid & 0xff,
   1124		(build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);
   1125
   1126	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
   1127	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
   1128		dev_err(&pcie->pci->dev,
   1129			"Hardware without DMA is not supported\n");
   1130		return -ENODEV;
   1131	}
   1132
   1133	pcie->bus_freq = ioread32(pcie->reg_base +
   1134				  KVASER_PCIEFD_SYSID_BUSFREQ_REG);
   1135	pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
   1136	pcie->freq_to_ticks_div = pcie->freq / 1000000;
   1137	if (pcie->freq_to_ticks_div == 0)
   1138		pcie->freq_to_ticks_div = 1;
   1139
   1140	/* Turn off all loopback functionality */
   1141	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
   1142	return ret;
   1143}
   1144
   1145static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
   1146					    struct kvaser_pciefd_rx_packet *p,
   1147					    __le32 *data)
   1148{
   1149	struct sk_buff *skb;
   1150	struct canfd_frame *cf;
   1151	struct can_priv *priv;
   1152	struct net_device_stats *stats;
   1153	struct skb_shared_hwtstamps *shhwtstamps;
   1154	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
   1155
   1156	if (ch_id >= pcie->nr_channels)
   1157		return -EIO;
   1158
   1159	priv = &pcie->can[ch_id]->can;
   1160	stats = &priv->dev->stats;
   1161
   1162	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
   1163		skb = alloc_canfd_skb(priv->dev, &cf);
   1164		if (!skb) {
   1165			stats->rx_dropped++;
   1166			return -ENOMEM;
   1167		}
   1168
   1169		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
   1170			cf->flags |= CANFD_BRS;
   1171
   1172		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
   1173			cf->flags |= CANFD_ESI;
   1174	} else {
   1175		skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
   1176		if (!skb) {
   1177			stats->rx_dropped++;
   1178			return -ENOMEM;
   1179		}
   1180	}
   1181
   1182	cf->can_id = p->header[0] & CAN_EFF_MASK;
   1183	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
   1184		cf->can_id |= CAN_EFF_FLAG;
   1185
   1186	cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
   1187
   1188	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
   1189		cf->can_id |= CAN_RTR_FLAG;
   1190	} else {
   1191		memcpy(cf->data, data, cf->len);
   1192
   1193		stats->rx_bytes += cf->len;
   1194	}
   1195	stats->rx_packets++;
   1196
   1197	shhwtstamps = skb_hwtstamps(skb);
   1198
   1199	shhwtstamps->hwtstamp =
   1200		ns_to_ktime(div_u64(p->timestamp * 1000,
   1201				    pcie->freq_to_ticks_div));
   1202
   1203	return netif_rx(skb);
   1204}
   1205
   1206static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
   1207				       struct can_frame *cf,
   1208				       enum can_state new_state,
   1209				       enum can_state tx_state,
   1210				       enum can_state rx_state)
   1211{
   1212	can_change_state(can->can.dev, cf, tx_state, rx_state);
   1213
   1214	if (new_state == CAN_STATE_BUS_OFF) {
   1215		struct net_device *ndev = can->can.dev;
   1216		unsigned long irq_flags;
   1217
   1218		spin_lock_irqsave(&can->lock, irq_flags);
   1219		netif_stop_queue(can->can.dev);
   1220		spin_unlock_irqrestore(&can->lock, irq_flags);
   1221
    1222		/* Prevent the CAN controller from automatically recovering from bus off */
   1223		if (!can->can.restart_ms) {
   1224			kvaser_pciefd_start_controller_flush(can);
   1225			can_bus_off(ndev);
   1226		}
   1227	}
   1228}
   1229
   1230static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
   1231					  struct can_berr_counter *bec,
   1232					  enum can_state *new_state,
   1233					  enum can_state *tx_state,
   1234					  enum can_state *rx_state)
   1235{
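        	/* Derive the state from the status flags and the usual CAN
        	 * fault-confinement thresholds: warning at 96, passive at 128,
        	 * bus off at 255 error counts
        	 */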
   1236	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
   1237	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
   1238		*new_state = CAN_STATE_BUS_OFF;
   1239	else if (bec->txerr >= 255 ||  bec->rxerr >= 255)
   1240		*new_state = CAN_STATE_BUS_OFF;
   1241	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
   1242		*new_state = CAN_STATE_ERROR_PASSIVE;
   1243	else if (bec->txerr >= 128 || bec->rxerr >= 128)
   1244		*new_state = CAN_STATE_ERROR_PASSIVE;
   1245	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
   1246		*new_state = CAN_STATE_ERROR_WARNING;
   1247	else if (bec->txerr >= 96 || bec->rxerr >= 96)
   1248		*new_state = CAN_STATE_ERROR_WARNING;
   1249	else
   1250		*new_state = CAN_STATE_ERROR_ACTIVE;
   1251
   1252	*tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
   1253	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
   1254}
   1255
   1256static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
   1257					struct kvaser_pciefd_rx_packet *p)
   1258{
   1259	struct can_berr_counter bec;
   1260	enum can_state old_state, new_state, tx_state, rx_state;
   1261	struct net_device *ndev = can->can.dev;
   1262	struct sk_buff *skb;
   1263	struct can_frame *cf = NULL;
   1264	struct skb_shared_hwtstamps *shhwtstamps;
   1265	struct net_device_stats *stats = &ndev->stats;
   1266
   1267	old_state = can->can.state;
   1268
   1269	bec.txerr = p->header[0] & 0xff;
   1270	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
   1271
   1272	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
   1273				      &rx_state);
   1274
   1275	skb = alloc_can_err_skb(ndev, &cf);
   1276
   1277	if (new_state != old_state) {
   1278		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
   1279					   rx_state);
   1280
   1281		if (old_state == CAN_STATE_BUS_OFF &&
   1282		    new_state == CAN_STATE_ERROR_ACTIVE &&
   1283		    can->can.restart_ms) {
   1284			can->can.can_stats.restarts++;
   1285			if (skb)
   1286				cf->can_id |= CAN_ERR_RESTARTED;
   1287		}
   1288	}
   1289
   1290	can->err_rep_cnt++;
   1291	can->can.can_stats.bus_error++;
   1292	if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
   1293		stats->tx_errors++;
   1294	else
   1295		stats->rx_errors++;
   1296
   1297	can->bec.txerr = bec.txerr;
   1298	can->bec.rxerr = bec.rxerr;
   1299
   1300	if (!skb) {
   1301		stats->rx_dropped++;
   1302		return -ENOMEM;
   1303	}
   1304
   1305	shhwtstamps = skb_hwtstamps(skb);
   1306	shhwtstamps->hwtstamp =
   1307		ns_to_ktime(div_u64(p->timestamp * 1000,
   1308				    can->kv_pcie->freq_to_ticks_div));
   1309	cf->can_id |= CAN_ERR_BUSERROR;
   1310
   1311	cf->data[6] = bec.txerr;
   1312	cf->data[7] = bec.rxerr;
   1313
   1314	netif_rx(skb);
   1315	return 0;
   1316}
   1317
   1318static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
   1319					     struct kvaser_pciefd_rx_packet *p)
   1320{
   1321	struct kvaser_pciefd_can *can;
   1322	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
   1323
   1324	if (ch_id >= pcie->nr_channels)
   1325		return -EIO;
   1326
   1327	can = pcie->can[ch_id];
   1328
   1329	kvaser_pciefd_rx_error_frame(can, p);
   1330	if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
    1331		/* Do not report more errors until bec_poll_timer expires */
   1332		kvaser_pciefd_disable_err_gen(can);
   1333	/* Start polling the error counters */
   1334	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
   1335	return 0;
   1336}
   1337
   1338static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
   1339					    struct kvaser_pciefd_rx_packet *p)
   1340{
   1341	struct can_berr_counter bec;
   1342	enum can_state old_state, new_state, tx_state, rx_state;
   1343
   1344	old_state = can->can.state;
   1345
   1346	bec.txerr = p->header[0] & 0xff;
   1347	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
   1348
   1349	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
   1350				      &rx_state);
   1351
   1352	if (new_state != old_state) {
   1353		struct net_device *ndev = can->can.dev;
   1354		struct sk_buff *skb;
   1355		struct can_frame *cf;
   1356		struct skb_shared_hwtstamps *shhwtstamps;
   1357
   1358		skb = alloc_can_err_skb(ndev, &cf);
   1359		if (!skb) {
   1360			struct net_device_stats *stats = &ndev->stats;
   1361
   1362			stats->rx_dropped++;
   1363			return -ENOMEM;
   1364		}
   1365
   1366		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
   1367					   rx_state);
   1368
   1369		if (old_state == CAN_STATE_BUS_OFF &&
   1370		    new_state == CAN_STATE_ERROR_ACTIVE &&
   1371		    can->can.restart_ms) {
   1372			can->can.can_stats.restarts++;
   1373			cf->can_id |= CAN_ERR_RESTARTED;
   1374		}
   1375
   1376		shhwtstamps = skb_hwtstamps(skb);
   1377		shhwtstamps->hwtstamp =
   1378			ns_to_ktime(div_u64(p->timestamp * 1000,
   1379					    can->kv_pcie->freq_to_ticks_div));
   1380
   1381		cf->data[6] = bec.txerr;
   1382		cf->data[7] = bec.rxerr;
   1383
   1384		netif_rx(skb);
   1385	}
   1386	can->bec.txerr = bec.txerr;
   1387	can->bec.rxerr = bec.rxerr;
   1388	/* Check if we need to poll the error counters */
   1389	if (bec.txerr || bec.rxerr)
   1390		mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
   1391
   1392	return 0;
   1393}
   1394
   1395static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
   1396					      struct kvaser_pciefd_rx_packet *p)
   1397{
   1398	struct kvaser_pciefd_can *can;
   1399	u8 cmdseq;
   1400	u32 status;
   1401	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
   1402
   1403	if (ch_id >= pcie->nr_channels)
   1404		return -EIO;
   1405
   1406	can = pcie->can[ch_id];
   1407
   1408	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
   1409	cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;
   1410
   1411	/* Reset done, start abort and flush */
   1412	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
   1413	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
   1414	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
   1415	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
   1416	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
   1417		u32 cmd;
   1418
   1419		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
   1420			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
   1421		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
   1422		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
   1423		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
   1424
   1425		iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
   1426			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
   1427	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
   1428		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
   1429		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
   1430		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
    1431		/* Reset detected, send end of flush if no packets are in the FIFO */
   1432		u8 count = ioread32(can->reg_base +
   1433				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
   1434
   1435		if (!count)
   1436			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
   1437				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
   1438	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
   1439		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
   1440		/* Response to status request received */
   1441		kvaser_pciefd_handle_status_resp(can, p);
   1442		if (can->can.state != CAN_STATE_BUS_OFF &&
   1443		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
   1444			mod_timer(&can->bec_poll_timer,
   1445				  KVASER_PCIEFD_BEC_POLL_FREQ);
   1446		}
   1447	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
   1448		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
    1449		/* Transition from reset to bus on detected */
   1450		if (!completion_done(&can->start_comp))
   1451			complete(&can->start_comp);
   1452	}
   1453
   1454	return 0;
   1455}
   1456
   1457static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
   1458					    struct kvaser_pciefd_rx_packet *p)
   1459{
   1460	struct kvaser_pciefd_can *can;
   1461	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
   1462
   1463	if (ch_id >= pcie->nr_channels)
   1464		return -EIO;
   1465
   1466	can = pcie->can[ch_id];
   1467
   1468	/* If this is the last flushed packet, send end of flush */
   1469	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
   1470		u8 count = ioread32(can->reg_base +
   1471				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
   1472
   1473		if (count == 0)
   1474			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
   1475				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
   1476	} else {
   1477		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
   1478		int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
   1479		struct net_device_stats *stats = &can->can.dev->stats;
   1480
   1481		stats->tx_bytes += dlc;
   1482		stats->tx_packets++;
   1483
   1484		if (netif_queue_stopped(can->can.dev))
   1485			netif_wake_queue(can->can.dev);
   1486	}
   1487
   1488	return 0;
   1489}
   1490
   1491static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
   1492					     struct kvaser_pciefd_rx_packet *p)
   1493{
   1494	struct sk_buff *skb;
   1495	struct net_device_stats *stats = &can->can.dev->stats;
   1496	struct can_frame *cf;
   1497
   1498	skb = alloc_can_err_skb(can->can.dev, &cf);
   1499
   1500	stats->tx_errors++;
   1501	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
   1502		if (skb)
   1503			cf->can_id |= CAN_ERR_LOSTARB;
   1504		can->can.can_stats.arbitration_lost++;
   1505	} else if (skb) {
   1506		cf->can_id |= CAN_ERR_ACK;
   1507	}
   1508
   1509	if (skb) {
   1510		cf->can_id |= CAN_ERR_BUSERROR;
   1511		netif_rx(skb);
   1512	} else {
   1513		stats->rx_dropped++;
   1514		netdev_warn(can->can.dev, "No memory left for err_skb\n");
   1515	}
   1516}
   1517
   1518static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
   1519					   struct kvaser_pciefd_rx_packet *p)
   1520{
   1521	struct kvaser_pciefd_can *can;
   1522	bool one_shot_fail = false;
   1523	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
   1524
   1525	if (ch_id >= pcie->nr_channels)
   1526		return -EIO;
   1527
   1528	can = pcie->can[ch_id];
   1529	/* Ignore control packet ACK */
   1530	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
   1531		return 0;
   1532
   1533	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
   1534		kvaser_pciefd_handle_nack_packet(can, p);
   1535		one_shot_fail = true;
   1536	}
   1537
   1538	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
   1539		netdev_dbg(can->can.dev, "Packet was flushed\n");
   1540	} else {
   1541		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
   1542		int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
   1543		u8 count = ioread32(can->reg_base +
   1544				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
   1545
   1546		if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
   1547		    netif_queue_stopped(can->can.dev))
   1548			netif_wake_queue(can->can.dev);
   1549
   1550		if (!one_shot_fail) {
   1551			struct net_device_stats *stats = &can->can.dev->stats;
   1552
   1553			stats->tx_bytes += dlc;
   1554			stats->tx_packets++;
   1555		}
   1556	}
   1557
   1558	return 0;
   1559}
   1560
   1561static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
   1562					      struct kvaser_pciefd_rx_packet *p)
   1563{
   1564	struct kvaser_pciefd_can *can;
   1565	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
   1566
   1567	if (ch_id >= pcie->nr_channels)
   1568		return -EIO;
   1569
   1570	can = pcie->can[ch_id];
   1571
   1572	if (!completion_done(&can->flush_comp))
   1573		complete(&can->flush_comp);
   1574
   1575	return 0;
   1576}
   1577
   1578static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
   1579				     int dma_buf)
   1580{
   1581	__le32 *buffer = pcie->dma_data[dma_buf];
   1582	__le64 timestamp;
   1583	struct kvaser_pciefd_rx_packet packet;
   1584	struct kvaser_pciefd_rx_packet *p = &packet;
   1585	u8 type;
   1586	int pos = *start_pos;
   1587	int size;
   1588	int ret = 0;
   1589
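        	/* Each packet in the DMA buffer starts with a size word, followed
        	 * by two header words, a 64-bit timestamp and, for data packets,
        	 * up to 16 words of payload
        	 */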
   1590	size = le32_to_cpu(buffer[pos++]);
   1591	if (!size) {
   1592		*start_pos = 0;
   1593		return 0;
   1594	}
   1595
   1596	p->header[0] = le32_to_cpu(buffer[pos++]);
   1597	p->header[1] = le32_to_cpu(buffer[pos++]);
   1598
   1599	/* Read 64-bit timestamp */
   1600	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
   1601	pos += 2;
   1602	p->timestamp = le64_to_cpu(timestamp);
   1603
   1604	type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
   1605	switch (type) {
   1606	case KVASER_PCIEFD_PACK_TYPE_DATA:
   1607		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
   1608		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
   1609			u8 data_len;
   1610
   1611			data_len = can_fd_dlc2len(p->header[1] >>
   1612					       KVASER_PCIEFD_RPACKET_DLC_SHIFT);
   1613			pos += DIV_ROUND_UP(data_len, 4);
   1614		}
   1615		break;
   1616
   1617	case KVASER_PCIEFD_PACK_TYPE_ACK:
   1618		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
   1619		break;
   1620
   1621	case KVASER_PCIEFD_PACK_TYPE_STATUS:
   1622		ret = kvaser_pciefd_handle_status_packet(pcie, p);
   1623		break;
   1624
   1625	case KVASER_PCIEFD_PACK_TYPE_ERROR:
   1626		ret = kvaser_pciefd_handle_error_packet(pcie, p);
   1627		break;
   1628
   1629	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
   1630		ret = kvaser_pciefd_handle_eack_packet(pcie, p);
   1631		break;
   1632
   1633	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
   1634		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
   1635		break;
   1636
   1637	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
   1638	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
   1639	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
   1640		dev_info(&pcie->pci->dev,
   1641			 "Received unexpected packet type 0x%08X\n", type);
   1642		break;
   1643
   1644	default:
   1645		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
   1646		ret = -EIO;
   1647		break;
   1648	}
   1649
   1650	if (ret)
   1651		return ret;
   1652
    1653	/* Position does not point to the end of the packet;
    1654	 * corrupted packet size?
    1655	 */
   1656	if ((*start_pos + size) != pos)
   1657		return -EIO;
   1658
   1659	/* Point to the next packet header, if any */
   1660	*start_pos = pos;
   1661
   1662	return ret;
   1663}
   1664
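        /* Drain one DMA buffer packet by packet until an error occurs, the
         * end-of-data marker is seen, or the end of the buffer is reached.
         */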
   1665static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
   1666{
   1667	int pos = 0;
   1668	int res = 0;
   1669
   1670	do {
   1671		res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
   1672	} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
   1673
   1674	return res;
   1675}
   1676
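        /* Shared receive buffer interrupt: DPD0/DPD1 signal that the
         * corresponding DMA buffer is ready; drain it and hand it back to
         * the hardware with the matching RDB command. Overflow and underflow
         * conditions are only logged. The final write acks the handled bits
         * (presumably write-one-to-clear).
         */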
   1677static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
   1678{
   1679	u32 irq;
   1680
   1681	irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
   1682	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
   1683		kvaser_pciefd_read_buffer(pcie, 0);
   1684		/* Reset DMA buffer 0 */
   1685		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
   1686			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
   1687	}
   1688
   1689	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
   1690		kvaser_pciefd_read_buffer(pcie, 1);
   1691		/* Reset DMA buffer 1 */
   1692		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
   1693			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
   1694	}
   1695
   1696	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
   1697	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
   1698	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
   1699	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
   1700		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
   1701
   1702	iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
   1703	return 0;
   1704}
   1705
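        /* Per-channel KCAN interrupt: log FIFO error conditions and, on a
         * TFD interrupt with the TX FIFO empty, issue an EFLUSH command.
         * The IRQ register is acked by writing the read value back
         * (presumably write-one-to-clear).
         */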
   1706static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
   1707{
   1708	u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
   1709
   1710	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
   1711		netdev_err(can->can.dev, "Tx FIFO overflow\n");
   1712
   1713	if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
   1714		u8 count = ioread32(can->reg_base +
   1715				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
   1716
   1717		if (count == 0)
   1718			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
   1719				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
   1720	}
   1721
   1722	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
   1723		netdev_err(can->can.dev,
    1724			   "Failed to change bittiming: not in reset mode\n");
   1725
   1726	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
    1727		netdev_err(can->can.dev, "CAN FD frame received in classic CAN mode\n");
   1728
   1729	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
   1730		netdev_err(can->can.dev, "Rx FIFO overflow\n");
   1731
   1732	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
   1733	return 0;
   1734}
   1735
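        /* Top-level handler for the shared PCI interrupt line: dispatch to
         * the receive-buffer handler and to the per-channel transmit
         * handlers, where board IRQ bit i maps to channel i, then ack at the
         * board level.
         */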
   1736static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
   1737{
   1738	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
   1739	u32 board_irq;
   1740	int i;
   1741
   1742	board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
   1743
   1744	if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
   1745		return IRQ_NONE;
   1746
   1747	if (board_irq & KVASER_PCIEFD_IRQ_SRB)
   1748		kvaser_pciefd_receive_irq(pcie);
   1749
   1750	for (i = 0; i < pcie->nr_channels; i++) {
   1751		if (!pcie->can[i]) {
   1752			dev_err(&pcie->pci->dev,
   1753				"IRQ mask points to unallocated controller\n");
   1754			break;
   1755		}
   1756
    1757		/* Check whether the board IRQ bit for channel i is set */
   1758		if (board_irq & (1 << i))
   1759			kvaser_pciefd_transmit_irq(pcie->can[i]);
   1760	}
   1761
   1762	iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
   1763	return IRQ_HANDLED;
   1764}
   1765
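        /* Probe error path helper: mask per-channel interrupts, stop the PWM
         * output and free the (not yet registered) candevs.
         */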
   1766static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
   1767{
   1768	int i;
   1769	struct kvaser_pciefd_can *can;
   1770
   1771	for (i = 0; i < pcie->nr_channels; i++) {
   1772		can = pcie->can[i];
   1773		if (can) {
   1774			iowrite32(0,
   1775				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
   1776			kvaser_pciefd_pwm_stop(can);
   1777			free_candev(can->can.dev);
   1778		}
   1779	}
   1780}
   1781
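        /* Probe: map BAR0, bring up the board and the DMA engine, create the
         * channel controllers, install the IRQ handler, unmask interrupts,
         * hand the DMA buffers to the hardware and finally register the
         * netdevs.
         */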
   1782static int kvaser_pciefd_probe(struct pci_dev *pdev,
   1783			       const struct pci_device_id *id)
   1784{
   1785	int err;
   1786	struct kvaser_pciefd *pcie;
   1787
   1788	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
   1789	if (!pcie)
   1790		return -ENOMEM;
   1791
   1792	pci_set_drvdata(pdev, pcie);
   1793	pcie->pci = pdev;
   1794
   1795	err = pci_enable_device(pdev);
   1796	if (err)
   1797		return err;
   1798
   1799	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
   1800	if (err)
   1801		goto err_disable_pci;
   1802
   1803	pcie->reg_base = pci_iomap(pdev, 0, 0);
   1804	if (!pcie->reg_base) {
   1805		err = -ENOMEM;
   1806		goto err_release_regions;
   1807	}
   1808
   1809	err = kvaser_pciefd_setup_board(pcie);
   1810	if (err)
   1811		goto err_pci_iounmap;
   1812
   1813	err = kvaser_pciefd_setup_dma(pcie);
   1814	if (err)
   1815		goto err_pci_iounmap;
   1816
   1817	pci_set_master(pdev);
   1818
   1819	err = kvaser_pciefd_setup_can_ctrls(pcie);
   1820	if (err)
   1821		goto err_teardown_can_ctrls;
   1822
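        	/* Install the IRQ handler before unmasking any interrupts, so a
        	 * shared-line or stray interrupt can never arrive unhandled.
        	 */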
    1823	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
    1824			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
    1825	if (err)
    1826		goto err_teardown_can_ctrls;
    1827
    1828	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
    1829		  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
    1830
    1831	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
    1832		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
    1833		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
    1834		  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);
    1835
    1836	/* Reset IRQ handling; it is expected to be disabled up to this point */
    1837	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
    1838		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
    1839	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
    1840		  pcie->reg_base + KVASER_PCIEFD_IEN_REG);
    1841
    1842	/* Ready the DMA buffers */
    1843	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
    1844		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
    1845	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
    1846		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
   1847
   1848	err = kvaser_pciefd_reg_candev(pcie);
   1849	if (err)
   1850		goto err_free_irq;
   1851
   1852	return 0;
   1853
    1854err_free_irq:
    1855	/* Disable PCI interrupts before freeing the shared handler */
    1856	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
    1857	free_irq(pcie->pci->irq, pcie);
   1856
   1857err_teardown_can_ctrls:
   1858	kvaser_pciefd_teardown_can_ctrls(pcie);
   1859	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
   1860	pci_clear_master(pdev);
   1861
   1862err_pci_iounmap:
   1863	pci_iounmap(pdev, pcie->reg_base);
   1864
   1865err_release_regions:
   1866	pci_release_regions(pdev);
   1867
   1868err_disable_pci:
   1869	pci_disable_device(pdev);
   1870
   1871	return err;
   1872}
   1873
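        /* Device removal: mask per-channel interrupts, unregister the
         * netdevs, stop the error-counter poll timer and the PWM output,
         * then free the candevs.
         */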
   1874static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
   1875{
   1876	struct kvaser_pciefd_can *can;
   1877	int i;
   1878
   1879	for (i = 0; i < pcie->nr_channels; i++) {
   1880		can = pcie->can[i];
   1881		if (can) {
   1882			iowrite32(0,
   1883				  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
   1884			unregister_candev(can->can.dev);
   1885			del_timer(&can->bec_poll_timer);
   1886			kvaser_pciefd_pwm_stop(can);
   1887			free_candev(can->can.dev);
   1888		}
   1889	}
   1890}
   1891
   1892static void kvaser_pciefd_remove(struct pci_dev *pdev)
   1893{
   1894	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
   1895
   1896	kvaser_pciefd_remove_all_ctrls(pcie);
   1897
   1898	/* Turn off IRQ generation */
   1899	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
   1900	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
   1901		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
   1902	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
   1903
   1904	free_irq(pcie->pci->irq, pcie);
   1905
   1906	pci_clear_master(pdev);
   1907	pci_iounmap(pdev, pcie->reg_base);
   1908	pci_release_regions(pdev);
   1909	pci_disable_device(pdev);
   1910}
   1911
   1912static struct pci_driver kvaser_pciefd = {
   1913	.name = KVASER_PCIEFD_DRV_NAME,
   1914	.id_table = kvaser_pciefd_id_table,
   1915	.probe = kvaser_pciefd_probe,
   1916	.remove = kvaser_pciefd_remove,
   1917};
   1918
    1919module_pci_driver(kvaser_pciefd);