cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i2c.c (9459B)


// SPDX-License-Identifier: GPL-2.0
/*
 * i2c.c - Hardware Dependent Module for I2C Interface
 *
 * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/most.h>

enum { CH_RX, CH_TX, NUM_CHANNELS };

#define MAX_BUFFERS_CONTROL 32
#define MAX_BUF_SIZE_CONTROL 256

/**
 * list_first_mbo - get the first mbo from a list
 * @ptr:	the list head to take the mbo from.
 */
#define list_first_mbo(ptr) \
	list_first_entry(ptr, struct mbo, list)

static unsigned int polling_rate;
module_param(polling_rate, uint, 0644);
MODULE_PARM_DESC(polling_rate, "Polling rate [Hz]. Default = 0 (use IRQ)");
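/* A polling_rate of 0 (the default) keeps the RX channel interrupt driven;
 * a non-zero rate in Hz switches the RX path to delayed-work polling.
 */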

struct hdm_i2c {
	struct most_interface most_iface;
	struct most_channel_capability capabilities[NUM_CHANNELS];
	struct i2c_client *client;
	struct rx {
		struct delayed_work dwork;
		struct list_head list;
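		/* set by the ISR after it masks the IRQ, cleared when the RX
		 * work re-enables it
		 */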
		bool int_disabled;
		unsigned int delay;
	} rx;
	char name[64];
};

#define to_hdm(iface) container_of(iface, struct hdm_i2c, most_iface)

static irqreturn_t most_irq_handler(int, void *);
static void pending_rx_work(struct work_struct *);

/**
 * configure_channel - called from MOST core to configure a channel
 * @most_iface: interface the channel belongs to
 * @ch_idx: channel to be configured
 * @channel_config: structure that holds the configuration information
 *
 * Return 0 on success, negative on failure.
 *
 * Receives configuration information from the MOST core and initializes the
 * corresponding channel.
 */
static int configure_channel(struct most_interface *most_iface,
			     int ch_idx,
			     struct most_channel_config *channel_config)
{
	int ret;
	struct hdm_i2c *dev = to_hdm(most_iface);
	unsigned int delay, pr;

	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);

	if (channel_config->data_type != MOST_CH_CONTROL) {
		pr_err("bad data type for channel %d\n", ch_idx);
		return -EPERM;
	}

	if (channel_config->direction != dev->capabilities[ch_idx].direction) {
		pr_err("bad direction for channel %d\n", ch_idx);
		return -EPERM;
	}

	if (channel_config->direction == MOST_CH_RX) {
		if (!polling_rate) {
			if (dev->client->irq <= 0) {
				pr_err("bad irq: %d\n", dev->client->irq);
				return -ENOENT;
			}
			dev->rx.int_disabled = false;
			ret = request_irq(dev->client->irq, most_irq_handler, 0,
					  dev->client->name, dev);
			if (ret) {
				pr_err("request_irq(%d) failed: %d\n",
				       dev->client->irq, ret);
				return ret;
			}
		} else {
			delay = msecs_to_jiffies(MSEC_PER_SEC / polling_rate);
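			/* Clamp the interval to at least one jiffy; very high
			 * rates would otherwise give a zero delay (and a
			 * division by zero in the rate report below).
			 */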
			dev->rx.delay = delay ? delay : 1;
			pr = MSEC_PER_SEC / jiffies_to_msecs(dev->rx.delay);
			pr_info("polling rate is %u Hz\n", pr);
		}
	}

	return 0;
}

/**
 * enqueue - called from MOST core to enqueue a buffer for data transfer
 * @most_iface: intended interface
 * @ch_idx: ID of the channel the buffer is intended for
 * @mbo: pointer to the buffer object
 *
 * Return 0 on success, negative on failure.
 *
 * Transmit the data over I2C if it is a "write" request, or push the buffer
 * onto the RX list if it is a "read" request.
 */
static int enqueue(struct most_interface *most_iface,
		   int ch_idx, struct mbo *mbo)
{
	struct hdm_i2c *dev = to_hdm(most_iface);
	int ret;

	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);

	if (ch_idx == CH_RX) {
		/* RX */
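		/* Mask the interrupt and flush any running RX work so the
		 * buffer can be queued without racing the ISR, then kick the
		 * RX work directly if an interrupt is already pending or
		 * polling is in use.
		 */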
		if (!polling_rate)
			disable_irq(dev->client->irq);
		cancel_delayed_work_sync(&dev->rx.dwork);
		list_add_tail(&mbo->list, &dev->rx.list);
		if (dev->rx.int_disabled || polling_rate)
			pending_rx_work(&dev->rx.dwork.work);
		if (!polling_rate)
			enable_irq(dev->client->irq);
	} else {
		/* TX */
		ret = i2c_master_send(dev->client, mbo->virt_address,
				      mbo->buffer_length);
		if (ret <= 0) {
			mbo->processed_length = 0;
			mbo->status = MBO_E_INVAL;
		} else {
			mbo->processed_length = mbo->buffer_length;
			mbo->status = MBO_SUCCESS;
		}
		mbo->complete(mbo);
	}

	return 0;
}

/**
 * poison_channel - called from MOST core to poison buffers of a channel
 * @most_iface: pointer to the interface the channel to be poisoned belongs to
 * @ch_idx: corresponding channel ID
 *
 * Return 0 on success, negative on failure.
 *
 * If the channel direction is RX, complete the buffers in the list with
 * status MBO_E_CLOSE.
 */
static int poison_channel(struct most_interface *most_iface,
			  int ch_idx)
{
	struct hdm_i2c *dev = to_hdm(most_iface);
	struct mbo *mbo;

	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);

	if (ch_idx == CH_RX) {
		if (!polling_rate)
			free_irq(dev->client->irq, dev);
		cancel_delayed_work_sync(&dev->rx.dwork);

		while (!list_empty(&dev->rx.list)) {
			mbo = list_first_mbo(&dev->rx.list);
			list_del(&mbo->list);

			mbo->processed_length = 0;
			mbo->status = MBO_E_CLOSE;
			mbo->complete(mbo);
		}
	}

	return 0;
}

static void do_rx_work(struct hdm_i2c *dev)
{
	struct mbo *mbo;
	unsigned char msg[MAX_BUF_SIZE_CONTROL];
	int ret;
	u16 pml, data_size;

	/* Read PML (2 bytes) */
	ret = i2c_master_recv(dev->client, msg, 2);
	if (ret <= 0) {
		pr_err("Failed to receive PML\n");
		return;
	}

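	/* The 16-bit PML is transmitted big-endian; a value of zero means
	 * there is no port message to read.
	 */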
	pml = (msg[0] << 8) | msg[1];
	if (!pml)
		return;

	data_size = pml + 2;
	if (data_size > sizeof(msg)) {
		pr_err("Port Message too long: %d bytes\n", data_size);
		return;
	}

	/* Read the whole message, including PML */
	ret = i2c_master_recv(dev->client, msg, data_size);
	if (ret <= 0) {
		pr_err("Failed to receive a Port Message\n");
		return;
	}

	mbo = list_first_mbo(&dev->rx.list);
	list_del(&mbo->list);

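	/* Copy the message into the buffer, truncated if it does not fit. */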
	mbo->processed_length = min(data_size, mbo->buffer_length);
	memcpy(mbo->virt_address, msg, mbo->processed_length);
	mbo->status = MBO_SUCCESS;
	mbo->complete(mbo);
}

/**
 * pending_rx_work - Read pending messages through I2C
 * @work: definition of this work item
 *
 * Scheduled by the interrupt service routine most_irq_handler(), called
 * directly from enqueue(), and rescheduled by itself in polling mode.
 */
static void pending_rx_work(struct work_struct *work)
{
	struct hdm_i2c *dev = container_of(work, struct hdm_i2c, rx.dwork.work);

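	/* Nothing to do without a queued RX buffer; enqueue() will run this
	 * work again once one arrives.
	 */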
	if (list_empty(&dev->rx.list))
		return;

	do_rx_work(dev);

	if (polling_rate) {
		schedule_delayed_work(&dev->rx.dwork, dev->rx.delay);
	} else {
		dev->rx.int_disabled = false;
		enable_irq(dev->client->irq);
	}
}

/*
 * most_irq_handler - Interrupt Service Routine
 * @irq: irq number
 * @_dev: private data
 *
 * Schedules the delayed RX work.
 *
 * By default the interrupt line is active low. Once the device generates an
 * interrupt, it keeps the line low until the driver clears the interrupt by
 * reading the PMP message. Since the I2C read is done in a work queue, the
 * interrupt line must be disabled temporarily to avoid the ISR being called
 * repeatedly. The interrupt is re-enabled in the work queue after the message
 * has been read.
 *
 * Note: If the interrupt line were used in falling-edge mode, interrupts
 * could be missed while the ISR is executing.
 */
static irqreturn_t most_irq_handler(int irq, void *_dev)
{
	struct hdm_i2c *dev = _dev;

	disable_irq_nosync(irq);
	dev->rx.int_disabled = true;
	schedule_delayed_work(&dev->rx.dwork, 0);

	return IRQ_HANDLED;
}

/*
 * i2c_probe - i2c probe handler
 * @client: i2c client device structure
 * @id: i2c client device id
 *
 * Return 0 on success, negative on failure.
 *
 * Register the i2c client device as a MOST interface
 */
static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct hdm_i2c *dev;
	int ret, i;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* ID format: i2c-<bus>-<address> */
	snprintf(dev->name, sizeof(dev->name), "i2c-%d-%04x",
		 client->adapter->nr, client->addr);

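	/* Advertise one RX and one TX control channel to the MOST core. */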
	for (i = 0; i < NUM_CHANNELS; i++) {
		dev->capabilities[i].data_type = MOST_CH_CONTROL;
		dev->capabilities[i].num_buffers_packet = MAX_BUFFERS_CONTROL;
		dev->capabilities[i].buffer_size_packet = MAX_BUF_SIZE_CONTROL;
	}
	dev->capabilities[CH_RX].direction = MOST_CH_RX;
	dev->capabilities[CH_RX].name_suffix = "rx";
	dev->capabilities[CH_TX].direction = MOST_CH_TX;
	dev->capabilities[CH_TX].name_suffix = "tx";

	dev->most_iface.interface = ITYPE_I2C;
	dev->most_iface.description = dev->name;
	dev->most_iface.num_channels = NUM_CHANNELS;
	dev->most_iface.channel_vector = dev->capabilities;
	dev->most_iface.configure = configure_channel;
	dev->most_iface.enqueue = enqueue;
	dev->most_iface.poison_channel = poison_channel;

	INIT_LIST_HEAD(&dev->rx.list);

	INIT_DELAYED_WORK(&dev->rx.dwork, pending_rx_work);

	dev->client = client;
	i2c_set_clientdata(client, dev);

	ret = most_register_interface(&dev->most_iface);
	if (ret) {
		pr_err("Failed to register i2c as a MOST interface\n");
		kfree(dev);
		return ret;
	}

	return 0;
}

/*
 * i2c_remove - i2c remove handler
 * @client: i2c client device structure
 *
 * Return 0 on success.
 *
 * Unregister the i2c client device as a MOST interface
 */
static int i2c_remove(struct i2c_client *client)
{
	struct hdm_i2c *dev = i2c_get_clientdata(client);

	most_deregister_interface(&dev->most_iface);
	kfree(dev);

	return 0;
}

static const struct i2c_device_id i2c_id[] = {
	{ "most_i2c", 0 },
	{ }, /* Terminating entry */
};

MODULE_DEVICE_TABLE(i2c, i2c_id);

static struct i2c_driver i2c_driver = {
	.driver = {
		.name = "hdm_i2c",
	},
	.probe = i2c_probe,
	.remove = i2c_remove,
	.id_table = i2c_id,
};

module_i2c_driver(i2c_driver);

MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_DESCRIPTION("I2C Hardware Dependent Module");
MODULE_LICENSE("GPL");
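
The driver only binds to i2c clients whose name matches the "most_i2c" entry
in i2c_id above, so such a client has to be instantiated by platform code or a
device tree node before i2c_probe() runs. A minimal sketch of doing this from
another module, assuming bus number 0 and client address 0x20 (both
placeholders, as is the helper name):

#include <linux/err.h>
#include <linux/i2c.h>

static struct i2c_client *most_client;

static int most_i2c_instantiate(void)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("most_i2c", 0x20),	/* placeholder address */
		/* .irq left unset: load the driver with polling_rate > 0 */
	};
	struct i2c_adapter *adap = i2c_get_adapter(0);	/* placeholder bus */

	if (!adap)
		return -ENODEV;

	most_client = i2c_new_client_device(adap, &info);
	i2c_put_adapter(adap);

	return IS_ERR(most_client) ? PTR_ERR(most_client) : 0;
}

With .irq left at zero, the RX channel only configures successfully when the
module is loaded with polling_rate > 0; a wired interrupt would be passed in
info.irq instead.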