cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

soc-generic-dmaengine-pcm.c (14051B)


// SPDX-License-Identifier: GPL-2.0+
//
//  Copyright (C) 2013, Analog Devices Inc.
//	Author: Lars-Peter Clausen <lars@metafoo.de>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>

#include <sound/dmaengine_pcm.h>

static unsigned int prealloc_buffer_size_kbytes = 512;
module_param(prealloc_buffer_size_kbytes, uint, 0444);
MODULE_PARM_DESC(prealloc_buffer_size_kbytes, "Preallocate DMA buffer size (KB).");

/*
 * The platform's dmaengine driver does not support reporting the number of
 * bytes that are still left to transfer.
 */
#define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31)

static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
	struct snd_pcm_substream *substream)
{
	if (!pcm->chan[substream->stream])
		return NULL;

	return pcm->chan[substream->stream]->device->dev;
}

/**
 * snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback
 * @substream: PCM substream
 * @params: hw_params
 * @slave_config: DMA slave config to prepare
 *
 * This function can be used as a generic prepare_slave_config callback for
 * platforms which make use of the snd_dmaengine_dai_dma_data struct for their
 * DAI DMA data. Internally the function will first call
 * snd_hwparams_to_dma_slave_config to fill in the slave config based on the
 * hw_params, followed by snd_dmaengine_pcm_set_config_from_dai_data to fill in the
 * remaining fields based on the DAI DMA data.
 */
int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_dmaengine_dai_dma_data *dma_data;
	int ret;

	if (rtd->num_cpus > 1) {
		dev_err(rtd->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return -EINVAL;
	}

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
	if (ret)
		return ret;

	snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data,
		slave_config);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config);

static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
	struct dma_slave_config slave_config;
	int ret;

	if (!pcm->config->prepare_slave_config)
		return 0;

	memset(&slave_config, 0, sizeof(slave_config));

	ret = pcm->config->prepare_slave_config(substream, params, &slave_config);
	if (ret)
		return ret;

	return dmaengine_slave_config(chan, &slave_config);
}

static int
dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
	struct dma_chan *chan = pcm->chan[substream->stream];
	struct snd_dmaengine_dai_dma_data *dma_data;
	struct snd_pcm_hardware hw;

	if (rtd->num_cpus > 1) {
		dev_err(rtd->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return -EINVAL;
	}

	if (pcm->config->pcm_hardware)
		return snd_soc_set_runtime_hwparams(substream,
				pcm->config->pcm_hardware);

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	memset(&hw, 0, sizeof(hw));
	hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
			SNDRV_PCM_INFO_INTERLEAVED;
	hw.periods_min = 2;
	hw.periods_max = UINT_MAX;
	hw.period_bytes_min = dma_data->maxburst * DMA_SLAVE_BUSWIDTH_8_BYTES;
	if (!hw.period_bytes_min)
		hw.period_bytes_min = 256;
	hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
	hw.buffer_bytes_max = SIZE_MAX;
	hw.fifo_size = dma_data->fifo_size;

	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
		hw.info |= SNDRV_PCM_INFO_BATCH;

	/**
	 * FIXME: Remove the return value check to align with the code
	 * before adding snd_dmaengine_pcm_refine_runtime_hwparams
	 * function.
	 */
	snd_dmaengine_pcm_refine_runtime_hwparams(substream,
						  dma_data,
						  &hw,
						  chan);

	return snd_soc_set_runtime_hwparams(substream, &hw);
}

static int dmaengine_pcm_open(struct snd_soc_component *component,
			      struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct dma_chan *chan = pcm->chan[substream->stream];
	int ret;

	ret = dmaengine_pcm_set_runtime_hwparams(component, substream);
	if (ret)
		return ret;

	return snd_dmaengine_pcm_open(substream, chan);
}

static int dmaengine_pcm_close(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream)
{
	return snd_dmaengine_pcm_close(substream);
}

static int dmaengine_pcm_trigger(struct snd_soc_component *component,
				 struct snd_pcm_substream *substream, int cmd)
{
	return snd_dmaengine_pcm_trigger(substream, cmd);
}

static struct dma_chan *dmaengine_pcm_compat_request_channel(
	struct snd_soc_component *component,
	struct snd_soc_pcm_runtime *rtd,
	struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct snd_dmaengine_dai_dma_data *dma_data;

	if (rtd->num_cpus > 1) {
		dev_err(rtd->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return NULL;
	}

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0])
		return pcm->chan[0];

	if (pcm->config->compat_request_channel)
		return pcm->config->compat_request_channel(rtd, substream);

	return snd_dmaengine_pcm_request_channel(pcm->config->compat_filter_fn,
						 dma_data->filter_data);
}

static bool dmaengine_pcm_can_report_residue(struct device *dev,
	struct dma_chan *chan)
{
	struct dma_slave_caps dma_caps;
	int ret;

	ret = dma_get_slave_caps(chan, &dma_caps);
	if (ret != 0) {
		dev_warn(dev, "Failed to get DMA channel capabilities, falling back to period counting: %d\n",
			 ret);
		return false;
	}

	if (dma_caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
		return false;

	return true;
}

static int dmaengine_pcm_new(struct snd_soc_component *component,
			     struct snd_soc_pcm_runtime *rtd)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	const struct snd_dmaengine_pcm_config *config = pcm->config;
	struct device *dev = component->dev;
	size_t prealloc_buffer_size;
	size_t max_buffer_size;
	unsigned int i;

	if (config->prealloc_buffer_size)
		prealloc_buffer_size = config->prealloc_buffer_size;
	else
		prealloc_buffer_size = prealloc_buffer_size_kbytes * 1024;

	if (config->pcm_hardware && config->pcm_hardware->buffer_bytes_max)
		max_buffer_size = config->pcm_hardware->buffer_bytes_max;
	else
		max_buffer_size = SIZE_MAX;

	for_each_pcm_streams(i) {
		struct snd_pcm_substream *substream = rtd->pcm->streams[i].substream;
		if (!substream)
			continue;

		if (!pcm->chan[i] && config->chan_names[i])
			pcm->chan[i] = dma_request_slave_channel(dev,
				config->chan_names[i]);

		if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
			pcm->chan[i] = dmaengine_pcm_compat_request_channel(
				component, rtd, substream);
		}

		if (!pcm->chan[i]) {
			dev_err(component->dev,
				"Missing dma channel for stream: %d\n", i);
			return -EINVAL;
		}

		snd_pcm_set_managed_buffer(substream,
				SNDRV_DMA_TYPE_DEV_IRAM,
				dmaengine_dma_dev(pcm, substream),
				prealloc_buffer_size,
				max_buffer_size);

		if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
			pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;

		if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
			strscpy_pad(rtd->pcm->streams[i].pcm->name,
				    rtd->pcm->streams[i].pcm->id,
				    sizeof(rtd->pcm->streams[i].pcm->name));
		}
	}

	return 0;
}

static snd_pcm_uframes_t dmaengine_pcm_pointer(
	struct snd_soc_component *component,
	struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);

	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
		return snd_dmaengine_pcm_pointer_no_residue(substream);
	else
		return snd_dmaengine_pcm_pointer(substream);
}

static int dmaengine_copy_user(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream,
			       int channel, unsigned long hwoff,
			       void __user *buf, unsigned long bytes)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	int (*process)(struct snd_pcm_substream *substream,
		       int channel, unsigned long hwoff,
		       void *buf, unsigned long bytes) = pcm->config->process;
	bool is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	void *dma_ptr = runtime->dma_area + hwoff +
			channel * (runtime->dma_bytes / runtime->channels);

	if (is_playback)
		if (copy_from_user(dma_ptr, buf, bytes))
			return -EFAULT;

	if (process) {
		int ret = process(substream, channel, hwoff, (__force void *)buf, bytes);
		if (ret < 0)
			return ret;
	}

	if (!is_playback)
		if (copy_to_user(buf, dma_ptr, bytes))
			return -EFAULT;

	return 0;
}

static const struct snd_soc_component_driver dmaengine_pcm_component = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.pcm_construct	= dmaengine_pcm_new,
};

static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.copy_user	= dmaengine_copy_user,
	.pcm_construct	= dmaengine_pcm_new,
};

static const char * const dmaengine_pcm_dma_channel_names[] = {
	[SNDRV_PCM_STREAM_PLAYBACK] = "tx",
	[SNDRV_PCM_STREAM_CAPTURE] = "rx",
};

static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
	struct device *dev, const struct snd_dmaengine_pcm_config *config)
{
	unsigned int i;
	const char *name;
	struct dma_chan *chan;

	if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || (!dev->of_node &&
	    !(config->dma_dev && config->dma_dev->of_node)))
		return 0;

	if (config->dma_dev) {
		/*
		 * If this warning is seen, it probably means that your Linux
		 * device structure does not match your HW device structure.
		 * It would be best to refactor the Linux device structure to
		 * correctly match the HW structure.
		 */
		dev_warn(dev, "DMA channels sourced from device %s",
			 dev_name(config->dma_dev));
		dev = config->dma_dev;
	}

	for_each_pcm_streams(i) {
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			name = "rx-tx";
		else
			name = dmaengine_pcm_dma_channel_names[i];
		if (config->chan_names[i])
			name = config->chan_names[i];
		chan = dma_request_chan(dev, name);
		if (IS_ERR(chan)) {
			/*
			 * Only report probe deferral errors, channels
			 * might not be present for devices that
			 * support only TX or only RX.
			 */
			if (PTR_ERR(chan) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			pcm->chan[i] = NULL;
		} else {
			pcm->chan[i] = chan;
		}
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			break;
	}

	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
		pcm->chan[1] = pcm->chan[0];

	return 0;
}

static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
{
	unsigned int i;

	for_each_pcm_streams(i) {
		if (!pcm->chan[i])
			continue;
		dma_release_channel(pcm->chan[i]);
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			break;
	}
}

static const struct snd_dmaengine_pcm_config snd_dmaengine_pcm_default_config = {
	.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};

/**
 * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
 * @dev: The parent device for the PCM device
 * @config: Platform specific PCM configuration
 * @flags: Platform specific quirks
 */
int snd_dmaengine_pcm_register(struct device *dev,
	const struct snd_dmaengine_pcm_config *config, unsigned int flags)
{
	const struct snd_soc_component_driver *driver;
	struct dmaengine_pcm *pcm;
	int ret;

	pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
	if (!pcm)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	pcm->component.debugfs_prefix = "dma";
#endif
	if (!config)
		config = &snd_dmaengine_pcm_default_config;
	pcm->config = config;
	pcm->flags = flags;

	ret = dmaengine_pcm_request_chan_of(pcm, dev, config);
	if (ret)
		goto err_free_dma;

	if (config->process)
		driver = &dmaengine_pcm_component_process;
	else
		driver = &dmaengine_pcm_component;

	ret = snd_soc_component_initialize(&pcm->component, driver, dev);
	if (ret)
		goto err_free_dma;

	ret = snd_soc_add_component(&pcm->component, NULL, 0);
	if (ret)
		goto err_free_dma;

	return 0;

err_free_dma:
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);

/**
 * snd_dmaengine_pcm_unregister - Removes a dmaengine based PCM device
 * @dev: Parent device the PCM was registered with
 *
 * Removes a dmaengine based PCM device previously registered with
 * snd_dmaengine_pcm_register.
 */
void snd_dmaengine_pcm_unregister(struct device *dev)
{
	struct snd_soc_component *component;
	struct dmaengine_pcm *pcm;

	component = snd_soc_lookup_component(dev, SND_DMAENGINE_PCM_DRV_NAME);
	if (!component)
		return;

	pcm = soc_component_to_pcm(component);

	snd_soc_unregister_component_by_driver(dev, component->driver);
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);

MODULE_LICENSE("GPL");
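
For context, the exported entry points above (snd_dmaengine_pcm_register, snd_dmaengine_pcm_unregister, and the generic snd_dmaengine_pcm_prepare_slave_config callback) are what platform drivers call to obtain this generic dmaengine PCM component. Below is a minimal sketch of such a caller; the "example" names, channel names, and buffer size are hypothetical and not taken from this file, and a real driver would typically wire these functions into a struct platform_driver and may use a managed (devm_) registration variant instead.

/*
 * Hypothetical platform-driver glue showing one way the registration API
 * above might be used. All "example_*" identifiers and config values are
 * assumptions for illustration only.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/dmaengine_pcm.h>

static const struct snd_dmaengine_pcm_config example_pcm_config = {
	/* reuse the generic prepare_slave_config helper exported above */
	.prepare_slave_config	= snd_dmaengine_pcm_prepare_slave_config,
	/* DMA channel names requested from the device tree */
	.chan_names[SNDRV_PCM_STREAM_PLAYBACK]	= "tx",
	.chan_names[SNDRV_PCM_STREAM_CAPTURE]	= "rx",
	/* overrides the prealloc_buffer_size_kbytes module parameter */
	.prealloc_buffer_size	= 64 * 1024,
};

static int example_pcm_probe(struct platform_device *pdev)
{
	/* registers the generic dmaengine PCM component for this device */
	return snd_dmaengine_pcm_register(&pdev->dev, &example_pcm_config, 0);
}

static int example_pcm_remove(struct platform_device *pdev)
{
	/* releases the component and its DMA channels */
	snd_dmaengine_pcm_unregister(&pdev->dev);
	return 0;
}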