cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

aio-dma.c (7640B)


      1// SPDX-License-Identifier: GPL-2.0
      2//
      3// Socionext UniPhier AIO DMA driver.
      4//
      5// Copyright (c) 2016-2018 Socionext Inc.
      6
      7#include <linux/dma-mapping.h>
      8#include <linux/errno.h>
      9#include <linux/kernel.h>
     10#include <linux/module.h>
     11#include <sound/core.h>
     12#include <sound/pcm.h>
     13#include <sound/soc.h>
     14
     15#include "aio.h"
     16
/*
 * PCM capabilities advertised to the ALSA core: mmap-capable,
 * interleaved access only, with the period/buffer size limits below.
 */
static struct snd_pcm_hardware uniphier_aiodma_hw = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED,
	.period_bytes_min = 256,
	.period_bytes_max = 4096,
	.periods_min      = 4,
	.periods_max      = 1024,
	.buffer_bytes_max = 128 * 1024,
};
     27
/*
 * Service a DMA interrupt for a PCM stream.
 *
 * Advances the ring-buffer interrupt threshold by one period, resyncs
 * the ring-buffer state with the hardware, acknowledges the interrupt,
 * and finally tells the ALSA core that a period has elapsed.
 *
 * Called from aiodma_irq() in hard-IRQ context, hence the plain
 * spin_lock() (interrupts are already disabled here).
 */
static void aiodma_pcm_irq(struct uniphier_aio_sub *sub)
{
	struct snd_pcm_runtime *runtime = sub->substream->runtime;
	/* size of one period in bytes */
	int bytes = runtime->period_size *
		runtime->channels * samples_to_bytes(runtime, 1);
	int ret;

	spin_lock(&sub->lock);
	/* move the threshold one period ahead; keep the old one on failure */
	ret = aiodma_rb_set_threshold(sub, runtime->dma_bytes,
				      sub->threshold + bytes);
	if (!ret)
		sub->threshold += bytes;

	aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes);
	aiodma_rb_clear_irq(sub);
	spin_unlock(&sub->lock);

	snd_pcm_period_elapsed(sub->substream);
}
     47
/*
 * Service a DMA interrupt for a compress-offload stream.
 *
 * Mirror of aiodma_pcm_irq(): advances the threshold by one fragment,
 * resyncs the ring buffer, clears the interrupt, and notifies the
 * compress core that a fragment has elapsed.  Runs in hard-IRQ
 * context, hence the plain spin_lock().
 */
static void aiodma_compr_irq(struct uniphier_aio_sub *sub)
{
	struct snd_compr_runtime *runtime = sub->cstream->runtime;
	/* compress streams advance one fragment at a time */
	int bytes = runtime->fragment_size;
	int ret;

	spin_lock(&sub->lock);
	/* move the threshold one fragment ahead; keep the old one on failure */
	ret = aiodma_rb_set_threshold(sub, sub->compr_bytes,
				      sub->threshold + bytes);
	if (!ret)
		sub->threshold += bytes;

	aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
	aiodma_rb_clear_irq(sub);
	spin_unlock(&sub->lock);

	snd_compr_fragment_elapsed(sub->cstream);
}
     66
     67static irqreturn_t aiodma_irq(int irq, void *p)
     68{
     69	struct platform_device *pdev = p;
     70	struct uniphier_aio_chip *chip = platform_get_drvdata(pdev);
     71	irqreturn_t ret = IRQ_NONE;
     72	int i, j;
     73
     74	for (i = 0; i < chip->num_aios; i++) {
     75		struct uniphier_aio *aio = &chip->aios[i];
     76
     77		for (j = 0; j < ARRAY_SIZE(aio->sub); j++) {
     78			struct uniphier_aio_sub *sub = &aio->sub[j];
     79
     80			/* Skip channel that does not trigger */
     81			if (!sub->running || !aiodma_rb_is_irq(sub))
     82				continue;
     83
     84			if (sub->substream)
     85				aiodma_pcm_irq(sub);
     86			if (sub->cstream)
     87				aiodma_compr_irq(sub);
     88
     89			ret = IRQ_HANDLED;
     90		}
     91	}
     92
     93	return ret;
     94}
     95
     96static int uniphier_aiodma_open(struct snd_soc_component *component,
     97				struct snd_pcm_substream *substream)
     98{
     99	struct snd_pcm_runtime *runtime = substream->runtime;
    100
    101	snd_soc_set_runtime_hwparams(substream, &uniphier_aiodma_hw);
    102
    103	return snd_pcm_hw_constraint_step(runtime, 0,
    104		SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256);
    105}
    106
    107static int uniphier_aiodma_prepare(struct snd_soc_component *component,
    108				   struct snd_pcm_substream *substream)
    109{
    110	struct snd_pcm_runtime *runtime = substream->runtime;
    111	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
    112	struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
    113	struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
    114	int bytes = runtime->period_size *
    115		runtime->channels * samples_to_bytes(runtime, 1);
    116	unsigned long flags;
    117	int ret;
    118
    119	ret = aiodma_ch_set_param(sub);
    120	if (ret)
    121		return ret;
    122
    123	spin_lock_irqsave(&sub->lock, flags);
    124	ret = aiodma_rb_set_buffer(sub, runtime->dma_addr,
    125				   runtime->dma_addr + runtime->dma_bytes,
    126				   bytes);
    127	spin_unlock_irqrestore(&sub->lock, flags);
    128	if (ret)
    129		return ret;
    130
    131	return 0;
    132}
    133
    134static int uniphier_aiodma_trigger(struct snd_soc_component *component,
    135				   struct snd_pcm_substream *substream, int cmd)
    136{
    137	struct snd_pcm_runtime *runtime = substream->runtime;
    138	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
    139	struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
    140	struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
    141	struct device *dev = &aio->chip->pdev->dev;
    142	int bytes = runtime->period_size *
    143		runtime->channels * samples_to_bytes(runtime, 1);
    144	unsigned long flags;
    145
    146	spin_lock_irqsave(&sub->lock, flags);
    147	switch (cmd) {
    148	case SNDRV_PCM_TRIGGER_START:
    149		aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes,
    150			       bytes);
    151		aiodma_ch_set_enable(sub, 1);
    152		sub->running = 1;
    153
    154		break;
    155	case SNDRV_PCM_TRIGGER_STOP:
    156		sub->running = 0;
    157		aiodma_ch_set_enable(sub, 0);
    158
    159		break;
    160	default:
    161		dev_warn(dev, "Unknown trigger(%d) ignored\n", cmd);
    162		break;
    163	}
    164	spin_unlock_irqrestore(&sub->lock, flags);
    165
    166	return 0;
    167}
    168
    169static snd_pcm_uframes_t uniphier_aiodma_pointer(
    170					struct snd_soc_component *component,
    171					struct snd_pcm_substream *substream)
    172{
    173	struct snd_pcm_runtime *runtime = substream->runtime;
    174	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
    175	struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
    176	struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
    177	int bytes = runtime->period_size *
    178		runtime->channels * samples_to_bytes(runtime, 1);
    179	unsigned long flags;
    180	snd_pcm_uframes_t pos;
    181
    182	spin_lock_irqsave(&sub->lock, flags);
    183	aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes);
    184
    185	if (sub->swm->dir == PORT_DIR_OUTPUT)
    186		pos = bytes_to_frames(runtime, sub->rd_offs);
    187	else
    188		pos = bytes_to_frames(runtime, sub->wr_offs);
    189	spin_unlock_irqrestore(&sub->lock, flags);
    190
    191	return pos;
    192}
    193
    194static int uniphier_aiodma_mmap(struct snd_soc_component *component,
    195				struct snd_pcm_substream *substream,
    196				struct vm_area_struct *vma)
    197{
    198	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    199
    200	return remap_pfn_range(vma, vma->vm_start,
    201			       substream->runtime->dma_addr >> PAGE_SHIFT,
    202			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
    203}
    204
    205static int uniphier_aiodma_new(struct snd_soc_component *component,
    206			       struct snd_soc_pcm_runtime *rtd)
    207{
    208	struct device *dev = rtd->card->snd_card->dev;
    209	struct snd_pcm *pcm = rtd->pcm;
    210	int ret;
    211
    212	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(33));
    213	if (ret)
    214		return ret;
    215
    216	snd_pcm_set_managed_buffer_all(pcm,
    217		SNDRV_DMA_TYPE_DEV, dev,
    218		uniphier_aiodma_hw.buffer_bytes_max,
    219		uniphier_aiodma_hw.buffer_bytes_max);
    220	return 0;
    221}
    222
/* ASoC platform component operations for the AIO DMA */
static const struct snd_soc_component_driver uniphier_soc_platform = {
	.open		= uniphier_aiodma_open,
	.prepare	= uniphier_aiodma_prepare,
	.trigger	= uniphier_aiodma_trigger,
	.pointer	= uniphier_aiodma_pointer,
	.mmap		= uniphier_aiodma_mmap,
	.pcm_construct	= uniphier_aiodma_new,
	.compress_ops	= &uniphier_aio_compress_ops,
};
    232
/* MMIO regmap layout: 32-bit registers at 4-byte stride, uncached */
static const struct regmap_config aiodma_regmap_config = {
	.reg_bits      = 32,
	.reg_stride    = 4,
	.val_bits      = 32,
	.max_register  = 0x7fffc,
	.cache_type    = REGCACHE_NONE,
};
    240
    241/**
    242 * uniphier_aiodma_soc_register_platform - register the AIO DMA
    243 * @pdev: the platform device
    244 *
    245 * Register and setup the DMA of AIO to transfer the sound data to device.
    246 * This function need to call once at driver startup and need NOT to call
    247 * unregister function.
    248 *
    249 * Return: Zero if successful, otherwise a negative value on error.
    250 */
    251int uniphier_aiodma_soc_register_platform(struct platform_device *pdev)
    252{
    253	struct uniphier_aio_chip *chip = platform_get_drvdata(pdev);
    254	struct device *dev = &pdev->dev;
    255	void __iomem *preg;
    256	int irq, ret;
    257
    258	preg = devm_platform_ioremap_resource(pdev, 0);
    259	if (IS_ERR(preg))
    260		return PTR_ERR(preg);
    261
    262	chip->regmap = devm_regmap_init_mmio(dev, preg,
    263					     &aiodma_regmap_config);
    264	if (IS_ERR(chip->regmap))
    265		return PTR_ERR(chip->regmap);
    266
    267	irq = platform_get_irq(pdev, 0);
    268	if (irq < 0)
    269		return irq;
    270
    271	ret = devm_request_irq(dev, irq, aiodma_irq,
    272			       IRQF_SHARED, dev_name(dev), pdev);
    273	if (ret)
    274		return ret;
    275
    276	return devm_snd_soc_register_component(dev, &uniphier_soc_platform,
    277					       NULL, 0);
    278}
    279EXPORT_SYMBOL_GPL(uniphier_aiodma_soc_register_platform);