cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lpass-cpu.c (36170B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (c) 2010-2011,2013-2015 The Linux Foundation. All rights reserved.
      4 *
      5 * lpass-cpu.c -- ALSA SoC CPU DAI driver for QTi LPASS
      6 */
      7
      8#include <linux/clk.h>
      9#include <linux/kernel.h>
     10#include <linux/module.h>
     11#include <linux/of.h>
     12#include <linux/of_device.h>
     13#include <linux/platform_device.h>
     14#include <sound/pcm.h>
     15#include <sound/pcm_params.h>
     16#include <linux/regmap.h>
     17#include <sound/soc.h>
     18#include <sound/soc-dai.h>
     19#include "lpass-lpaif-reg.h"
     20#include "lpass.h"
     21
     22#define LPASS_CPU_MAX_MI2S_LINES	4
     23#define LPASS_CPU_I2S_SD0_MASK		BIT(0)
     24#define LPASS_CPU_I2S_SD1_MASK		BIT(1)
     25#define LPASS_CPU_I2S_SD2_MASK		BIT(2)
     26#define LPASS_CPU_I2S_SD3_MASK		BIT(3)
     27#define LPASS_CPU_I2S_SD0_1_MASK	GENMASK(1, 0)
     28#define LPASS_CPU_I2S_SD2_3_MASK	GENMASK(3, 2)
     29#define LPASS_CPU_I2S_SD0_1_2_MASK	GENMASK(2, 0)
     30#define LPASS_CPU_I2S_SD0_1_2_3_MASK	GENMASK(3, 0)
     31#define LPASS_REG_READ 1
     32#define LPASS_REG_WRITE 0
     33
     34/*
      35 * Channel maps for quad-channel playback on MI2S Secondary
     36 */
     37static struct snd_pcm_chmap_elem lpass_quad_chmaps[] = {
     38		{ .channels = 4,
     39		  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_RL,
     40				SNDRV_CHMAP_FR, SNDRV_CHMAP_RR } },
     41		{ }
     42};
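
        /*
         * Allocate regmap fields for the per-port I2SCTL bitfields described
         * by the SoC variant so the DAI ops below can update individual fields
         * (loopback, SPKEN/MICEN, mode, mono, WS source, bit width) without
         * open-coding read-modify-write of the whole register.
         */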
     43static int lpass_cpu_init_i2sctl_bitfields(struct device *dev,
     44			struct lpaif_i2sctl *i2sctl, struct regmap *map)
     45{
     46	struct lpass_data *drvdata = dev_get_drvdata(dev);
     47	struct lpass_variant *v = drvdata->variant;
     48
     49	i2sctl->loopback = devm_regmap_field_alloc(dev, map, v->loopback);
     50	i2sctl->spken = devm_regmap_field_alloc(dev, map, v->spken);
     51	i2sctl->spkmode = devm_regmap_field_alloc(dev, map, v->spkmode);
     52	i2sctl->spkmono = devm_regmap_field_alloc(dev, map, v->spkmono);
     53	i2sctl->micen = devm_regmap_field_alloc(dev, map, v->micen);
     54	i2sctl->micmode = devm_regmap_field_alloc(dev, map, v->micmode);
     55	i2sctl->micmono = devm_regmap_field_alloc(dev, map, v->micmono);
     56	i2sctl->wssrc = devm_regmap_field_alloc(dev, map, v->wssrc);
     57	i2sctl->bitwidth = devm_regmap_field_alloc(dev, map, v->bitwidth);
     58
     59	if (IS_ERR(i2sctl->loopback) || IS_ERR(i2sctl->spken) ||
     60	    IS_ERR(i2sctl->spkmode) || IS_ERR(i2sctl->spkmono) ||
     61	    IS_ERR(i2sctl->micen) || IS_ERR(i2sctl->micmode) ||
     62	    IS_ERR(i2sctl->micmono) || IS_ERR(i2sctl->wssrc) ||
     63	    IS_ERR(i2sctl->bitwidth))
     64		return -EINVAL;
     65
     66	return 0;
     67}
     68
     69static int lpass_cpu_daiops_set_sysclk(struct snd_soc_dai *dai, int clk_id,
     70		unsigned int freq, int dir)
     71{
     72	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
     73	int ret;
     74
     75	ret = clk_set_rate(drvdata->mi2s_osr_clk[dai->driver->id], freq);
     76	if (ret)
     77		dev_err(dai->dev, "error setting mi2s osrclk to %u: %d\n",
     78			freq, ret);
     79
     80	return ret;
     81}
     82
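        /*
         * startup/shutdown only prepare+enable the MI2S OSR clock and prepare
         * the bit clock; the bit clock itself is enabled later, in
         * .prepare/.trigger, once the I2S port is actually switched on.
         */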
     83static int lpass_cpu_daiops_startup(struct snd_pcm_substream *substream,
     84		struct snd_soc_dai *dai)
     85{
     86	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
     87	int ret;
     88
     89	ret = clk_prepare_enable(drvdata->mi2s_osr_clk[dai->driver->id]);
     90	if (ret) {
     91		dev_err(dai->dev, "error in enabling mi2s osr clk: %d\n", ret);
     92		return ret;
     93	}
     94	ret = clk_prepare(drvdata->mi2s_bit_clk[dai->driver->id]);
     95	if (ret) {
     96		dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
     97		clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
     98		return ret;
     99	}
    100	return 0;
    101}
    102
    103static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
    104		struct snd_soc_dai *dai)
    105{
    106	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
    107	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
    108	unsigned int id = dai->driver->id;
    109
    110	clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
    111	/*
     112	 * Ensure the LRCLK is disabled even during device node validation.
     113	 * This has no effect if it was already disabled in the
     114	 * lpass_cpu_daiops_trigger() suspend path.
    115	 */
    116	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
    117		regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
    118	else
    119		regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
    120
    121	/*
     122	 * The BCLK may not have been enabled if lpass_cpu_daiops_prepare() was
     123	 * never called before this shutdown, so only disable it when it was
     124	 * prepared. This pairs with the clk_enable() in lpass_cpu_daiops_prepare().
    125	 */
    126	if (drvdata->mi2s_was_prepared[dai->driver->id]) {
    127		drvdata->mi2s_was_prepared[dai->driver->id] = false;
    128		clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
    129	}
    130
    131	clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
    132}
    133
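        /*
         * hw_params programs the I2SCTL register for the requested stream:
         * bit width (16/24/32), internal word-select source, the SD line mode
         * narrowed down to the requested channel count, mono/stereo packing,
         * and finally the bit clock rate of rate * bitwidth * 2 (one I2S frame
         * carries two channels).
         */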
    134static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
    135		struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
    136{
    137	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
    138	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
    139	unsigned int id = dai->driver->id;
    140	snd_pcm_format_t format = params_format(params);
    141	unsigned int channels = params_channels(params);
    142	unsigned int rate = params_rate(params);
    143	unsigned int mode;
    144	unsigned int regval;
    145	int bitwidth, ret;
    146
    147	bitwidth = snd_pcm_format_width(format);
    148	if (bitwidth < 0) {
    149		dev_err(dai->dev, "invalid bit width given: %d\n", bitwidth);
    150		return bitwidth;
    151	}
    152
    153	ret = regmap_fields_write(i2sctl->loopback, id,
    154				 LPAIF_I2SCTL_LOOPBACK_DISABLE);
    155	if (ret) {
    156		dev_err(dai->dev, "error updating loopback field: %d\n", ret);
    157		return ret;
    158	}
    159
    160	ret = regmap_fields_write(i2sctl->wssrc, id,
    161				 LPAIF_I2SCTL_WSSRC_INTERNAL);
    162	if (ret) {
    163		dev_err(dai->dev, "error updating wssrc field: %d\n", ret);
    164		return ret;
    165	}
    166
    167	switch (bitwidth) {
    168	case 16:
    169		regval = LPAIF_I2SCTL_BITWIDTH_16;
    170		break;
    171	case 24:
    172		regval = LPAIF_I2SCTL_BITWIDTH_24;
    173		break;
    174	case 32:
    175		regval = LPAIF_I2SCTL_BITWIDTH_32;
    176		break;
    177	default:
    178		dev_err(dai->dev, "invalid bitwidth given: %d\n", bitwidth);
    179		return -EINVAL;
    180	}
    181
    182	ret = regmap_fields_write(i2sctl->bitwidth, id, regval);
    183	if (ret) {
    184		dev_err(dai->dev, "error updating bitwidth field: %d\n", ret);
    185		return ret;
    186	}
    187
    188	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
    189		mode = drvdata->mi2s_playback_sd_mode[id];
    190	else
    191		mode = drvdata->mi2s_capture_sd_mode[id];
    192
    193	if (!mode) {
    194		dev_err(dai->dev, "no line is assigned\n");
    195		return -EINVAL;
    196	}
    197
    198	switch (channels) {
    199	case 1:
    200	case 2:
    201		switch (mode) {
    202		case LPAIF_I2SCTL_MODE_QUAD01:
    203		case LPAIF_I2SCTL_MODE_6CH:
    204		case LPAIF_I2SCTL_MODE_8CH:
    205			mode = LPAIF_I2SCTL_MODE_SD0;
    206			break;
    207		case LPAIF_I2SCTL_MODE_QUAD23:
    208			mode = LPAIF_I2SCTL_MODE_SD2;
    209			break;
    210		}
    211
    212		break;
    213	case 4:
    214		if (mode < LPAIF_I2SCTL_MODE_QUAD01) {
    215			dev_err(dai->dev, "cannot configure 4 channels with mode %d\n",
    216				mode);
    217			return -EINVAL;
    218		}
    219
    220		switch (mode) {
    221		case LPAIF_I2SCTL_MODE_6CH:
    222		case LPAIF_I2SCTL_MODE_8CH:
    223			mode = LPAIF_I2SCTL_MODE_QUAD01;
    224			break;
    225		}
    226		break;
    227	case 6:
    228		if (mode < LPAIF_I2SCTL_MODE_6CH) {
    229			dev_err(dai->dev, "cannot configure 6 channels with mode %d\n",
    230				mode);
    231			return -EINVAL;
    232		}
    233
    234		switch (mode) {
    235		case LPAIF_I2SCTL_MODE_8CH:
    236			mode = LPAIF_I2SCTL_MODE_6CH;
    237			break;
    238		}
    239		break;
    240	case 8:
    241		if (mode < LPAIF_I2SCTL_MODE_8CH) {
    242			dev_err(dai->dev, "cannot configure 8 channels with mode %d\n",
    243				mode);
    244			return -EINVAL;
    245		}
    246		break;
    247	default:
    248		dev_err(dai->dev, "invalid channels given: %u\n", channels);
    249		return -EINVAL;
    250	}
    251
    252	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
    253		ret = regmap_fields_write(i2sctl->spkmode, id,
    254					 LPAIF_I2SCTL_SPKMODE(mode));
    255		if (ret) {
    256			dev_err(dai->dev, "error writing to i2sctl spkr mode: %d\n",
    257				ret);
    258			return ret;
    259		}
    260		if (channels >= 2)
    261			ret = regmap_fields_write(i2sctl->spkmono, id,
    262						 LPAIF_I2SCTL_SPKMONO_STEREO);
    263		else
    264			ret = regmap_fields_write(i2sctl->spkmono, id,
    265						 LPAIF_I2SCTL_SPKMONO_MONO);
    266	} else {
    267		ret = regmap_fields_write(i2sctl->micmode, id,
    268					 LPAIF_I2SCTL_MICMODE(mode));
    269		if (ret) {
    270			dev_err(dai->dev, "error writing to i2sctl mic mode: %d\n",
    271				ret);
    272			return ret;
    273		}
    274		if (channels >= 2)
    275			ret = regmap_fields_write(i2sctl->micmono, id,
    276						 LPAIF_I2SCTL_MICMONO_STEREO);
    277		else
    278			ret = regmap_fields_write(i2sctl->micmono, id,
    279						 LPAIF_I2SCTL_MICMONO_MONO);
    280	}
    281
    282	if (ret) {
    283		dev_err(dai->dev, "error writing to i2sctl channels mode: %d\n",
    284			ret);
    285		return ret;
    286	}
    287
    288	ret = clk_set_rate(drvdata->mi2s_bit_clk[id],
    289			   rate * bitwidth * 2);
    290	if (ret) {
    291		dev_err(dai->dev, "error setting mi2s bitclk to %u: %d\n",
    292			rate * bitwidth * 2, ret);
    293		return ret;
    294	}
    295
    296	return 0;
    297}
    298
    299static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
    300		int cmd, struct snd_soc_dai *dai)
    301{
    302	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
    303	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
    304	unsigned int id = dai->driver->id;
    305	int ret = -EINVAL;
    306
    307	switch (cmd) {
    308	case SNDRV_PCM_TRIGGER_START:
    309	case SNDRV_PCM_TRIGGER_RESUME:
    310	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
    311		/*
     312		 * Ensure the LPASS BCLK/LRCLK is enabled during
     313		 * device resume, as lpass_cpu_daiops_prepare() is not called
     314		 * after the device resumes. We don't check mi2s_was_prepared before
     315		 * enabling/disabling the BCLK in trigger events because:
     316		 *  1. These trigger events are paired, so the BCLK
     317		 *     enable_count stays balanced.
     318		 *  2. The BCLK can be shared (e.g. headset and headset mic);
    319		 *     we need to increase the enable_count so that we don't
    320		 *     turn off the shared BCLK while other devices are using
    321		 *     it.
    322		 */
    323		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
    324			ret = regmap_fields_write(i2sctl->spken, id,
    325						 LPAIF_I2SCTL_SPKEN_ENABLE);
     326		} else {
    327			ret = regmap_fields_write(i2sctl->micen, id,
    328						 LPAIF_I2SCTL_MICEN_ENABLE);
    329		}
    330		if (ret)
    331			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
    332				ret);
    333
    334		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
    335		if (ret) {
    336			dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
    337			clk_disable(drvdata->mi2s_osr_clk[id]);
    338			return ret;
    339		}
    340		break;
    341	case SNDRV_PCM_TRIGGER_STOP:
    342	case SNDRV_PCM_TRIGGER_SUSPEND:
    343	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
    344		/*
     345		 * Ensure the LPASS BCLK/LRCLK is disabled during
    346		 * device suspend.
    347		 */
    348		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
    349			ret = regmap_fields_write(i2sctl->spken, id,
    350						 LPAIF_I2SCTL_SPKEN_DISABLE);
     351		} else {
    352			ret = regmap_fields_write(i2sctl->micen, id,
    353						 LPAIF_I2SCTL_MICEN_DISABLE);
    354		}
    355		if (ret)
    356			dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
    357				ret);
    358
    359		clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
    360
    361		break;
    362	}
    363
    364	return ret;
    365}
    366
    367static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
    368		struct snd_soc_dai *dai)
    369{
    370	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
    371	struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
    372	unsigned int id = dai->driver->id;
    373	int ret;
    374
    375	/*
     376	 * Ensure the LPASS BCLK/LRCLK is enabled a bit before the playback/capture
     377	 * data flow starts. This allows the other codec to have some delay before
     378	 * the data flow begins
     379	 * (e.g. to drop start-up pop noise before capture starts).
    380	 */
    381	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
    382		ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
    383	else
    384		ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
    385
    386	if (ret) {
    387		dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
    388		return ret;
    389	}
    390
    391	/*
    392	 * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
    393	 * be called multiple times. It's paired with the clk_disable in
    394	 * lpass_cpu_daiops_shutdown.
    395	 */
    396	if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
    397		ret = clk_enable(drvdata->mi2s_bit_clk[id]);
    398		if (ret) {
    399			dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
    400			return ret;
    401		}
    402		drvdata->mi2s_was_prepared[dai->driver->id] = true;
    403	}
    404	return 0;
    405}
    406
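        /* DAI callbacks exported for the SoC-specific LPASS variant drivers. */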
    407const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
    408	.set_sysclk	= lpass_cpu_daiops_set_sysclk,
    409	.startup	= lpass_cpu_daiops_startup,
    410	.shutdown	= lpass_cpu_daiops_shutdown,
    411	.hw_params	= lpass_cpu_daiops_hw_params,
    412	.trigger	= lpass_cpu_daiops_trigger,
    413	.prepare	= lpass_cpu_daiops_prepare,
    414};
    415EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
    416
    417int lpass_cpu_pcm_new(struct snd_soc_pcm_runtime *rtd,
    418				struct snd_soc_dai *dai)
    419{
    420	int ret;
    421	struct snd_soc_dai_driver *drv = dai->driver;
    422	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
    423
    424	if (drvdata->mi2s_playback_sd_mode[dai->id] == LPAIF_I2SCTL_MODE_QUAD01) {
    425		ret =  snd_pcm_add_chmap_ctls(rtd->pcm, SNDRV_PCM_STREAM_PLAYBACK,
    426				lpass_quad_chmaps, drv->playback.channels_max, 0,
    427				NULL);
    428		if (ret < 0)
    429			return ret;
    430	}
    431
    432	return 0;
    433}
    434EXPORT_SYMBOL_GPL(lpass_cpu_pcm_new);
    435
    436int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai)
    437{
    438	struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
    439	int ret;
    440
    441	/* ensure audio hardware is disabled */
    442	ret = regmap_write(drvdata->lpaif_map,
    443			LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), 0);
    444	if (ret)
    445		dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
    446
    447	return ret;
    448}
    449EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_probe);
    450
    451static int asoc_qcom_of_xlate_dai_name(struct snd_soc_component *component,
    452				   const struct of_phandle_args *args,
    453				   const char **dai_name)
    454{
    455	struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
    456	struct lpass_variant *variant = drvdata->variant;
    457	int id = args->args[0];
    458	int ret = -EINVAL;
    459	int i;
    460
     461	for (i = 0; i < variant->num_dai; i++) {
    462		if (variant->dai_driver[i].id == id) {
    463			*dai_name = variant->dai_driver[i].name;
    464			ret = 0;
    465			break;
    466		}
    467	}
    468
    469	return ret;
    470}
    471
    472static const struct snd_soc_component_driver lpass_cpu_comp_driver = {
    473	.name = "lpass-cpu",
    474	.of_xlate_dai_name = asoc_qcom_of_xlate_dai_name,
    475};
    476
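        /*
         * regmap access tables for the LPAIF block: with REGCACHE_FLAT only
         * the registers explicitly whitelisted below (this variant's I2SCTL,
         * IRQ and DMA registers) may be read or written, and the volatile set
         * (IRQ status/clear, DMA CURR pointers) always bypasses the cache.
         */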
    477static bool lpass_cpu_regmap_writeable(struct device *dev, unsigned int reg)
    478{
    479	struct lpass_data *drvdata = dev_get_drvdata(dev);
    480	struct lpass_variant *v = drvdata->variant;
    481	int i;
    482
    483	for (i = 0; i < v->i2s_ports; ++i)
    484		if (reg == LPAIF_I2SCTL_REG(v, i))
    485			return true;
    486
    487	for (i = 0; i < v->irq_ports; ++i) {
    488		if (reg == LPAIF_IRQEN_REG(v, i))
    489			return true;
    490		if (reg == LPAIF_IRQCLEAR_REG(v, i))
    491			return true;
    492	}
    493
    494	for (i = 0; i < v->rdma_channels; ++i) {
    495		if (reg == LPAIF_RDMACTL_REG(v, i))
    496			return true;
    497		if (reg == LPAIF_RDMABASE_REG(v, i))
    498			return true;
    499		if (reg == LPAIF_RDMABUFF_REG(v, i))
    500			return true;
    501		if (reg == LPAIF_RDMAPER_REG(v, i))
    502			return true;
    503	}
    504
    505	for (i = 0; i < v->wrdma_channels; ++i) {
    506		if (reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
    507			return true;
    508		if (reg == LPAIF_WRDMABASE_REG(v, i + v->wrdma_channel_start))
    509			return true;
    510		if (reg == LPAIF_WRDMABUFF_REG(v, i + v->wrdma_channel_start))
    511			return true;
    512		if (reg == LPAIF_WRDMAPER_REG(v, i + v->wrdma_channel_start))
    513			return true;
    514	}
    515
    516	return false;
    517}
    518
    519static bool lpass_cpu_regmap_readable(struct device *dev, unsigned int reg)
    520{
    521	struct lpass_data *drvdata = dev_get_drvdata(dev);
    522	struct lpass_variant *v = drvdata->variant;
    523	int i;
    524
    525	for (i = 0; i < v->i2s_ports; ++i)
    526		if (reg == LPAIF_I2SCTL_REG(v, i))
    527			return true;
    528
    529	for (i = 0; i < v->irq_ports; ++i) {
    530		if (reg == LPAIF_IRQCLEAR_REG(v, i))
    531			return true;
    532		if (reg == LPAIF_IRQEN_REG(v, i))
    533			return true;
    534		if (reg == LPAIF_IRQSTAT_REG(v, i))
    535			return true;
    536	}
    537
    538	for (i = 0; i < v->rdma_channels; ++i) {
    539		if (reg == LPAIF_RDMACTL_REG(v, i))
    540			return true;
    541		if (reg == LPAIF_RDMABASE_REG(v, i))
    542			return true;
    543		if (reg == LPAIF_RDMABUFF_REG(v, i))
    544			return true;
    545		if (reg == LPAIF_RDMACURR_REG(v, i))
    546			return true;
    547		if (reg == LPAIF_RDMAPER_REG(v, i))
    548			return true;
    549	}
    550
    551	for (i = 0; i < v->wrdma_channels; ++i) {
    552		if (reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
    553			return true;
    554		if (reg == LPAIF_WRDMABASE_REG(v, i + v->wrdma_channel_start))
    555			return true;
    556		if (reg == LPAIF_WRDMABUFF_REG(v, i + v->wrdma_channel_start))
    557			return true;
    558		if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
    559			return true;
    560		if (reg == LPAIF_WRDMAPER_REG(v, i + v->wrdma_channel_start))
    561			return true;
    562	}
    563
    564	return false;
    565}
    566
    567static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
    568{
    569	struct lpass_data *drvdata = dev_get_drvdata(dev);
    570	struct lpass_variant *v = drvdata->variant;
    571	int i;
    572
    573	for (i = 0; i < v->irq_ports; ++i) {
    574		if (reg == LPAIF_IRQCLEAR_REG(v, i))
    575			return true;
    576		if (reg == LPAIF_IRQSTAT_REG(v, i))
    577			return true;
    578	}
    579
    580	for (i = 0; i < v->rdma_channels; ++i)
    581		if (reg == LPAIF_RDMACURR_REG(v, i))
    582			return true;
    583
    584	for (i = 0; i < v->wrdma_channels; ++i)
    585		if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
    586			return true;
    587
    588	return false;
    589}
    590
    591static struct regmap_config lpass_cpu_regmap_config = {
    592	.name = "lpass_cpu",
    593	.reg_bits = 32,
    594	.reg_stride = 4,
    595	.val_bits = 32,
    596	.writeable_reg = lpass_cpu_regmap_writeable,
    597	.readable_reg = lpass_cpu_regmap_readable,
    598	.volatile_reg = lpass_cpu_regmap_volatile,
    599	.cache_type = REGCACHE_FLAT,
    600};
    601
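        /*
         * Allocate the regmap fields used by the HDMI/DP audio path: TX
         * control (soft/force reset), legacy mode, VBIT control, parity,
         * metadata and sstream controls, plus per-DMA-channel MSB/LSB and
         * DMA control fields.
         */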
    602static int lpass_hdmi_init_bitfields(struct device *dev, struct regmap *map)
    603{
    604	struct lpass_data *drvdata = dev_get_drvdata(dev);
    605	struct lpass_variant *v = drvdata->variant;
    606	unsigned int i;
    607	struct lpass_hdmi_tx_ctl *tx_ctl;
    608	struct regmap_field *legacy_en;
    609	struct lpass_vbit_ctrl *vbit_ctl;
    610	struct regmap_field *tx_parity;
    611	struct lpass_dp_metadata_ctl *meta_ctl;
    612	struct lpass_sstream_ctl *sstream_ctl;
    613	struct regmap_field *ch_msb;
    614	struct regmap_field *ch_lsb;
    615	struct lpass_hdmitx_dmactl *tx_dmactl;
    616	int rval;
    617
    618	tx_ctl = devm_kzalloc(dev, sizeof(*tx_ctl), GFP_KERNEL);
    619	if (!tx_ctl)
    620		return -ENOMEM;
    621
    622	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->soft_reset, tx_ctl->soft_reset);
    623	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->force_reset, tx_ctl->force_reset);
    624	drvdata->tx_ctl = tx_ctl;
    625
    626	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->legacy_en, legacy_en);
    627	drvdata->hdmitx_legacy_en = legacy_en;
    628
    629	vbit_ctl = devm_kzalloc(dev, sizeof(*vbit_ctl), GFP_KERNEL);
    630	if (!vbit_ctl)
    631		return -ENOMEM;
    632
    633	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->replace_vbit, vbit_ctl->replace_vbit);
    634	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->vbit_stream, vbit_ctl->vbit_stream);
    635	drvdata->vbit_ctl = vbit_ctl;
    636
    637
    638	QCOM_REGMAP_FIELD_ALLOC(dev, map, v->calc_en, tx_parity);
    639	drvdata->hdmitx_parity_calc_en = tx_parity;
    640
    641	meta_ctl = devm_kzalloc(dev, sizeof(*meta_ctl), GFP_KERNEL);
    642	if (!meta_ctl)
    643		return -ENOMEM;
    644
    645	rval = devm_regmap_field_bulk_alloc(dev, map, &meta_ctl->mute, &v->mute, 7);
    646	if (rval)
    647		return rval;
    648	drvdata->meta_ctl = meta_ctl;
    649
    650	sstream_ctl = devm_kzalloc(dev, sizeof(*sstream_ctl), GFP_KERNEL);
    651	if (!sstream_ctl)
    652		return -ENOMEM;
    653
    654	rval = devm_regmap_field_bulk_alloc(dev, map, &sstream_ctl->sstream_en, &v->sstream_en, 9);
    655	if (rval)
    656		return rval;
    657
    658	drvdata->sstream_ctl = sstream_ctl;
    659
    660	for (i = 0; i < LPASS_MAX_HDMI_DMA_CHANNELS; i++) {
    661		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->msb_bits, ch_msb);
    662		drvdata->hdmitx_ch_msb[i] = ch_msb;
    663
    664		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->lsb_bits, ch_lsb);
    665		drvdata->hdmitx_ch_lsb[i] = ch_lsb;
    666
    667		tx_dmactl = devm_kzalloc(dev, sizeof(*tx_dmactl), GFP_KERNEL);
    668		if (!tx_dmactl)
    669			return -ENOMEM;
    670
    671		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_chs, tx_dmactl->use_hw_chs);
    672		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->use_hw_usr, tx_dmactl->use_hw_usr);
    673		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_chs_sel, tx_dmactl->hw_chs_sel);
    674		QCOM_REGMAP_FIELD_ALLOC(dev, map, v->hw_usr_sel, tx_dmactl->hw_usr_sel);
    675		drvdata->hdmi_tx_dmactl[i] = tx_dmactl;
    676	}
    677	return 0;
    678}
    679
    680static bool lpass_hdmi_regmap_writeable(struct device *dev, unsigned int reg)
    681{
    682	struct lpass_data *drvdata = dev_get_drvdata(dev);
    683	struct lpass_variant *v = drvdata->variant;
    684	int i;
    685
    686	if (reg == LPASS_HDMI_TX_CTL_ADDR(v))
    687		return true;
    688	if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
    689		return true;
    690	if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
    691		return true;
    692	if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
    693		return true;
    694	if (reg == LPASS_HDMI_TX_DP_ADDR(v))
    695		return true;
    696	if (reg == LPASS_HDMI_TX_SSTREAM_ADDR(v))
    697		return true;
    698	if (reg == LPASS_HDMITX_APP_IRQEN_REG(v))
    699		return true;
    700	if (reg == LPASS_HDMITX_APP_IRQCLEAR_REG(v))
    701		return true;
    702
    703	for (i = 0; i < v->hdmi_rdma_channels; i++) {
    704		if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
    705			return true;
    706		if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
    707			return true;
    708		if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
    709			return true;
    710	}
    711
    712	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
    713		if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
    714			return true;
    715		if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
    716			return true;
    717		if (reg == LPAIF_HDMI_RDMABUFF_REG(v, i))
    718			return true;
    719		if (reg == LPAIF_HDMI_RDMAPER_REG(v, i))
    720			return true;
    721	}
    722	return false;
    723}
    724
    725static bool lpass_hdmi_regmap_readable(struct device *dev, unsigned int reg)
    726{
    727	struct lpass_data *drvdata = dev_get_drvdata(dev);
    728	struct lpass_variant *v = drvdata->variant;
    729	int i;
    730
    731	if (reg == LPASS_HDMI_TX_CTL_ADDR(v))
    732		return true;
    733	if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
    734		return true;
    735	if (reg == LPASS_HDMI_TX_VBIT_CTL_ADDR(v))
    736		return true;
    737
    738	for (i = 0; i < v->hdmi_rdma_channels; i++) {
    739		if (reg == LPASS_HDMI_TX_CH_LSB_ADDR(v, i))
    740			return true;
    741		if (reg == LPASS_HDMI_TX_CH_MSB_ADDR(v, i))
    742			return true;
    743		if (reg == LPASS_HDMI_TX_DMA_ADDR(v, i))
    744			return true;
    745	}
    746
    747	if (reg == LPASS_HDMI_TX_PARITY_ADDR(v))
    748		return true;
    749	if (reg == LPASS_HDMI_TX_DP_ADDR(v))
    750		return true;
    751	if (reg == LPASS_HDMI_TX_SSTREAM_ADDR(v))
    752		return true;
    753	if (reg == LPASS_HDMITX_APP_IRQEN_REG(v))
    754		return true;
    755	if (reg == LPASS_HDMITX_APP_IRQSTAT_REG(v))
    756		return true;
    757
    758	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
    759		if (reg == LPAIF_HDMI_RDMACTL_REG(v, i))
    760			return true;
    761		if (reg == LPAIF_HDMI_RDMABASE_REG(v, i))
    762			return true;
    763		if (reg == LPAIF_HDMI_RDMABUFF_REG(v, i))
    764			return true;
    765		if (reg == LPAIF_HDMI_RDMAPER_REG(v, i))
    766			return true;
    767		if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
    768			return true;
    769	}
    770
    771	return false;
    772}
    773
    774static bool lpass_hdmi_regmap_volatile(struct device *dev, unsigned int reg)
    775{
    776	struct lpass_data *drvdata = dev_get_drvdata(dev);
    777	struct lpass_variant *v = drvdata->variant;
    778	int i;
    779
    780	if (reg == LPASS_HDMITX_APP_IRQSTAT_REG(v))
    781		return true;
    782	if (reg == LPASS_HDMI_TX_LEGACY_ADDR(v))
    783		return true;
    784
    785	for (i = 0; i < v->hdmi_rdma_channels; ++i) {
    786		if (reg == LPAIF_HDMI_RDMACURR_REG(v, i))
    787			return true;
    788	}
    789	return false;
    790}
    791
    792static struct regmap_config lpass_hdmi_regmap_config = {
    793	.name = "lpass_hdmi",
    794	.reg_bits = 32,
    795	.reg_stride = 4,
    796	.val_bits = 32,
    797	.writeable_reg = lpass_hdmi_regmap_writeable,
    798	.readable_reg = lpass_hdmi_regmap_readable,
    799	.volatile_reg = lpass_hdmi_regmap_volatile,
    800	.cache_type = REGCACHE_FLAT,
    801};
    802
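        /*
         * Shared accessibility check for the codec-DMA (RXTX) register map.
         * The rw flag (LPASS_REG_READ/LPASS_REG_WRITE) only matters for the
         * DMA CURR pointer registers, which are readable but not writeable.
         */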
    803static bool __lpass_rxtx_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
    804{
    805	struct lpass_data *drvdata = dev_get_drvdata(dev);
    806	struct lpass_variant *v = drvdata->variant;
    807	int i;
    808
    809	for (i = 0; i < v->rxtx_irq_ports; ++i) {
    810		if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
    811			return true;
    812		if (reg == LPAIF_RXTX_IRQEN_REG(v, i))
    813			return true;
    814		if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
    815			return true;
    816	}
    817
    818	for (i = 0; i < v->rxtx_rdma_channels; ++i) {
    819		if (reg == LPAIF_CDC_RXTX_RDMACTL_REG(v, i, LPASS_CDC_DMA_RX0))
    820			return true;
    821		if (reg == LPAIF_CDC_RXTX_RDMABASE_REG(v, i, LPASS_CDC_DMA_RX0))
    822			return true;
    823		if (reg == LPAIF_CDC_RXTX_RDMABUFF_REG(v, i, LPASS_CDC_DMA_RX0))
    824			return true;
    825		if (rw == LPASS_REG_READ) {
    826			if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
    827				return true;
    828		}
    829		if (reg == LPAIF_CDC_RXTX_RDMAPER_REG(v, i, LPASS_CDC_DMA_RX0))
    830			return true;
    831		if (reg == LPAIF_CDC_RXTX_RDMA_INTF_REG(v, i, LPASS_CDC_DMA_RX0))
    832			return true;
    833	}
    834
    835	for (i = 0; i < v->rxtx_wrdma_channels; ++i) {
    836		if (reg == LPAIF_CDC_RXTX_WRDMACTL_REG(v, i + v->rxtx_wrdma_channel_start,
    837							LPASS_CDC_DMA_TX3))
    838			return true;
    839		if (reg == LPAIF_CDC_RXTX_WRDMABASE_REG(v, i + v->rxtx_wrdma_channel_start,
    840							LPASS_CDC_DMA_TX3))
    841			return true;
    842		if (reg == LPAIF_CDC_RXTX_WRDMABUFF_REG(v, i + v->rxtx_wrdma_channel_start,
    843							LPASS_CDC_DMA_TX3))
    844			return true;
    845		if (rw == LPASS_REG_READ) {
     846			if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start, LPASS_CDC_DMA_TX3))
    847				return true;
    848		}
    849		if (reg == LPAIF_CDC_RXTX_WRDMAPER_REG(v, i + v->rxtx_wrdma_channel_start,
    850							LPASS_CDC_DMA_TX3))
    851			return true;
    852		if (reg == LPAIF_CDC_RXTX_WRDMA_INTF_REG(v, i + v->rxtx_wrdma_channel_start,
    853							LPASS_CDC_DMA_TX3))
    854			return true;
    855	}
    856	return false;
    857}
    858
    859static bool lpass_rxtx_regmap_writeable(struct device *dev, unsigned int reg)
    860{
    861	return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_WRITE);
    862}
    863
    864static bool lpass_rxtx_regmap_readable(struct device *dev, unsigned int reg)
    865{
    866	return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_READ);
    867}
    868
    869static bool lpass_rxtx_regmap_volatile(struct device *dev, unsigned int reg)
    870{
    871	struct lpass_data *drvdata = dev_get_drvdata(dev);
    872	struct lpass_variant *v = drvdata->variant;
    873	int i;
    874
    875	for (i = 0; i < v->rxtx_irq_ports; ++i) {
    876		if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
    877			return true;
    878		if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
    879			return true;
    880	}
    881
    882	for (i = 0; i < v->rxtx_rdma_channels; ++i)
    883		if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
    884			return true;
    885
    886	for (i = 0; i < v->rxtx_wrdma_channels; ++i)
    887		if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start,
    888							LPASS_CDC_DMA_TX3))
    889			return true;
    890
    891	return false;
    892}
    893
    894static bool __lpass_va_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
    895{
    896	struct lpass_data *drvdata = dev_get_drvdata(dev);
    897	struct lpass_variant *v = drvdata->variant;
    898	int i;
    899
    900	for (i = 0; i < v->va_irq_ports; ++i) {
    901		if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
    902			return true;
    903		if (reg == LPAIF_VA_IRQEN_REG(v, i))
    904			return true;
    905		if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
    906			return true;
    907	}
    908
    909	for (i = 0; i < v->va_wrdma_channels; ++i) {
    910		if (reg == LPAIF_CDC_VA_WRDMACTL_REG(v, i + v->va_wrdma_channel_start,
    911							LPASS_CDC_DMA_VA_TX0))
    912			return true;
    913		if (reg == LPAIF_CDC_VA_WRDMABASE_REG(v, i + v->va_wrdma_channel_start,
    914							LPASS_CDC_DMA_VA_TX0))
    915			return true;
    916		if (reg == LPAIF_CDC_VA_WRDMABUFF_REG(v, i + v->va_wrdma_channel_start,
    917							LPASS_CDC_DMA_VA_TX0))
    918			return true;
    919		if (rw == LPASS_REG_READ) {
    920			if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
    921							LPASS_CDC_DMA_VA_TX0))
    922				return true;
    923		}
    924		if (reg == LPAIF_CDC_VA_WRDMAPER_REG(v, i + v->va_wrdma_channel_start,
    925							LPASS_CDC_DMA_VA_TX0))
    926			return true;
    927		if (reg == LPAIF_CDC_VA_WRDMA_INTF_REG(v, i + v->va_wrdma_channel_start,
    928							LPASS_CDC_DMA_VA_TX0))
    929			return true;
    930	}
    931	return false;
    932}
    933
    934static bool lpass_va_regmap_writeable(struct device *dev, unsigned int reg)
    935{
    936	return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_WRITE);
    937}
    938
    939static bool lpass_va_regmap_readable(struct device *dev, unsigned int reg)
    940{
    941	return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_READ);
    942}
    943
    944static bool lpass_va_regmap_volatile(struct device *dev, unsigned int reg)
    945{
    946	struct lpass_data *drvdata = dev_get_drvdata(dev);
    947	struct lpass_variant *v = drvdata->variant;
    948	int i;
    949
    950	for (i = 0; i < v->va_irq_ports; ++i) {
    951		if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
    952			return true;
    953		if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
    954			return true;
    955	}
    956
    957	for (i = 0; i < v->va_wrdma_channels; ++i) {
    958		if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
    959							LPASS_CDC_DMA_VA_TX0))
    960			return true;
    961	}
    962
    963	return false;
    964}
    965
    966static struct regmap_config lpass_rxtx_regmap_config = {
    967	.reg_bits = 32,
    968	.reg_stride = 4,
    969	.val_bits = 32,
    970	.writeable_reg = lpass_rxtx_regmap_writeable,
    971	.readable_reg = lpass_rxtx_regmap_readable,
    972	.volatile_reg = lpass_rxtx_regmap_volatile,
    973	.cache_type = REGCACHE_FLAT,
    974};
    975
    976static struct regmap_config lpass_va_regmap_config = {
    977	.reg_bits = 32,
    978	.reg_stride = 4,
    979	.val_bits = 32,
    980	.writeable_reg = lpass_va_regmap_writeable,
    981	.readable_reg = lpass_va_regmap_readable,
    982	.volatile_reg = lpass_va_regmap_volatile,
    983	.cache_type = REGCACHE_FLAT,
    984};
    985
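        /*
         * Parse a "qcom,playback-sd-lines" / "qcom,capture-sd-lines" property
         * into a serial-data line bitmask and map it onto an I2SCTL SD mode.
         * As an illustrative device-tree fragment only (node name and dai id
         * macro depend on the platform's LPASS binding), something like
         *
         *	dai@0 {
         *		reg = <MI2S_PRIMARY>;
         *		qcom,playback-sd-lines = <0 1>;
         *	};
         *
         * would select LPAIF_I2SCTL_MODE_QUAD01 for playback.
         */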
    986static unsigned int of_lpass_cpu_parse_sd_lines(struct device *dev,
    987						struct device_node *node,
    988						const char *name)
    989{
    990	unsigned int lines[LPASS_CPU_MAX_MI2S_LINES];
    991	unsigned int sd_line_mask = 0;
    992	int num_lines, i;
    993
    994	num_lines = of_property_read_variable_u32_array(node, name, lines, 0,
    995							LPASS_CPU_MAX_MI2S_LINES);
    996	if (num_lines < 0)
    997		return LPAIF_I2SCTL_MODE_NONE;
    998
    999	for (i = 0; i < num_lines; i++)
   1000		sd_line_mask |= BIT(lines[i]);
   1001
   1002	switch (sd_line_mask) {
   1003	case LPASS_CPU_I2S_SD0_MASK:
   1004		return LPAIF_I2SCTL_MODE_SD0;
   1005	case LPASS_CPU_I2S_SD1_MASK:
   1006		return LPAIF_I2SCTL_MODE_SD1;
   1007	case LPASS_CPU_I2S_SD2_MASK:
   1008		return LPAIF_I2SCTL_MODE_SD2;
   1009	case LPASS_CPU_I2S_SD3_MASK:
   1010		return LPAIF_I2SCTL_MODE_SD3;
   1011	case LPASS_CPU_I2S_SD0_1_MASK:
   1012		return LPAIF_I2SCTL_MODE_QUAD01;
   1013	case LPASS_CPU_I2S_SD2_3_MASK:
   1014		return LPAIF_I2SCTL_MODE_QUAD23;
   1015	case LPASS_CPU_I2S_SD0_1_2_MASK:
   1016		return LPAIF_I2SCTL_MODE_6CH;
   1017	case LPASS_CPU_I2S_SD0_1_2_3_MASK:
   1018		return LPAIF_I2SCTL_MODE_8CH;
   1019	default:
   1020		dev_err(dev, "Unsupported SD line mask: %#x\n", sd_line_mask);
   1021		return LPAIF_I2SCTL_MODE_NONE;
   1022	}
   1023}
   1024
   1025static void of_lpass_cpu_parse_dai_data(struct device *dev,
   1026					struct lpass_data *data)
   1027{
   1028	struct device_node *node;
   1029	int ret, id;
   1030
   1031	/* Allow all channels by default for backwards compatibility */
   1032	for (id = 0; id < data->variant->num_dai; id++) {
   1033		data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
   1034		data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
   1035	}
   1036
   1037	for_each_child_of_node(dev->of_node, node) {
   1038		ret = of_property_read_u32(node, "reg", &id);
   1039		if (ret || id < 0) {
   1040			dev_err(dev, "valid dai id not found: %d\n", ret);
   1041			continue;
   1042		}
   1043		if (id == LPASS_DP_RX) {
   1044			data->hdmi_port_enable = 1;
   1045		} else if (is_cdc_dma_port(id)) {
   1046			data->codec_dma_enable = 1;
   1047		} else {
   1048			data->mi2s_playback_sd_mode[id] =
   1049				of_lpass_cpu_parse_sd_lines(dev, node,
   1050							    "qcom,playback-sd-lines");
   1051			data->mi2s_capture_sd_mode[id] =
   1052				of_lpass_cpu_parse_sd_lines(dev, node,
   1053						    "qcom,capture-sd-lines");
   1054		}
   1055	}
   1056}
   1057
   1058static int of_lpass_cdc_dma_clks_parse(struct device *dev,
   1059					struct lpass_data *data)
   1060{
   1061	data->codec_mem0 = devm_clk_get(dev, "audio_cc_codec_mem0");
   1062	if (IS_ERR(data->codec_mem0))
   1063		return PTR_ERR(data->codec_mem0);
   1064
   1065	data->codec_mem1 = devm_clk_get(dev, "audio_cc_codec_mem1");
   1066	if (IS_ERR(data->codec_mem1))
   1067		return PTR_ERR(data->codec_mem1);
   1068
   1069	data->codec_mem2 = devm_clk_get(dev, "audio_cc_codec_mem2");
   1070	if (IS_ERR(data->codec_mem2))
   1071		return PTR_ERR(data->codec_mem2);
   1072
   1073	data->va_mem0 = devm_clk_get(dev, "aon_cc_va_mem0");
   1074	if (IS_ERR(data->va_mem0))
   1075		return PTR_ERR(data->va_mem0);
   1076
   1077	return 0;
   1078}
   1079
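        /*
         * Probe: bail out if the ADSP owns the audio hardware, parse the DAI
         * sub-nodes, ioremap and regmap the LPAIF (plus optional codec-DMA/VA
         * and HDMI) register blocks, look up the MI2S clocks, initialise the
         * I2SCTL/HDMI bitfields, then register the ASoC component and the
         * platform (DMA) driver.
         */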
   1080int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
   1081{
   1082	struct lpass_data *drvdata;
   1083	struct device_node *dsp_of_node;
   1084	struct resource *res;
   1085	struct lpass_variant *variant;
   1086	struct device *dev = &pdev->dev;
   1087	const struct of_device_id *match;
   1088	int ret, i, dai_id;
   1089
   1090	dsp_of_node = of_parse_phandle(pdev->dev.of_node, "qcom,adsp", 0);
   1091	if (dsp_of_node) {
    1092		dev_err(dev, "DSP exists and holds audio resources\n");
        		of_node_put(dsp_of_node);
    1093		return -EBUSY;
   1094	}
   1095
   1096	drvdata = devm_kzalloc(dev, sizeof(struct lpass_data), GFP_KERNEL);
   1097	if (!drvdata)
   1098		return -ENOMEM;
   1099	platform_set_drvdata(pdev, drvdata);
   1100
   1101	match = of_match_device(dev->driver->of_match_table, dev);
   1102	if (!match || !match->data)
   1103		return -EINVAL;
   1104
   1105	drvdata->variant = (struct lpass_variant *)match->data;
   1106	variant = drvdata->variant;
   1107
   1108	of_lpass_cpu_parse_dai_data(dev, drvdata);
   1109
   1110	if (drvdata->codec_dma_enable) {
   1111		drvdata->rxtx_lpaif =
   1112				devm_platform_ioremap_resource_byname(pdev, "lpass-rxtx-lpaif");
   1113		if (IS_ERR(drvdata->rxtx_lpaif))
   1114			return PTR_ERR(drvdata->rxtx_lpaif);
   1115
   1116		drvdata->va_lpaif = devm_platform_ioremap_resource_byname(pdev, "lpass-va-lpaif");
   1117		if (IS_ERR(drvdata->va_lpaif))
   1118			return PTR_ERR(drvdata->va_lpaif);
   1119
   1120		lpass_rxtx_regmap_config.max_register = LPAIF_CDC_RXTX_WRDMAPER_REG(variant,
   1121					variant->rxtx_wrdma_channels +
   1122					variant->rxtx_wrdma_channel_start, LPASS_CDC_DMA_TX3);
   1123
   1124		drvdata->rxtx_lpaif_map = devm_regmap_init_mmio(dev, drvdata->rxtx_lpaif,
   1125					&lpass_rxtx_regmap_config);
   1126		if (IS_ERR(drvdata->rxtx_lpaif_map))
   1127			return PTR_ERR(drvdata->rxtx_lpaif_map);
   1128
   1129		lpass_va_regmap_config.max_register = LPAIF_CDC_VA_WRDMAPER_REG(variant,
   1130					variant->va_wrdma_channels +
   1131					variant->va_wrdma_channel_start, LPASS_CDC_DMA_VA_TX0);
   1132
   1133		drvdata->va_lpaif_map = devm_regmap_init_mmio(dev, drvdata->va_lpaif,
   1134					&lpass_va_regmap_config);
   1135		if (IS_ERR(drvdata->va_lpaif_map))
   1136			return PTR_ERR(drvdata->va_lpaif_map);
   1137
   1138		ret = of_lpass_cdc_dma_clks_parse(dev, drvdata);
   1139		if (ret) {
   1140			dev_err(dev, "failed to get cdc dma clocks %d\n", ret);
   1141			return ret;
   1142		}
   1143
    1144		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-rxtx-cdc-dma-lpm");
        		if (!res)
        			return -EINVAL;
    1145		drvdata->rxtx_cdc_dma_lpm_buf = res->start;
    1146
    1147		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-va-cdc-dma-lpm");
        		if (!res)
        			return -EINVAL;
    1148		drvdata->va_cdc_dma_lpm_buf = res->start;
   1149	}
   1150
   1151	drvdata->lpaif = devm_platform_ioremap_resource_byname(pdev, "lpass-lpaif");
   1152	if (IS_ERR(drvdata->lpaif))
   1153		return PTR_ERR(drvdata->lpaif);
   1154
   1155	lpass_cpu_regmap_config.max_register = LPAIF_WRDMAPER_REG(variant,
   1156						variant->wrdma_channels +
   1157						variant->wrdma_channel_start);
   1158
   1159	drvdata->lpaif_map = devm_regmap_init_mmio(dev, drvdata->lpaif,
   1160			&lpass_cpu_regmap_config);
   1161	if (IS_ERR(drvdata->lpaif_map)) {
   1162		dev_err(dev, "error initializing regmap: %ld\n",
   1163			PTR_ERR(drvdata->lpaif_map));
   1164		return PTR_ERR(drvdata->lpaif_map);
   1165	}
   1166
   1167	if (drvdata->hdmi_port_enable) {
   1168		drvdata->hdmiif = devm_platform_ioremap_resource_byname(pdev, "lpass-hdmiif");
   1169		if (IS_ERR(drvdata->hdmiif))
   1170			return PTR_ERR(drvdata->hdmiif);
   1171
   1172		lpass_hdmi_regmap_config.max_register = LPAIF_HDMI_RDMAPER_REG(variant,
   1173					variant->hdmi_rdma_channels - 1);
   1174		drvdata->hdmiif_map = devm_regmap_init_mmio(dev, drvdata->hdmiif,
   1175					&lpass_hdmi_regmap_config);
   1176		if (IS_ERR(drvdata->hdmiif_map)) {
   1177			dev_err(dev, "error initializing regmap: %ld\n",
   1178			PTR_ERR(drvdata->hdmiif_map));
   1179			return PTR_ERR(drvdata->hdmiif_map);
   1180		}
   1181	}
   1182
   1183	if (variant->init) {
   1184		ret = variant->init(pdev);
   1185		if (ret) {
   1186			dev_err(dev, "error initializing variant: %d\n", ret);
   1187			return ret;
   1188		}
   1189	}
   1190
   1191	for (i = 0; i < variant->num_dai; i++) {
   1192		dai_id = variant->dai_driver[i].id;
   1193		if (dai_id == LPASS_DP_RX || is_cdc_dma_port(dai_id))
   1194			continue;
   1195
   1196		drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev,
   1197					     variant->dai_osr_clk_names[i]);
   1198		drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
   1199						variant->dai_bit_clk_names[i]);
   1200		if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
   1201			dev_err(dev,
   1202				"error getting %s: %ld\n",
   1203				variant->dai_bit_clk_names[i],
   1204				PTR_ERR(drvdata->mi2s_bit_clk[dai_id]));
   1205			return PTR_ERR(drvdata->mi2s_bit_clk[dai_id]);
   1206		}
   1207		if (drvdata->mi2s_playback_sd_mode[dai_id] ==
   1208			LPAIF_I2SCTL_MODE_QUAD01) {
   1209			variant->dai_driver[dai_id].playback.channels_min = 4;
   1210			variant->dai_driver[dai_id].playback.channels_max = 4;
   1211		}
   1212	}
   1213
   1214	/* Allocation for i2sctl regmap fields */
    1215	drvdata->i2sctl = devm_kzalloc(&pdev->dev, sizeof(struct lpaif_i2sctl),
    1216					GFP_KERNEL);
        	if (!drvdata->i2sctl)
        		return -ENOMEM;
    1217
   1218	/* Initialize bitfields for dai I2SCTL register */
   1219	ret = lpass_cpu_init_i2sctl_bitfields(dev, drvdata->i2sctl,
   1220						drvdata->lpaif_map);
   1221	if (ret) {
   1222		dev_err(dev, "error init i2sctl field: %d\n", ret);
   1223		return ret;
   1224	}
   1225
   1226	if (drvdata->hdmi_port_enable) {
   1227		ret = lpass_hdmi_init_bitfields(dev, drvdata->hdmiif_map);
   1228		if (ret) {
    1229			dev_err(dev, "%s: hdmi init failed: %d\n", __func__, ret);
   1230			return ret;
   1231		}
   1232	}
   1233	ret = devm_snd_soc_register_component(dev,
   1234					      &lpass_cpu_comp_driver,
   1235					      variant->dai_driver,
   1236					      variant->num_dai);
   1237	if (ret) {
   1238		dev_err(dev, "error registering cpu driver: %d\n", ret);
   1239		goto err;
   1240	}
   1241
   1242	ret = asoc_qcom_lpass_platform_register(pdev);
   1243	if (ret) {
   1244		dev_err(dev, "error registering platform driver: %d\n", ret);
   1245		goto err;
   1246	}
   1247
   1248err:
   1249	return ret;
   1250}
   1251EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_probe);
   1252
   1253int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev)
   1254{
   1255	struct lpass_data *drvdata = platform_get_drvdata(pdev);
   1256
   1257	if (drvdata->variant->exit)
   1258		drvdata->variant->exit(pdev);
   1259
   1260
   1261	return 0;
   1262}
   1263EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove);
   1264
   1265void asoc_qcom_lpass_cpu_platform_shutdown(struct platform_device *pdev)
   1266{
   1267	struct lpass_data *drvdata = platform_get_drvdata(pdev);
   1268
   1269	if (drvdata->variant->exit)
   1270		drvdata->variant->exit(pdev);
   1271
   1272}
   1273EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_shutdown);
   1274
   1275MODULE_DESCRIPTION("QTi LPASS CPU Driver");
   1276MODULE_LICENSE("GPL v2");