cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

mmp_ctrl.c (15219B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * linux/drivers/video/mmp/hw/mmp_ctrl.c
      4 * Marvell MMP series Display Controller support
      5 *
      6 * Copyright (C) 2012 Marvell Technology Group Ltd.
      7 * Authors:  Guoqing Li <ligq@marvell.com>
      8 *          Lisa Du <cldu@marvell.com>
      9 *          Zhou Zhu <zzhu3@marvell.com>
     10 */
     11#include <linux/module.h>
     12#include <linux/moduleparam.h>
     13#include <linux/kernel.h>
     14#include <linux/errno.h>
     15#include <linux/string.h>
     16#include <linux/interrupt.h>
     17#include <linux/slab.h>
     18#include <linux/delay.h>
     19#include <linux/platform_device.h>
     20#include <linux/dma-mapping.h>
     21#include <linux/clk.h>
     22#include <linux/err.h>
     23#include <linux/vmalloc.h>
     24#include <linux/uaccess.h>
     25#include <linux/kthread.h>
     26#include <linux/io.h>
     27
     28#include "mmp_ctrl.h"
     29
/*
 * Interrupt handler for the display controller (registered IRQF_SHARED).
 *
 * Latches the pending status (SPU_IRQ_ISR) and the enable mask
 * (SPU_IRQ_ENA), then keeps acknowledging the latched bits until no
 * enabled interrupt remains pending.  Writing ~isr to SPU_IRQ_ISR
 * acknowledges exactly the bits set in @isr — assumes write-0-to-clear
 * semantics for this register (TODO: confirm against the register spec).
 *
 * NOTE(review): always returns IRQ_HANDLED, even when none of our bits
 * were pending; on a shared line IRQ_NONE would normally be returned for
 * spurious invocations — confirm this is intentional.
 */
static irqreturn_t ctrl_handle_irq(int irq, void *dev_id)
{
	struct mmphw_ctrl *ctrl = (struct mmphw_ctrl *)dev_id;
	u32 isr, imask, tmp;

	isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
	imask = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);

	do {
		/* clear clock only */
		tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR);
		if (tmp & isr)
			writel_relaxed(~isr, ctrl->reg_base + SPU_IRQ_ISR);
	} while ((isr = readl_relaxed(ctrl->reg_base + SPU_IRQ_ISR)) & imask);

	return IRQ_HANDLED;
}
     47
     48static u32 fmt_to_reg(struct mmp_overlay *overlay, int pix_fmt)
     49{
     50	u32 rbswap = 0, uvswap = 0, yuvswap = 0,
     51		csc_en = 0, val = 0,
     52		vid = overlay_is_vid(overlay);
     53
     54	switch (pix_fmt) {
     55	case PIXFMT_RGB565:
     56	case PIXFMT_RGB1555:
     57	case PIXFMT_RGB888PACK:
     58	case PIXFMT_RGB888UNPACK:
     59	case PIXFMT_RGBA888:
     60		rbswap = 1;
     61		break;
     62	case PIXFMT_VYUY:
     63	case PIXFMT_YVU422P:
     64	case PIXFMT_YVU420P:
     65		uvswap = 1;
     66		break;
     67	case PIXFMT_YUYV:
     68		yuvswap = 1;
     69		break;
     70	default:
     71		break;
     72	}
     73
     74	switch (pix_fmt) {
     75	case PIXFMT_RGB565:
     76	case PIXFMT_BGR565:
     77		break;
     78	case PIXFMT_RGB1555:
     79	case PIXFMT_BGR1555:
     80		val = 0x1;
     81		break;
     82	case PIXFMT_RGB888PACK:
     83	case PIXFMT_BGR888PACK:
     84		val = 0x2;
     85		break;
     86	case PIXFMT_RGB888UNPACK:
     87	case PIXFMT_BGR888UNPACK:
     88		val = 0x3;
     89		break;
     90	case PIXFMT_RGBA888:
     91	case PIXFMT_BGRA888:
     92		val = 0x4;
     93		break;
     94	case PIXFMT_UYVY:
     95	case PIXFMT_VYUY:
     96	case PIXFMT_YUYV:
     97		val = 0x5;
     98		csc_en = 1;
     99		break;
    100	case PIXFMT_YUV422P:
    101	case PIXFMT_YVU422P:
    102		val = 0x6;
    103		csc_en = 1;
    104		break;
    105	case PIXFMT_YUV420P:
    106	case PIXFMT_YVU420P:
    107		val = 0x7;
    108		csc_en = 1;
    109		break;
    110	default:
    111		break;
    112	}
    113
    114	return (dma_palette(0) | dma_fmt(vid, val) |
    115		dma_swaprb(vid, rbswap) | dma_swapuv(vid, uvswap) |
    116		dma_swapyuv(vid, yuvswap) | dma_csc(vid, csc_en));
    117}
    118
/*
 * Program the DMA pixel-format bits of the path's dma_ctrl0 register
 * from overlay->win.pix_fmt.  Read-modify-write: only the bit range
 * belonging to this overlay's layer type (dma_mask of video vs.
 * graphics) is replaced; the rest of the register is preserved.
 * Caller (overlay_set_win()) holds overlay->access_ok.
 */
static void dmafetch_set_fmt(struct mmp_overlay *overlay)
{
	u32 tmp;
	struct mmp_path *path = overlay->path;
	tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
	tmp &= ~dma_mask(overlay_is_vid(overlay));
	tmp |= fmt_to_reg(overlay, overlay->win.pix_fmt);
	writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
}
    128
    129static void overlay_set_win(struct mmp_overlay *overlay, struct mmp_win *win)
    130{
    131	struct lcd_regs *regs = path_regs(overlay->path);
    132
    133	/* assert win supported */
    134	memcpy(&overlay->win, win, sizeof(struct mmp_win));
    135
    136	mutex_lock(&overlay->access_ok);
    137
    138	if (overlay_is_vid(overlay)) {
    139		writel_relaxed(win->pitch[0],
    140				(void __iomem *)&regs->v_pitch_yc);
    141		writel_relaxed(win->pitch[2] << 16 | win->pitch[1],
    142				(void __iomem *)&regs->v_pitch_uv);
    143
    144		writel_relaxed((win->ysrc << 16) | win->xsrc,
    145				(void __iomem *)&regs->v_size);
    146		writel_relaxed((win->ydst << 16) | win->xdst,
    147				(void __iomem *)&regs->v_size_z);
    148		writel_relaxed(win->ypos << 16 | win->xpos,
    149				(void __iomem *)&regs->v_start);
    150	} else {
    151		writel_relaxed(win->pitch[0], (void __iomem *)&regs->g_pitch);
    152
    153		writel_relaxed((win->ysrc << 16) | win->xsrc,
    154				(void __iomem *)&regs->g_size);
    155		writel_relaxed((win->ydst << 16) | win->xdst,
    156				(void __iomem *)&regs->g_size_z);
    157		writel_relaxed(win->ypos << 16 | win->xpos,
    158				(void __iomem *)&regs->g_start);
    159	}
    160
    161	dmafetch_set_fmt(overlay);
    162	mutex_unlock(&overlay->access_ok);
    163}
    164
/*
 * Enable or disable DMA fetching for @overlay by toggling the enable
 * bit of its layer (CFG_DMA_ENA for video, CFG_GRA_ENA for graphics)
 * in the path's dma_ctrl0 register, under overlay->access_ok.
 *
 * NOTE(review): the final store uses writel() (with barrier) while the
 * rest of this file uses writel_relaxed(); possibly intentional so the
 * enable/disable takes effect before the lock is dropped — confirm.
 */
static void dmafetch_onoff(struct mmp_overlay *overlay, int on)
{
	u32 mask = overlay_is_vid(overlay) ? CFG_DMA_ENA_MASK :
		   CFG_GRA_ENA_MASK;
	u32 enable = overlay_is_vid(overlay) ? CFG_DMA_ENA(1) : CFG_GRA_ENA(1);
	u32 tmp;
	struct mmp_path *path = overlay->path;

	mutex_lock(&overlay->access_ok);
	tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
	tmp &= ~mask;
	tmp |= (on ? enable : 0);
	writel(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
	mutex_unlock(&overlay->access_ok);
}
    180
/*
 * Gate or ungate the path's pixel clock by toggling SCLK_DISABLE in the
 * LCD_SCLK register (read-modify-write under path->access_ok).
 * @on != 0 clears the disable bit (clock running); @on == 0 sets it.
 */
static void path_enabledisable(struct mmp_path *path, int on)
{
	u32 tmp;
	mutex_lock(&path->access_ok);
	tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
	if (on)
		tmp &= ~SCLK_DISABLE;
	else
		tmp |= SCLK_DISABLE;
	writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
	mutex_unlock(&path->access_ok);
}
    193
    194static void path_onoff(struct mmp_path *path, int on)
    195{
    196	if (path->status == on) {
    197		dev_info(path->dev, "path %s is already %s\n",
    198				path->name, stat_name(path->status));
    199		return;
    200	}
    201
    202	if (on) {
    203		path_enabledisable(path, 1);
    204
    205		if (path->panel && path->panel->set_onoff)
    206			path->panel->set_onoff(path->panel, 1);
    207	} else {
    208		if (path->panel && path->panel->set_onoff)
    209			path->panel->set_onoff(path->panel, 0);
    210
    211		path_enabledisable(path, 0);
    212	}
    213	path->status = on;
    214}
    215
/*
 * Switch @overlay on or off.  No-op (with an info message) when the
 * overlay is already in the requested state.
 *
 * The cached overlay status is updated before the hardware is touched;
 * the parent path is toggled only when its hardware state (reported by
 * ops.check_status) disagrees with the cached path->status.
 *
 * NOTE(review): overlay->status is written without overlay->access_ok —
 * presumably callers serialize on/off requests; confirm.
 */
static void overlay_set_onoff(struct mmp_overlay *overlay, int on)
{
	if (overlay->status == on) {
		dev_info(overlay_to_ctrl(overlay)->dev, "overlay %s is already %s\n",
			overlay->path->name, stat_name(overlay->status));
		return;
	}
	overlay->status = on;
	dmafetch_onoff(overlay, on);
	if (overlay->path->ops.check_status(overlay->path)
			!= overlay->path->status)
		path_onoff(overlay->path, on);
}
    229
/*
 * Record the DMA fetch engine id to use for @overlay.  Pure bookkeeping:
 * no hardware is touched here and no locking is taken.
 */
static void overlay_set_fetch(struct mmp_overlay *overlay, int fetch_id)
{
	overlay->dmafetch_id = fetch_id;
}
    234
/*
 * Latch new framebuffer base address(es) for @overlay into the path
 * registers: Y/U/V planes for a video overlay, a single plane for the
 * graphics overlay.  The addresses are also cached in overlay->addr.
 *
 * Returns the cached physical address of plane 0.
 *
 * NOTE(review): unlike overlay_set_win(), this runs without taking
 * overlay->access_ok, and the FIXME about validating @addr is still
 * open — confirm callers serialize address flips.
 */
static int overlay_set_addr(struct mmp_overlay *overlay, struct mmp_addr *addr)
{
	struct lcd_regs *regs = path_regs(overlay->path);

	/* FIXME: assert addr supported */
	memcpy(&overlay->addr, addr, sizeof(struct mmp_addr));

	if (overlay_is_vid(overlay)) {
		writel_relaxed(addr->phys[0], (void __iomem *)&regs->v_y0);
		writel_relaxed(addr->phys[1], (void __iomem *)&regs->v_u0);
		writel_relaxed(addr->phys[2], (void __iomem *)&regs->v_v0);
	} else
		writel_relaxed(addr->phys[0], (void __iomem *)&regs->g_0);

	return overlay->addr.phys[0];
}
    251
    252static void path_set_mode(struct mmp_path *path, struct mmp_mode *mode)
    253{
    254	struct lcd_regs *regs = path_regs(path);
    255	u32 total_x, total_y, vsync_ctrl, tmp, sclk_src, sclk_div,
    256		link_config = path_to_path_plat(path)->link_config,
    257		dsi_rbswap = path_to_path_plat(path)->link_config;
    258
    259	/* FIXME: assert videomode supported */
    260	memcpy(&path->mode, mode, sizeof(struct mmp_mode));
    261
    262	mutex_lock(&path->access_ok);
    263
    264	/* polarity of timing signals */
    265	tmp = readl_relaxed(ctrl_regs(path) + intf_ctrl(path->id)) & 0x1;
    266	tmp |= mode->vsync_invert ? 0 : 0x8;
    267	tmp |= mode->hsync_invert ? 0 : 0x4;
    268	tmp |= link_config & CFG_DUMBMODE_MASK;
    269	tmp |= CFG_DUMB_ENA(1);
    270	writel_relaxed(tmp, ctrl_regs(path) + intf_ctrl(path->id));
    271
    272	/* interface rb_swap setting */
    273	tmp = readl_relaxed(ctrl_regs(path) + intf_rbswap_ctrl(path->id)) &
    274		(~(CFG_INTFRBSWAP_MASK));
    275	tmp |= dsi_rbswap & CFG_INTFRBSWAP_MASK;
    276	writel_relaxed(tmp, ctrl_regs(path) + intf_rbswap_ctrl(path->id));
    277
    278	writel_relaxed((mode->yres << 16) | mode->xres,
    279		(void __iomem *)&regs->screen_active);
    280	writel_relaxed((mode->left_margin << 16) | mode->right_margin,
    281		(void __iomem *)&regs->screen_h_porch);
    282	writel_relaxed((mode->upper_margin << 16) | mode->lower_margin,
    283		(void __iomem *)&regs->screen_v_porch);
    284	total_x = mode->xres + mode->left_margin + mode->right_margin +
    285		mode->hsync_len;
    286	total_y = mode->yres + mode->upper_margin + mode->lower_margin +
    287		mode->vsync_len;
    288	writel_relaxed((total_y << 16) | total_x,
    289		(void __iomem *)&regs->screen_size);
    290
    291	/* vsync ctrl */
    292	if (path->output_type == PATH_OUT_DSI)
    293		vsync_ctrl = 0x01330133;
    294	else
    295		vsync_ctrl = ((mode->xres + mode->right_margin) << 16)
    296					| (mode->xres + mode->right_margin);
    297	writel_relaxed(vsync_ctrl, (void __iomem *)&regs->vsync_ctrl);
    298
    299	/* set pixclock div */
    300	sclk_src = clk_get_rate(path_to_ctrl(path)->clk);
    301	sclk_div = sclk_src / mode->pixclock_freq;
    302	if (sclk_div * mode->pixclock_freq < sclk_src)
    303		sclk_div++;
    304
    305	dev_info(path->dev, "%s sclk_src %d sclk_div 0x%x pclk %d\n",
    306			__func__, sclk_src, sclk_div, mode->pixclock_freq);
    307
    308	tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
    309	tmp &= ~CLK_INT_DIV_MASK;
    310	tmp |= sclk_div;
    311	writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));
    312
    313	mutex_unlock(&path->access_ok);
    314}
    315
/* Overlay operations table handed to the mmp display core via path_init(). */
static struct mmp_overlay_ops mmphw_overlay_ops = {
	.set_fetch = overlay_set_fetch,
	.set_onoff = overlay_set_onoff,
	.set_win = overlay_set_win,
	.set_addr = overlay_set_addr,
};
    322
/*
 * One-time global controller setup: program LCD_TOP_CTRL first (the
 * hardware requires it before any other LCD register access), then
 * write the path/error interrupt mask bits in SPU_IRQ_ENA.
 */
static void ctrl_set_default(struct mmphw_ctrl *ctrl)
{
	u32 tmp, irq_mask;

	/*
	 * LCD Global control(LCD_TOP_CTRL) should be configed before
	 * any other LCD registers read/write, or there maybe issues.
	 */
	tmp = readl_relaxed(ctrl->reg_base + LCD_TOP_CTRL);
	tmp |= 0xfff0;
	writel_relaxed(tmp, ctrl->reg_base + LCD_TOP_CTRL);


	/*
	 * NOTE(review): this block was commented "disable all interrupts",
	 * but clearing the mask bits and then OR-ing the very same mask
	 * back in leaves them SET, i.e. the write ultimately ENABLES the
	 * path/error interrupts in SPU_IRQ_ENA.  Either the
	 * "tmp |= irq_mask" line or the stated intent is wrong — confirm
	 * against the SPU_IRQ_ENA spec before changing the code.
	 */
	irq_mask = path_imasks(0) | err_imask(0) |
		   path_imasks(1) | err_imask(1);
	tmp = readl_relaxed(ctrl->reg_base + SPU_IRQ_ENA);
	tmp &= ~irq_mask;
	tmp |= irq_mask;
	writel_relaxed(tmp, ctrl->reg_base + SPU_IRQ_ENA);
}
    344
/*
 * Program per-path power-on defaults from the platform's path_config:
 * IOPAD mode (parallel output only), pixel clock source, dma_ctrl1
 * (vsync-triggered DMA, power save, 100% graphics alpha, pixel
 * command), a few zeroed registers, and dma_ctrl0 smoothing/arbiter
 * bits.
 *
 * NOTE(review): the same path_config value masks both
 * CFG_IOPADMODE/BURST/BOUNDARY and SCLK_SRC_SEL bits — presumably the
 * platform packs all of these into one word; confirm against the
 * mach config definition.
 */
static void path_set_default(struct mmp_path *path)
{
	struct lcd_regs *regs = path_regs(path);
	u32 dma_ctrl1, mask, tmp, path_config;

	path_config = path_to_path_plat(path)->path_config;

	/* Configure IOPAD: should be parallel only */
	if (PATH_OUT_PARALLEL == path->output_type) {
		mask = CFG_IOPADMODE_MASK | CFG_BURST_MASK | CFG_BOUNDARY_MASK;
		tmp = readl_relaxed(ctrl_regs(path) + SPU_IOPAD_CONTROL);
		tmp &= ~mask;
		tmp |= path_config;
		writel_relaxed(tmp, ctrl_regs(path) + SPU_IOPAD_CONTROL);
	}

	/* Select path clock source */
	tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path));
	tmp &= ~SCLK_SRC_SEL_MASK;
	tmp |= path_config;
	writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path));

	/*
	 * Configure default bits: vsync triggers DMA,
	 * power save enable, configure alpha registers to
	 * display 100% graphics, and set pixel command.
	 */
	dma_ctrl1 = 0x2032ff81;

	dma_ctrl1 |= CFG_VSYNC_INV_MASK;
	writel_relaxed(dma_ctrl1, ctrl_regs(path) + dma_ctrl(1, path->id));

	/* Configure default register values */
	writel_relaxed(0x00000000, (void __iomem *)&regs->blank_color);
	writel_relaxed(0x00000000, (void __iomem *)&regs->g_1);
	writel_relaxed(0x00000000, (void __iomem *)&regs->g_start);

	/*
	 * 1.enable multiple burst request in DMA AXI
	 * bus arbiter for faster read if not tv path;
	 * 2.enable horizontal smooth filter;
	 */
	mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK | CFG_ARBFAST_ENA(1);
	tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id));
	tmp |= mask;
	if (PATH_TV == path->id)
		tmp &= ~CFG_ARBFAST_ENA(1);
	writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id));
}
    394
    395static int path_init(struct mmphw_path_plat *path_plat,
    396		struct mmp_mach_path_config *config)
    397{
    398	struct mmphw_ctrl *ctrl = path_plat->ctrl;
    399	struct mmp_path_info *path_info;
    400	struct mmp_path *path = NULL;
    401
    402	dev_info(ctrl->dev, "%s: %s\n", __func__, config->name);
    403
    404	/* init driver data */
    405	path_info = kzalloc(sizeof(*path_info), GFP_KERNEL);
    406	if (!path_info)
    407		return 0;
    408
    409	path_info->name = config->name;
    410	path_info->id = path_plat->id;
    411	path_info->dev = ctrl->dev;
    412	path_info->overlay_num = config->overlay_num;
    413	path_info->overlay_ops = &mmphw_overlay_ops;
    414	path_info->set_mode = path_set_mode;
    415	path_info->plat_data = path_plat;
    416
    417	/* create/register platform device */
    418	path = mmp_register_path(path_info);
    419	if (!path) {
    420		kfree(path_info);
    421		return 0;
    422	}
    423	path_plat->path = path;
    424	path_plat->path_config = config->path_config;
    425	path_plat->link_config = config->link_config;
    426	path_plat->dsi_rbswap = config->dsi_rbswap;
    427	path_set_default(path);
    428
    429	kfree(path_info);
    430	return 1;
    431}
    432
    433static void path_deinit(struct mmphw_path_plat *path_plat)
    434{
    435	if (!path_plat)
    436		return;
    437
    438	mmp_unregister_path(path_plat->path);
    439}
    440
    441static int mmphw_probe(struct platform_device *pdev)
    442{
    443	struct mmp_mach_plat_info *mi;
    444	struct resource *res;
    445	int ret, i, irq;
    446	struct mmphw_path_plat *path_plat;
    447	struct mmphw_ctrl *ctrl = NULL;
    448
    449	/* get resources from platform data */
    450	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    451	if (res == NULL) {
    452		dev_err(&pdev->dev, "%s: no IO memory defined\n", __func__);
    453		ret = -ENOENT;
    454		goto failed;
    455	}
    456
    457	irq = platform_get_irq(pdev, 0);
    458	if (irq < 0) {
    459		ret = -ENOENT;
    460		goto failed;
    461	}
    462
    463	/* get configs from platform data */
    464	mi = pdev->dev.platform_data;
    465	if (mi == NULL || !mi->path_num || !mi->paths) {
    466		dev_err(&pdev->dev, "%s: no platform data defined\n", __func__);
    467		ret = -EINVAL;
    468		goto failed;
    469	}
    470
    471	/* allocate */
    472	ctrl = devm_kzalloc(&pdev->dev,
    473			    struct_size(ctrl, path_plats, mi->path_num),
    474			    GFP_KERNEL);
    475	if (!ctrl) {
    476		ret = -ENOMEM;
    477		goto failed;
    478	}
    479
    480	ctrl->name = mi->name;
    481	ctrl->path_num = mi->path_num;
    482	ctrl->dev = &pdev->dev;
    483	ctrl->irq = irq;
    484	platform_set_drvdata(pdev, ctrl);
    485	mutex_init(&ctrl->access_ok);
    486
    487	/* map registers.*/
    488	if (!devm_request_mem_region(ctrl->dev, res->start,
    489			resource_size(res), ctrl->name)) {
    490		dev_err(ctrl->dev,
    491			"can't request region for resource %pR\n", res);
    492		ret = -EINVAL;
    493		goto failed;
    494	}
    495
    496	ctrl->reg_base = devm_ioremap(ctrl->dev,
    497			res->start, resource_size(res));
    498	if (ctrl->reg_base == NULL) {
    499		dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
    500		ret = -ENOMEM;
    501		goto failed;
    502	}
    503
    504	/* request irq */
    505	ret = devm_request_irq(ctrl->dev, ctrl->irq, ctrl_handle_irq,
    506		IRQF_SHARED, "lcd_controller", ctrl);
    507	if (ret < 0) {
    508		dev_err(ctrl->dev, "%s unable to request IRQ %d\n",
    509				__func__, ctrl->irq);
    510		ret = -ENXIO;
    511		goto failed;
    512	}
    513
    514	/* get clock */
    515	ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
    516	if (IS_ERR(ctrl->clk)) {
    517		dev_err_probe(ctrl->dev, ret,
    518			      "unable to get clk %s\n", mi->clk_name);
    519		ret = -ENOENT;
    520		goto failed;
    521	}
    522	clk_prepare_enable(ctrl->clk);
    523
    524	/* init global regs */
    525	ctrl_set_default(ctrl);
    526
    527	/* init pathes from machine info and register them */
    528	for (i = 0; i < ctrl->path_num; i++) {
    529		/* get from config and machine info */
    530		path_plat = &ctrl->path_plats[i];
    531		path_plat->id = i;
    532		path_plat->ctrl = ctrl;
    533
    534		/* path init */
    535		if (!path_init(path_plat, &mi->paths[i])) {
    536			ret = -EINVAL;
    537			goto failed_path_init;
    538		}
    539	}
    540
    541#ifdef CONFIG_MMP_DISP_SPI
    542	ret = lcd_spi_register(ctrl);
    543	if (ret < 0)
    544		goto failed_path_init;
    545#endif
    546
    547	dev_info(ctrl->dev, "device init done\n");
    548
    549	return 0;
    550
    551failed_path_init:
    552	for (i = 0; i < ctrl->path_num; i++) {
    553		path_plat = &ctrl->path_plats[i];
    554		path_deinit(path_plat);
    555	}
    556
    557	clk_disable_unprepare(ctrl->clk);
    558failed:
    559	dev_err(&pdev->dev, "device init failed\n");
    560
    561	return ret;
    562}
    563
/*
 * Platform driver glue.  NOTE(review): no .remove callback — paths
 * registered in probe are never unregistered and the clock is left
 * prepared/enabled if the device is ever unbound; confirm whether
 * unbind is expected for this platform.
 */
static struct platform_driver mmphw_driver = {
	.driver		= {
		.name	= "mmp-disp",
	},
	.probe		= mmphw_probe,
};
    570
/* Module entry point: register the platform driver. */
static int mmphw_init(void)
{
	return platform_driver_register(&mmphw_driver);
}
/*
 * NOTE(review): no matching module_exit() is provided, so the module
 * cannot be unloaded once inserted; module_platform_driver() would
 * supply both init and exit if unload support is wanted.
 */
module_init(mmphw_init);

MODULE_AUTHOR("Li Guoqing<ligq@marvell.com>");
MODULE_DESCRIPTION("Framebuffer driver for mmp");
MODULE_LICENSE("GPL");