cal.c (32658B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI Camera Access Layer (CAL) - Driver
 *
 * Copyright (c) 2015-2020 Texas Instruments Inc.
 *
 * Authors:
 *	Benoit Parrot <bparrot@ti.com>
 *	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/videodev2.h>

#include <media/media-device.h>
#include <media/v4l2-async.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

#include "cal.h"
#include "cal_regs.h"

MODULE_DESCRIPTION("TI CAL driver");
MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1.0");

int cal_video_nr = -1;
module_param_named(video_nr, cal_video_nr, uint, 0644);
MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");

unsigned int cal_debug;
module_param_named(debug, cal_debug, uint, 0644);
MODULE_PARM_DESC(debug, "activates debug info");

#ifdef CONFIG_VIDEO_TI_CAL_MC
#define CAL_MC_API_DEFAULT 1
#else
#define CAL_MC_API_DEFAULT 0
#endif

bool cal_mc_api = CAL_MC_API_DEFAULT;
module_param_named(mc_api, cal_mc_api, bool, 0444);
MODULE_PARM_DESC(mc_api, "activates the MC API");

/* ------------------------------------------------------------------
 *	Format Handling
 * ------------------------------------------------------------------
 */

const struct cal_format_info cal_formats[] = {
	{
		.fourcc = V4L2_PIX_FMT_YUYV,
		.code = MEDIA_BUS_FMT_YUYV8_2X8,
		.bpp = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_UYVY,
		.code = MEDIA_BUS_FMT_UYVY8_2X8,
		.bpp = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_YVYU,
		.code = MEDIA_BUS_FMT_YVYU8_2X8,
		.bpp = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_VYUY,
		.code = MEDIA_BUS_FMT_VYUY8_2X8,
		.bpp = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
		.code = MEDIA_BUS_FMT_RGB565_2X8_LE,
		.bpp = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
		.code = MEDIA_BUS_FMT_RGB565_2X8_BE,
		.bpp = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
		.code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
		.bpp = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
		.code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
		.bpp = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
		.code = MEDIA_BUS_FMT_RGB888_2X12_LE,
		.bpp = 24,
	}, {
		.fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
		.code = MEDIA_BUS_FMT_RGB888_2X12_BE,
		.bpp = 24,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB32, /* argb */
		.code = MEDIA_BUS_FMT_ARGB8888_1X32,
		.bpp = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_SBGGR8,
		.code = MEDIA_BUS_FMT_SBGGR8_1X8,
		.bpp = 8,
	}, {
		.fourcc = V4L2_PIX_FMT_SGBRG8,
		.code = MEDIA_BUS_FMT_SGBRG8_1X8,
		.bpp = 8,
	}, {
		.fourcc = V4L2_PIX_FMT_SGRBG8,
		.code = MEDIA_BUS_FMT_SGRBG8_1X8,
		.bpp = 8,
	}, {
		.fourcc = V4L2_PIX_FMT_SRGGB8,
		.code = MEDIA_BUS_FMT_SRGGB8_1X8,
		.bpp = 8,
	}, {
		.fourcc = V4L2_PIX_FMT_SBGGR10,
		.code = MEDIA_BUS_FMT_SBGGR10_1X10,
		.bpp = 10,
	}, {
		.fourcc = V4L2_PIX_FMT_SGBRG10,
		.code = MEDIA_BUS_FMT_SGBRG10_1X10,
		.bpp = 10,
	}, {
		.fourcc = V4L2_PIX_FMT_SGRBG10,
		.code = MEDIA_BUS_FMT_SGRBG10_1X10,
		.bpp = 10,
	}, {
		.fourcc = V4L2_PIX_FMT_SRGGB10,
		.code = MEDIA_BUS_FMT_SRGGB10_1X10,
		.bpp = 10,
	}, {
		.fourcc = V4L2_PIX_FMT_SBGGR12,
		.code = MEDIA_BUS_FMT_SBGGR12_1X12,
		.bpp = 12,
	}, {
		.fourcc = V4L2_PIX_FMT_SGBRG12,
		.code = MEDIA_BUS_FMT_SGBRG12_1X12,
		.bpp = 12,
	}, {
		.fourcc = V4L2_PIX_FMT_SGRBG12,
		.code = MEDIA_BUS_FMT_SGRBG12_1X12,
		.bpp = 12,
	}, {
		.fourcc = V4L2_PIX_FMT_SRGGB12,
		.code = MEDIA_BUS_FMT_SRGGB12_1X12,
		.bpp = 12,
	},
};

const unsigned int cal_num_formats = ARRAY_SIZE(cal_formats);

const struct cal_format_info *cal_format_by_fourcc(u32 fourcc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) {
		if (cal_formats[i].fourcc == fourcc)
			return &cal_formats[i];
	}

	return NULL;
}

const struct cal_format_info *cal_format_by_code(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) {
		if (cal_formats[i].code == code)
			return &cal_formats[i];
	}

	return NULL;
}

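/*
 * Illustrative sketch (not part of the driver): how a caller could use the
 * lookup helpers above. cal_format_by_fourcc() returns NULL for unknown
 * pixel formats, so callers need a fallback. The 16-byte stride alignment
 * below is an assumption for the example only; the real format negotiation
 * lives elsewhere (presumably cal-video.c). Note that the 10- and 12-bpp
 * formats are unpacked to 16 bits per pixel in memory by the pixel
 * processing units (see cal_ctx_pix_proc_config() below).
 *
 *	const struct cal_format_info *info;
 *
 *	info = cal_format_by_fourcc(V4L2_PIX_FMT_SGRBG12);
 *	if (!info)
 *		info = cal_format_by_fourcc(V4L2_PIX_FMT_YUYV);
 *
 *	bytesperline = ALIGN(width * ALIGN(info->bpp, 8) / 8, 16);
 */
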
/* ------------------------------------------------------------------
 *	Platform Data
 * ------------------------------------------------------------------
 */

static const struct cal_camerarx_data dra72x_cal_camerarx[] = {
	{
		.fields = {
			[F_CTRLCLKEN] = { 10, 10 },
			[F_CAMMODE] = { 11, 12 },
			[F_LANEENABLE] = { 13, 16 },
			[F_CSI_MODE] = { 17, 17 },
		},
		.num_lanes = 4,
	},
	{
		.fields = {
			[F_CTRLCLKEN] = { 0, 0 },
			[F_CAMMODE] = { 1, 2 },
			[F_LANEENABLE] = { 3, 4 },
			[F_CSI_MODE] = { 5, 5 },
		},
		.num_lanes = 2,
	},
};

static const struct cal_data dra72x_cal_data = {
	.camerarx = dra72x_cal_camerarx,
	.num_csi2_phy = ARRAY_SIZE(dra72x_cal_camerarx),
};

static const struct cal_data dra72x_es1_cal_data = {
	.camerarx = dra72x_cal_camerarx,
	.num_csi2_phy = ARRAY_SIZE(dra72x_cal_camerarx),
	.flags = DRA72_CAL_PRE_ES2_LDO_DISABLE,
};

static const struct cal_camerarx_data dra76x_cal_csi_phy[] = {
	{
		.fields = {
			[F_CTRLCLKEN] = { 8, 8 },
			[F_CAMMODE] = { 9, 10 },
			[F_CSI_MODE] = { 11, 11 },
			[F_LANEENABLE] = { 27, 31 },
		},
		.num_lanes = 5,
	},
	{
		.fields = {
			[F_CTRLCLKEN] = { 0, 0 },
			[F_CAMMODE] = { 1, 2 },
			[F_CSI_MODE] = { 3, 3 },
			[F_LANEENABLE] = { 24, 26 },
		},
		.num_lanes = 3,
	},
};

static const struct cal_data dra76x_cal_data = {
	.camerarx = dra76x_cal_csi_phy,
	.num_csi2_phy = ARRAY_SIZE(dra76x_cal_csi_phy),
};

static const struct cal_camerarx_data am654_cal_csi_phy[] = {
	{
		.fields = {
			[F_CTRLCLKEN] = { 15, 15 },
			[F_CAMMODE] = { 24, 25 },
			[F_LANEENABLE] = { 0, 4 },
		},
		.num_lanes = 5,
	},
};

static const struct cal_data am654_cal_data = {
	.camerarx = am654_cal_csi_phy,
	.num_csi2_phy = ARRAY_SIZE(am654_cal_csi_phy),
};

/* ------------------------------------------------------------------
 *	I/O Register Accessors
 * ------------------------------------------------------------------
 */

void cal_quickdump_regs(struct cal_dev *cal)
{
	unsigned int i;

	cal_info(cal, "CAL Registers @ 0x%pa:\n", &cal->res->start);
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
		       (__force const void *)cal->base,
		       resource_size(cal->res), false);

	for (i = 0; i < cal->data->num_csi2_phy; ++i) {
		struct cal_camerarx *phy = cal->phy[i];

		cal_info(cal, "CSI2 Core %u Registers @ %pa:\n", i,
			 &phy->res->start);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
			       (__force const void *)phy->base,
			       resource_size(phy->res),
			       false);
	}
}

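/*
 * Sketch (assumptions flagged): each .fields[] entry above describes a
 * { lsb, msb } bit range inside the SoC's camerrx control register; the
 * struct itself is defined in cal.h. Assuming members named lsb and msb,
 * a consumer (presumably cal-camerarx.c) would turn an entry into a regmap
 * update along these lines, with lane_enable_bits a placeholder value:
 *
 *	const struct cal_camerarx_data *data = &dra72x_cal_camerarx[0];
 *	u32 mask = GENMASK(data->fields[F_LANEENABLE].msb,
 *			   data->fields[F_LANEENABLE].lsb);
 *
 *	regmap_update_bits(cal->syscon_camerrx, cal->syscon_camerrx_offset,
 *			   mask, lane_enable_bits);
 */
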
/* ------------------------------------------------------------------
 *	Context Management
 * ------------------------------------------------------------------
 */

#define CAL_MAX_PIX_PROC 4

static int cal_reserve_pix_proc(struct cal_dev *cal)
{
	unsigned long ret;

	spin_lock(&cal->v4l2_dev.lock);

	ret = find_first_zero_bit(&cal->reserved_pix_proc_mask, CAL_MAX_PIX_PROC);

	if (ret == CAL_MAX_PIX_PROC) {
		spin_unlock(&cal->v4l2_dev.lock);
		return -ENOSPC;
	}

	cal->reserved_pix_proc_mask |= BIT(ret);

	spin_unlock(&cal->v4l2_dev.lock);

	return ret;
}

static void cal_release_pix_proc(struct cal_dev *cal, unsigned int pix_proc_num)
{
	spin_lock(&cal->v4l2_dev.lock);

	cal->reserved_pix_proc_mask &= ~BIT(pix_proc_num);

	spin_unlock(&cal->v4l2_dev.lock);
}

static void cal_ctx_csi2_config(struct cal_ctx *ctx)
{
	u32 val;

	val = cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx));
	cal_set_field(&val, ctx->cport, CAL_CSI2_CTX_CPORT_MASK);
	/*
	 * DT type: MIPI CSI-2 Specs
	 *  0x1: All - DT filter is disabled
	 *  0x24: RGB888 1 pixel = 3 bytes
	 *  0x2B: RAW10 4 pixels = 5 bytes
	 *  0x2A: RAW8 1 pixel = 1 byte
	 *  0x1E: YUV422 2 pixels = 4 bytes
	 */
	cal_set_field(&val, ctx->datatype, CAL_CSI2_CTX_DT_MASK);
	cal_set_field(&val, ctx->vc, CAL_CSI2_CTX_VC_MASK);
	cal_set_field(&val, ctx->v_fmt.fmt.pix.height, CAL_CSI2_CTX_LINES_MASK);
	cal_set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
	cal_set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
		      CAL_CSI2_CTX_PACK_MODE_MASK);
	cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), val);
	ctx_dbg(3, ctx, "CAL_CSI2_CTX(%u, %u) = 0x%08x\n",
		ctx->phy->instance, ctx->csi2_ctx,
		cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx)));
}

static void cal_ctx_pix_proc_config(struct cal_ctx *ctx)
{
	u32 val, extract, pack;

	switch (ctx->fmtinfo->bpp) {
	case 8:
		extract = CAL_PIX_PROC_EXTRACT_B8;
		pack = CAL_PIX_PROC_PACK_B8;
		break;
	case 10:
		extract = CAL_PIX_PROC_EXTRACT_B10_MIPI;
		pack = CAL_PIX_PROC_PACK_B16;
		break;
	case 12:
		extract = CAL_PIX_PROC_EXTRACT_B12_MIPI;
		pack = CAL_PIX_PROC_PACK_B16;
		break;
	case 16:
		extract = CAL_PIX_PROC_EXTRACT_B16_LE;
		pack = CAL_PIX_PROC_PACK_B16;
		break;
	default:
		/*
		 * If you see this warning then it means that a new entry was
		 * added to the cal_formats[] array with a bits-per-pixel
		 * value that this switch does not support. Either add support
		 * for the new bpp value or adjust the new entry to use one of
		 * the supported values.
		 *
		 * Instead of failing here, just fall back to 8 bpp.
		 */
		dev_warn_once(ctx->cal->dev,
			      "%s:%d:%s: bpp:%d unsupported! Overwritten with 8.\n",
			      __FILE__, __LINE__, __func__, ctx->fmtinfo->bpp);
		extract = CAL_PIX_PROC_EXTRACT_B8;
		pack = CAL_PIX_PROC_PACK_B8;
		break;
	}

	val = cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc));
	cal_set_field(&val, extract, CAL_PIX_PROC_EXTRACT_MASK);
	cal_set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
	cal_set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
	cal_set_field(&val, pack, CAL_PIX_PROC_PACK_MASK);
	cal_set_field(&val, ctx->cport, CAL_PIX_PROC_CPORT_MASK);
	cal_set_field(&val, 1, CAL_PIX_PROC_EN_MASK);
	cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), val);
	ctx_dbg(3, ctx, "CAL_PIX_PROC(%u) = 0x%08x\n", ctx->pix_proc,
		cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc)));
}

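/*
 * Sketch of the read-modify-write pattern used throughout this file.
 * cal_set_field() is defined in cal.h; assuming the usual semantics of
 * shifting the value up to the mask's lowest set bit, a call such as
 * cal_set_field(&val, 1, CAL_PIX_PROC_EN_MASK) followed by cal_write()
 * expands to roughly:
 *
 *	u32 val = cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc));
 *
 *	val &= ~CAL_PIX_PROC_EN_MASK;
 *	val |= (1 << __ffs(CAL_PIX_PROC_EN_MASK)) & CAL_PIX_PROC_EN_MASK;
 *	cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), val);
 */
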
static void cal_ctx_wr_dma_config(struct cal_ctx *ctx)
{
	unsigned int stride = ctx->v_fmt.fmt.pix.bytesperline;
	u32 val;

	val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx));
	cal_set_field(&val, ctx->cport, CAL_WR_DMA_CTRL_CPORT_MASK);
	cal_set_field(&val, ctx->v_fmt.fmt.pix.height,
		      CAL_WR_DMA_CTRL_YSIZE_MASK);
	cal_set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
		      CAL_WR_DMA_CTRL_DTAG_MASK);
	cal_set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
		      CAL_WR_DMA_CTRL_PATTERN_MASK);
	cal_set_field(&val, 1, CAL_WR_DMA_CTRL_STALL_RD_MASK);
	cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val);
	ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->dma_ctx,
		cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)));

	cal_write_field(ctx->cal, CAL_WR_DMA_OFST(ctx->dma_ctx),
			stride / 16, CAL_WR_DMA_OFST_MASK);
	ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->dma_ctx,
		cal_read(ctx->cal, CAL_WR_DMA_OFST(ctx->dma_ctx)));

	val = cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx));
	/* 64 bit word means no skipping */
	cal_set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
	/*
	 * The XSIZE field is expressed in 64-bit units and prevents overflows
	 * in case of synchronization issues by limiting the number of bytes
	 * written per line.
	 */
	cal_set_field(&val, stride / 8, CAL_WR_DMA_XSIZE_MASK);
	cal_write(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx), val);
	ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->dma_ctx,
		cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx)));
}

void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr)
{
	cal_write(ctx->cal, CAL_WR_DMA_ADDR(ctx->dma_ctx), addr);
}

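/*
 * Worked example for the DMA line arithmetic above (illustrative): a
 * 640x480 V4L2_PIX_FMT_YUYV frame has bytesperline = 640 * 2 = 1280, so
 * cal_ctx_wr_dma_config() programs CAL_WR_DMA_OFST with 1280 / 16 = 80
 * and the XSIZE field with 1280 / 8 = 160 64-bit words per line.
 */
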
static void cal_ctx_wr_dma_enable(struct cal_ctx *ctx)
{
	u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx));

	cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
		      CAL_WR_DMA_CTRL_MODE_MASK);
	cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val);
}

static void cal_ctx_wr_dma_disable(struct cal_ctx *ctx)
{
	u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx));

	cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_DIS,
		      CAL_WR_DMA_CTRL_MODE_MASK);
	cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val);
}

static bool cal_ctx_wr_dma_stopped(struct cal_ctx *ctx)
{
	bool stopped;

	spin_lock_irq(&ctx->dma.lock);
	stopped = ctx->dma.state == CAL_DMA_STOPPED;
	spin_unlock_irq(&ctx->dma.lock);

	return stopped;
}

static int
cal_get_remote_frame_desc_entry(struct cal_camerarx *phy,
				struct v4l2_mbus_frame_desc_entry *entry)
{
	struct v4l2_mbus_frame_desc fd;
	int ret;

	ret = cal_camerarx_get_remote_frame_desc(phy, &fd);
	if (ret) {
		if (ret != -ENOIOCTLCMD)
			dev_err(phy->cal->dev,
				"Failed to get remote frame desc: %d\n", ret);
		return ret;
	}

	if (fd.num_entries == 0) {
		dev_err(phy->cal->dev,
			"No streams found in the remote frame descriptor\n");

		return -ENODEV;
	}

	if (fd.num_entries > 1)
		dev_dbg(phy->cal->dev,
			"Multiple streams not supported in remote frame descriptor, using the first one\n");

	*entry = fd.entry[0];

	return 0;
}

int cal_ctx_prepare(struct cal_ctx *ctx)
{
	struct v4l2_mbus_frame_desc_entry entry;
	int ret;

	ret = cal_get_remote_frame_desc_entry(ctx->phy, &entry);

	if (ret == -ENOIOCTLCMD) {
		ctx->vc = 0;
		ctx->datatype = CAL_CSI2_CTX_DT_ANY;
	} else if (!ret) {
		ctx_dbg(2, ctx, "Framedesc: len %u, vc %u, dt %#x\n",
			entry.length, entry.bus.csi2.vc, entry.bus.csi2.dt);

		ctx->vc = entry.bus.csi2.vc;
		ctx->datatype = entry.bus.csi2.dt;
	} else {
		return ret;
	}

	ctx->use_pix_proc = !ctx->fmtinfo->meta;

	if (ctx->use_pix_proc) {
		ret = cal_reserve_pix_proc(ctx->cal);
		if (ret < 0) {
			ctx_err(ctx, "Failed to reserve pix proc: %d\n", ret);
			return ret;
		}

		ctx->pix_proc = ret;
	}

	return 0;
}

void cal_ctx_unprepare(struct cal_ctx *ctx)
{
	if (ctx->use_pix_proc)
		cal_release_pix_proc(ctx->cal, ctx->pix_proc);
}

void cal_ctx_start(struct cal_ctx *ctx)
{
	ctx->sequence = 0;
	ctx->dma.state = CAL_DMA_RUNNING;

	/* Configure the CSI-2, pixel processing and write DMA contexts. */
	cal_ctx_csi2_config(ctx);
	if (ctx->use_pix_proc)
		cal_ctx_pix_proc_config(ctx);
	cal_ctx_wr_dma_config(ctx);

	/* Enable IRQ_WDMA_END and IRQ_WDMA_START. */
	cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(1),
		  CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx));
	cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(2),
		  CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx));

	cal_ctx_wr_dma_enable(ctx);
}

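/*
 * Sketch of the expected calling sequence for the context helpers above.
 * The actual caller is the V4L2 video device code (presumably the vb2
 * start_streaming/stop_streaming handlers in cal-video.c, an assumption);
 * only the ordering below is implied by this file:
 *
 *	ret = cal_ctx_prepare(ctx);	   (pick VC/DT, reserve a pix proc)
 *	cal_ctx_set_dma_addr(ctx, addr);   (point DMA at the first buffer)
 *	cal_ctx_start(ctx);		   (program contexts, enable IRQs/DMA)
 *	...
 *	cal_ctx_stop(ctx);		   (drain the DMA, disable IRQs)
 *	cal_ctx_unprepare(ctx);		   (release the pix proc)
 */
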
void cal_ctx_stop(struct cal_ctx *ctx)
{
	long timeout;

	/*
	 * Request DMA stop and wait until it completes. If completion times
	 * out, forcefully disable the DMA.
	 */
	spin_lock_irq(&ctx->dma.lock);
	ctx->dma.state = CAL_DMA_STOP_REQUESTED;
	spin_unlock_irq(&ctx->dma.lock);

	timeout = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx),
				     msecs_to_jiffies(500));
	if (!timeout) {
		ctx_err(ctx, "failed to disable dma cleanly\n");
		cal_ctx_wr_dma_disable(ctx);
	}

	/* Disable IRQ_WDMA_END and IRQ_WDMA_START. */
	cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(1),
		  CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx));
	cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(2),
		  CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx));

	ctx->dma.state = CAL_DMA_STOPPED;

	/* Disable CSI2 context */
	cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), 0);

	/* Disable pix proc */
	if (ctx->use_pix_proc)
		cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), 0);
}

/* ------------------------------------------------------------------
 *	IRQ Handling
 * ------------------------------------------------------------------
 */

static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
{
	spin_lock(&ctx->dma.lock);

	if (ctx->dma.state == CAL_DMA_STOP_REQUESTED) {
		/*
		 * If a stop is requested, disable the write DMA context
		 * immediately. The CAL_WR_DMA_CTRL_j.MODE field is shadowed,
		 * the current frame will complete and the DMA will then stop.
		 */
		cal_ctx_wr_dma_disable(ctx);
		ctx->dma.state = CAL_DMA_STOP_PENDING;
	} else if (!list_empty(&ctx->dma.queue) && !ctx->dma.pending) {
		/*
		 * Otherwise, if a new buffer is available, queue it to the
		 * hardware.
		 */
		struct cal_buffer *buf;
		dma_addr_t addr;

		buf = list_first_entry(&ctx->dma.queue, struct cal_buffer,
				       list);
		addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		cal_ctx_set_dma_addr(ctx, addr);

		ctx->dma.pending = buf;
		list_del(&buf->list);
	}

	spin_unlock(&ctx->dma.lock);
}

static inline void cal_irq_wdma_end(struct cal_ctx *ctx)
{
	struct cal_buffer *buf = NULL;

	spin_lock(&ctx->dma.lock);

	/* If the DMA context was stopping, it is now stopped. */
	if (ctx->dma.state == CAL_DMA_STOP_PENDING) {
		ctx->dma.state = CAL_DMA_STOPPED;
		wake_up(&ctx->dma.wait);
	}

	/* If a new buffer was queued, complete the current buffer. */
	if (ctx->dma.pending) {
		buf = ctx->dma.active;
		ctx->dma.active = ctx->dma.pending;
		ctx->dma.pending = NULL;
	}

	spin_unlock(&ctx->dma.lock);

	if (buf) {
		buf->vb.vb2_buf.timestamp = ktime_get_ns();
		buf->vb.field = ctx->v_fmt.fmt.pix.field;
		buf->vb.sequence = ctx->sequence++;
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
	}
}

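/*
 * Summary of the ctx->dma.state machine implemented by cal_ctx_stop() and
 * the two IRQ handlers above (derived from this file, added for clarity):
 *
 *	CAL_DMA_RUNNING
 *	  -- cal_ctx_stop() --------------------------> CAL_DMA_STOP_REQUESTED
 *	  -- WDMA_START irq, DMA mode disabled -------> CAL_DMA_STOP_PENDING
 *	  -- WDMA_END irq, wake_up() -----------------> CAL_DMA_STOPPED
 *
 * If the 500 ms wait in cal_ctx_stop() times out, the DMA is forcefully
 * disabled and the state is set to CAL_DMA_STOPPED directly.
 */
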
static irqreturn_t cal_irq(int irq_cal, void *data)
{
	struct cal_dev *cal = data;
	u32 status;

	status = cal_read(cal, CAL_HL_IRQSTATUS(0));
	if (status) {
		unsigned int i;

		cal_write(cal, CAL_HL_IRQSTATUS(0), status);

		if (status & CAL_HL_IRQ_OCPO_ERR_MASK)
			dev_err_ratelimited(cal->dev, "OCPO ERROR\n");

		for (i = 0; i < cal->data->num_csi2_phy; ++i) {
			if (status & CAL_HL_IRQ_CIO_MASK(i)) {
				u32 cio_stat = cal_read(cal,
							CAL_CSI2_COMPLEXIO_IRQSTATUS(i));

				dev_err_ratelimited(cal->dev,
						    "CIO%u error: %#08x\n",
						    i, cio_stat);

				cal_write(cal, CAL_CSI2_COMPLEXIO_IRQSTATUS(i),
					  cio_stat);
			}

			if (status & CAL_HL_IRQ_VC_MASK(i)) {
				u32 vc_stat = cal_read(cal, CAL_CSI2_VC_IRQSTATUS(i));

				dev_err_ratelimited(cal->dev,
						    "CIO%u VC error: %#08x\n",
						    i, vc_stat);

				cal_write(cal, CAL_CSI2_VC_IRQSTATUS(i), vc_stat);
			}
		}
	}

	/* Check which DMA just finished */
	status = cal_read(cal, CAL_HL_IRQSTATUS(1));
	if (status) {
		unsigned int i;

		/* Clear Interrupt status */
		cal_write(cal, CAL_HL_IRQSTATUS(1), status);

		for (i = 0; i < cal->num_contexts; ++i) {
			if (status & CAL_HL_IRQ_WDMA_END_MASK(i))
				cal_irq_wdma_end(cal->ctx[i]);
		}
	}

	/* Check which DMA just started */
	status = cal_read(cal, CAL_HL_IRQSTATUS(2));
	if (status) {
		unsigned int i;

		/* Clear Interrupt status */
		cal_write(cal, CAL_HL_IRQSTATUS(2), status);

		for (i = 0; i < cal->num_contexts; ++i) {
			if (status & CAL_HL_IRQ_WDMA_START_MASK(i))
				cal_irq_wdma_start(cal->ctx[i]);
		}
	}

	return IRQ_HANDLED;
}

/* ------------------------------------------------------------------
 *	Asynchronous V4L2 subdev binding
 * ------------------------------------------------------------------
 */

struct cal_v4l2_async_subdev {
	struct v4l2_async_subdev asd; /* Must be first */
	struct cal_camerarx *phy;
};

static inline struct cal_v4l2_async_subdev *
to_cal_asd(struct v4l2_async_subdev *asd)
{
	return container_of(asd, struct cal_v4l2_async_subdev, asd);
}

static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_subdev *asd)
{
	struct cal_camerarx *phy = to_cal_asd(asd)->phy;
	int pad;
	int ret;

	if (phy->source) {
		phy_info(phy, "Rejecting subdev %s (Already set!!)",
			 subdev->name);
		return 0;
	}

	phy->source = subdev;
	phy_dbg(1, phy, "Using source %s for capture\n", subdev->name);

	pad = media_entity_get_fwnode_pad(&subdev->entity,
					  of_fwnode_handle(phy->source_ep_node),
					  MEDIA_PAD_FL_SOURCE);
	if (pad < 0) {
		phy_err(phy, "Source %s has no connected source pad\n",
			subdev->name);
		return pad;
	}

	ret = media_create_pad_link(&subdev->entity, pad,
				    &phy->subdev.entity, CAL_CAMERARX_PAD_SINK,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret) {
		phy_err(phy, "Failed to create media link for source %s\n",
			subdev->name);
		return ret;
	}

	return 0;
}

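/*
 * Context note (sketch, not from this driver): the .bound callback above
 * fires when the remote sensor driver registers its async subdev. A
 * hypothetical sensor probe on the other end of the fwnode link would look
 * roughly like this:
 *
 *	static int sensor_probe(struct i2c_client *client)
 *	{
 *		struct sensor *sensor = ...;
 *
 *		v4l2_i2c_subdev_init(&sensor->sd, client, &sensor_subdev_ops);
 *		sensor->sd.fwnode = dev_fwnode(&client->dev);
 *		return v4l2_async_register_subdev(&sensor->sd);
 *	}
 */
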
static int cal_async_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct cal_dev *cal = container_of(notifier, struct cal_dev, notifier);
	unsigned int i;
	int ret;

	for (i = 0; i < cal->num_contexts; ++i) {
		ret = cal_ctx_v4l2_register(cal->ctx[i]);
		if (ret)
			goto err_ctx_unreg;
	}

	if (!cal_mc_api)
		return 0;

	ret = v4l2_device_register_subdev_nodes(&cal->v4l2_dev);
	if (ret)
		goto err_ctx_unreg;

	return 0;

err_ctx_unreg:
	for (; i > 0; --i) {
		if (!cal->ctx[i - 1])
			continue;

		cal_ctx_v4l2_unregister(cal->ctx[i - 1]);
	}

	return ret;
}

static const struct v4l2_async_notifier_operations cal_async_notifier_ops = {
	.bound = cal_async_notifier_bound,
	.complete = cal_async_notifier_complete,
};

static int cal_async_notifier_register(struct cal_dev *cal)
{
	unsigned int i;
	int ret;

	v4l2_async_nf_init(&cal->notifier);
	cal->notifier.ops = &cal_async_notifier_ops;

	for (i = 0; i < cal->data->num_csi2_phy; ++i) {
		struct cal_camerarx *phy = cal->phy[i];
		struct cal_v4l2_async_subdev *casd;
		struct fwnode_handle *fwnode;

		if (!phy->source_node)
			continue;

		fwnode = of_fwnode_handle(phy->source_node);
		casd = v4l2_async_nf_add_fwnode(&cal->notifier,
						fwnode,
						struct cal_v4l2_async_subdev);
		if (IS_ERR(casd)) {
			phy_err(phy, "Failed to add subdev to notifier\n");
			ret = PTR_ERR(casd);
			goto error;
		}

		casd->phy = phy;
	}

	ret = v4l2_async_nf_register(&cal->v4l2_dev, &cal->notifier);
	if (ret) {
		cal_err(cal, "Error registering async notifier\n");
		goto error;
	}

	return 0;

error:
	v4l2_async_nf_cleanup(&cal->notifier);
	return ret;
}

static void cal_async_notifier_unregister(struct cal_dev *cal)
{
	v4l2_async_nf_unregister(&cal->notifier);
	v4l2_async_nf_cleanup(&cal->notifier);
}

/* ------------------------------------------------------------------
 *	Media and V4L2 device handling
 * ------------------------------------------------------------------
 */

/*
 * Register user-facing devices. To be called at the end of the probe function
 * when all resources are initialized and ready.
 */
static int cal_media_register(struct cal_dev *cal)
{
	int ret;

	ret = media_device_register(&cal->mdev);
	if (ret) {
		cal_err(cal, "Failed to register media device\n");
		return ret;
	}

	/*
	 * Register the async notifier. This may trigger registration of the
	 * V4L2 video devices if all subdevs are ready.
	 */
	ret = cal_async_notifier_register(cal);
	if (ret) {
		media_device_unregister(&cal->mdev);
		return ret;
	}

	return 0;
}

/*
 * Unregister the user-facing devices, but don't free memory yet. To be called
 * at the beginning of the remove function, to disallow access from userspace.
 */
static void cal_media_unregister(struct cal_dev *cal)
{
	unsigned int i;

	/* Unregister all the V4L2 video devices. */
	for (i = 0; i < cal->num_contexts; i++)
		cal_ctx_v4l2_unregister(cal->ctx[i]);

	cal_async_notifier_unregister(cal);
	media_device_unregister(&cal->mdev);
}

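/*
 * Usage note (illustrative): once cal_media_register() has succeeded and the
 * async notifier has completed, the resulting topology can be inspected from
 * userspace with the media-ctl tool from v4l-utils, e.g.:
 *
 *	media-ctl -p -d /dev/media0
 *
 * (the media node number depends on the system).
 */
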
/*
 * Initialize the in-kernel objects. To be called at the beginning of the probe
 * function, before the V4L2 device is used by the driver.
 */
static int cal_media_init(struct cal_dev *cal)
{
	struct media_device *mdev = &cal->mdev;
	int ret;

	mdev->dev = cal->dev;
	mdev->hw_revision = cal->revision;
	strscpy(mdev->model, "CAL", sizeof(mdev->model));
	media_device_init(mdev);

	/*
	 * Initialize the V4L2 device (despite the function name, this performs
	 * initialization, not registration).
	 */
	cal->v4l2_dev.mdev = mdev;
	ret = v4l2_device_register(cal->dev, &cal->v4l2_dev);
	if (ret) {
		cal_err(cal, "Failed to register V4L2 device\n");
		return ret;
	}

	vb2_dma_contig_set_max_seg_size(cal->dev, DMA_BIT_MASK(32));

	return 0;
}

/*
 * Clean up the in-kernel objects, freeing memory. To be called at the very
 * end of the remove sequence, when nothing (including userspace) can access
 * the objects anymore.
 */
static void cal_media_cleanup(struct cal_dev *cal)
{
	v4l2_device_unregister(&cal->v4l2_dev);
	media_device_cleanup(&cal->mdev);

	vb2_dma_contig_clear_max_seg_size(cal->dev);
}

/* ------------------------------------------------------------------
 *	Initialization and module stuff
 * ------------------------------------------------------------------
 */

static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst)
{
	struct cal_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->cal = cal;
	ctx->phy = cal->phy[inst];
	ctx->dma_ctx = inst;
	ctx->csi2_ctx = inst;
	ctx->cport = inst;

	ret = cal_ctx_v4l2_init(ctx);
	if (ret) {
		/* Don't leak the context on initialization failure. */
		kfree(ctx);
		return NULL;
	}

	return ctx;
}

static void cal_ctx_destroy(struct cal_ctx *ctx)
{
	cal_ctx_v4l2_cleanup(ctx);

	kfree(ctx);
}

static const struct of_device_id cal_of_match[] = {
	{
		.compatible = "ti,dra72-cal",
		.data = (void *)&dra72x_cal_data,
	},
	{
		.compatible = "ti,dra72-pre-es2-cal",
		.data = (void *)&dra72x_es1_cal_data,
	},
	{
		.compatible = "ti,dra76-cal",
		.data = (void *)&dra76x_cal_data,
	},
	{
		.compatible = "ti,am654-cal",
		.data = (void *)&am654_cal_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, cal_of_match);

/* Get hardware revision and info. */

#define CAL_HL_HWINFO_VALUE 0xa3c90469

static void cal_get_hwinfo(struct cal_dev *cal)
{
	u32 hwinfo;

	cal->revision = cal_read(cal, CAL_HL_REVISION);
	switch (FIELD_GET(CAL_HL_REVISION_SCHEME_MASK, cal->revision)) {
	case CAL_HL_REVISION_SCHEME_H08:
		cal_dbg(3, cal, "CAL HW revision %lu.%lu.%lu (0x%08x)\n",
			FIELD_GET(CAL_HL_REVISION_MAJOR_MASK, cal->revision),
			FIELD_GET(CAL_HL_REVISION_MINOR_MASK, cal->revision),
			FIELD_GET(CAL_HL_REVISION_RTL_MASK, cal->revision),
			cal->revision);
		break;

	case CAL_HL_REVISION_SCHEME_LEGACY:
	default:
		cal_info(cal, "Unexpected CAL HW revision 0x%08x\n",
			 cal->revision);
		break;
	}

	hwinfo = cal_read(cal, CAL_HL_HWINFO);
	if (hwinfo != CAL_HL_HWINFO_VALUE)
		cal_info(cal, "CAL_HL_HWINFO = 0x%08x, expected 0x%08x\n",
			 hwinfo, CAL_HL_HWINFO_VALUE);
}

static int cal_init_camerarx_regmap(struct cal_dev *cal)
{
	struct platform_device *pdev = to_platform_device(cal->dev);
	struct device_node *np = cal->dev->of_node;
	struct regmap_config config = { };
	struct regmap *syscon;
	struct resource *res;
	unsigned int offset;
	void __iomem *base;

	syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,camerrx-control",
						      1, &offset);
	if (!IS_ERR(syscon)) {
		cal->syscon_camerrx = syscon;
		cal->syscon_camerrx_offset = offset;
		return 0;
	}

	dev_warn(cal->dev, "failed to get ti,camerrx-control: %ld\n",
		 PTR_ERR(syscon));

	/*
	 * Backward DTS compatibility. If the syscon entry is not present,
	 * check whether the camerrx_control resource is present instead.
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "camerrx_control");
	base = devm_ioremap_resource(cal->dev, res);
	if (IS_ERR(base)) {
		cal_err(cal, "failed to ioremap camerrx_control\n");
		return PTR_ERR(base);
	}

	cal_dbg(1, cal, "ioresource %s at %pa - %pa\n",
		res->name, &res->start, &res->end);

	config.reg_bits = 32;
	config.reg_stride = 4;
	config.val_bits = 32;
	config.max_register = resource_size(res) - 4;

	syscon = regmap_init_mmio(NULL, base, &config);
	if (IS_ERR(syscon)) {
		pr_err("regmap init failed\n");
		return PTR_ERR(syscon);
	}

	/*
	 * In this case the base already points to the CM register directly,
	 * so no offset is needed.
	 */
	cal->syscon_camerrx = syscon;
	cal->syscon_camerrx_offset = 0;

	return 0;
}

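/*
 * Sketch of the DT form the phandle lookup above expects (the property name
 * is taken from this file; the phandle target and offset value are
 * placeholders):
 *
 *	ti,camerrx-control = <&scm_conf 0xe94>;
 *
 * The third argument (1) to syscon_regmap_lookup_by_phandle_args() is the
 * number of argument cells following the phandle; the single cell is
 * returned through &offset and later used as the register offset into the
 * syscon regmap.
 */
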
static int cal_probe(struct platform_device *pdev)
{
	struct cal_dev *cal;
	bool connected = false;
	unsigned int i;
	int ret;
	int irq;

	cal = devm_kzalloc(&pdev->dev, sizeof(*cal), GFP_KERNEL);
	if (!cal)
		return -ENOMEM;

	cal->data = of_device_get_match_data(&pdev->dev);
	if (!cal->data) {
		dev_err(&pdev->dev, "Could not get feature data based on compatible version\n");
		return -ENODEV;
	}

	cal->dev = &pdev->dev;
	platform_set_drvdata(pdev, cal);

	/* Acquire resources: clocks, CAMERARX regmap, I/O memory and IRQ. */
	cal->fclk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(cal->fclk)) {
		dev_err(&pdev->dev, "cannot get CAL fclk\n");
		return PTR_ERR(cal->fclk);
	}

	ret = cal_init_camerarx_regmap(cal);
	if (ret < 0)
		return ret;

	cal->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"cal_top");
	cal->base = devm_ioremap_resource(&pdev->dev, cal->res);
	if (IS_ERR(cal->base))
		return PTR_ERR(cal->base);

	cal_dbg(1, cal, "ioresource %s at %pa - %pa\n",
		cal->res->name, &cal->res->start, &cal->res->end);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cal_dbg(1, cal, "got irq# %d\n", irq);
	ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME,
			       cal);
	if (ret)
		return ret;

	/* Read the revision and hardware info to verify hardware access. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret)
		goto error_pm_runtime;

	cal_get_hwinfo(cal);
	pm_runtime_put_sync(&pdev->dev);

	/* Initialize the media device. */
	ret = cal_media_init(cal);
	if (ret < 0)
		goto error_pm_runtime;

	/* Create CAMERARX PHYs. */
	for (i = 0; i < cal->data->num_csi2_phy; ++i) {
		cal->phy[i] = cal_camerarx_create(cal, i);
		if (IS_ERR(cal->phy[i])) {
			ret = PTR_ERR(cal->phy[i]);
			cal->phy[i] = NULL;
			goto error_camerarx;
		}

		if (cal->phy[i]->source_node)
			connected = true;
	}

	if (!connected) {
		cal_err(cal, "Neither port is configured, no point in staying up\n");
		ret = -ENODEV;
		goto error_camerarx;
	}

	/* Create contexts. */
	for (i = 0; i < cal->data->num_csi2_phy; ++i) {
		if (!cal->phy[i]->source_node)
			continue;

		cal->ctx[cal->num_contexts] = cal_ctx_create(cal, i);
		if (!cal->ctx[cal->num_contexts]) {
			cal_err(cal, "Failed to create context %u\n", cal->num_contexts);
			ret = -ENODEV;
			goto error_context;
		}

		cal->num_contexts++;
	}

	/* Register the media device. */
	ret = cal_media_register(cal);
	if (ret)
		goto error_context;

	return 0;

error_context:
	for (i = 0; i < cal->num_contexts; i++)
		cal_ctx_destroy(cal->ctx[i]);

error_camerarx:
	for (i = 0; i < cal->data->num_csi2_phy; i++)
		cal_camerarx_destroy(cal->phy[i]);

	cal_media_cleanup(cal);

error_pm_runtime:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

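/*
 * Note (derived from this file): pm_runtime_resume_and_get() in cal_probe()
 * powers up the CAL block, which invokes cal_runtime_resume() below through
 * the dev_pm_ops; that is where the global error interrupt and the CAL_CTRL
 * burst parameters are programmed before cal_get_hwinfo() reads the revision
 * registers.
 */
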
static int cal_remove(struct platform_device *pdev)
{
	struct cal_dev *cal = platform_get_drvdata(pdev);
	unsigned int i;
	int ret;

	cal_dbg(1, cal, "Removing %s\n", CAL_MODULE_NAME);

	ret = pm_runtime_resume_and_get(&pdev->dev);

	cal_media_unregister(cal);

	for (i = 0; i < cal->data->num_csi2_phy; i++)
		cal_camerarx_disable(cal->phy[i]);

	for (i = 0; i < cal->num_contexts; i++)
		cal_ctx_destroy(cal->ctx[i]);

	for (i = 0; i < cal->data->num_csi2_phy; i++)
		cal_camerarx_destroy(cal->phy[i]);

	cal_media_cleanup(cal);

	if (ret >= 0)
		pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static int cal_runtime_resume(struct device *dev)
{
	struct cal_dev *cal = dev_get_drvdata(dev);
	unsigned int i;
	u32 val;

	if (cal->data->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) {
		/*
		 * Apply the errata on both ports every time we (re-)enable
		 * the clock.
		 */
		for (i = 0; i < cal->data->num_csi2_phy; i++)
			cal_camerarx_i913_errata(cal->phy[i]);
	}

	/*
	 * Enable global interrupts that are not related to a particular
	 * CAMERARX or context.
	 */
	cal_write(cal, CAL_HL_IRQENABLE_SET(0), CAL_HL_IRQ_OCPO_ERR_MASK);

	val = cal_read(cal, CAL_CTRL);
	cal_set_field(&val, CAL_CTRL_BURSTSIZE_BURST128,
		      CAL_CTRL_BURSTSIZE_MASK);
	cal_set_field(&val, 0xf, CAL_CTRL_TAGCNT_MASK);
	cal_set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
		      CAL_CTRL_POSTED_WRITES_MASK);
	cal_set_field(&val, 0xff, CAL_CTRL_MFLAGL_MASK);
	cal_set_field(&val, 0xff, CAL_CTRL_MFLAGH_MASK);
	cal_write(cal, CAL_CTRL, val);
	cal_dbg(3, cal, "CAL_CTRL = 0x%08x\n", cal_read(cal, CAL_CTRL));

	return 0;
}

static const struct dev_pm_ops cal_pm_ops = {
	.runtime_resume = cal_runtime_resume,
};

static struct platform_driver cal_pdrv = {
	.probe = cal_probe,
	.remove = cal_remove,
	.driver = {
		.name = CAL_MODULE_NAME,
		.pm = &cal_pm_ops,
		.of_match_table = cal_of_match,
	},
};

module_platform_driver(cal_pdrv);
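
/*
 * Usage note (illustrative): the module parameters declared at the top of
 * this file can be set at load time. Assuming the module name follows
 * CAL_MODULE_NAME (defined in cal.h), loading with extra verbosity and the
 * media-controller API enabled would look like:
 *
 *	modprobe cal debug=3 mc_api=1 video_nr=4
 *
 * debug and video_nr remain writable at runtime through
 * /sys/module/<name>/parameters/ (permissions 0644 above), while mc_api is
 * read-only (0444) once the module is loaded.
 */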