core.c (58740B)
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Core SoC Power Management Controller Driver
 *
 * Copyright (c) 2016, Intel Corporation.
 * All Rights Reserved.
 *
 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
 *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/uaccess.h>
#include <linux/uuid.h>

#include <acpi/acpi_bus.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include <asm/tsc.h>

#include "core.h"

#define ACPI_S0IX_DSM_UUID		"57a6512e-3979-4e9d-9708-ff13b2508972"
#define ACPI_GET_LOW_MODE_REGISTERS	1

/* PKGC MSRs are common across Intel Core SoCs */
static const struct pmc_bit_map msr_map[] = {
	{"Package C2", MSR_PKG_C2_RESIDENCY},
	{"Package C3", MSR_PKG_C3_RESIDENCY},
	{"Package C6", MSR_PKG_C6_RESIDENCY},
	{"Package C7", MSR_PKG_C7_RESIDENCY},
	{"Package C8", MSR_PKG_C8_RESIDENCY},
	{"Package C9", MSR_PKG_C9_RESIDENCY},
	{"Package C10", MSR_PKG_C10_RESIDENCY},
	{}
};

static const struct pmc_bit_map spt_pll_map[] = {
	{"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0},
	{"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
	{"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
	{"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
	{}
};

static const struct pmc_bit_map spt_mphy_map[] = {
	{"MPHY CORE LANE 0", SPT_PMC_BIT_MPHY_LANE0},
	{"MPHY CORE LANE 1", SPT_PMC_BIT_MPHY_LANE1},
	{"MPHY CORE LANE 2", SPT_PMC_BIT_MPHY_LANE2},
	{"MPHY CORE LANE 3", SPT_PMC_BIT_MPHY_LANE3},
	{"MPHY CORE LANE 4", SPT_PMC_BIT_MPHY_LANE4},
	{"MPHY CORE LANE 5", SPT_PMC_BIT_MPHY_LANE5},
	{"MPHY CORE LANE 6", SPT_PMC_BIT_MPHY_LANE6},
	{"MPHY CORE LANE 7", SPT_PMC_BIT_MPHY_LANE7},
	{"MPHY CORE LANE 8", SPT_PMC_BIT_MPHY_LANE8},
	{"MPHY CORE LANE 9", SPT_PMC_BIT_MPHY_LANE9},
	{"MPHY CORE LANE 10", SPT_PMC_BIT_MPHY_LANE10},
	{"MPHY CORE LANE 11", SPT_PMC_BIT_MPHY_LANE11},
	{"MPHY CORE LANE 12", SPT_PMC_BIT_MPHY_LANE12},
	{"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
	{"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
	{"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
	{}
};

static const struct pmc_bit_map spt_pfear_map[] = {
	{"PMC", SPT_PMC_BIT_PMC},
	{"OPI-DMI", SPT_PMC_BIT_OPI},
	{"SPI / eSPI", SPT_PMC_BIT_SPI},
	{"XHCI", SPT_PMC_BIT_XHCI},
	{"SPA", SPT_PMC_BIT_SPA},
	{"SPB", SPT_PMC_BIT_SPB},
	{"SPC", SPT_PMC_BIT_SPC},
	{"GBE", SPT_PMC_BIT_GBE},
	{"SATA", SPT_PMC_BIT_SATA},
	{"HDA-PGD0", SPT_PMC_BIT_HDA_PGD0},
	{"HDA-PGD1", SPT_PMC_BIT_HDA_PGD1},
	{"HDA-PGD2", SPT_PMC_BIT_HDA_PGD2},
	{"HDA-PGD3", SPT_PMC_BIT_HDA_PGD3},
	{"RSVD", SPT_PMC_BIT_RSVD_0B},
	{"LPSS", SPT_PMC_BIT_LPSS},
	{"LPC", SPT_PMC_BIT_LPC},
	{"SMB", SPT_PMC_BIT_SMB},
	{"ISH", SPT_PMC_BIT_ISH},
	{"P2SB", SPT_PMC_BIT_P2SB},
	{"DFX", SPT_PMC_BIT_DFX},
	{"SCC", SPT_PMC_BIT_SCC},
	{"RSVD", SPT_PMC_BIT_RSVD_0C},
	{"FUSE", SPT_PMC_BIT_FUSE},
	{"CAMERA", SPT_PMC_BIT_CAMREA},
	{"RSVD", SPT_PMC_BIT_RSVD_0D},
	{"USB3-OTG", SPT_PMC_BIT_USB3_OTG},
	{"EXI", SPT_PMC_BIT_EXI},
	{"CSE", SPT_PMC_BIT_CSE},
	{"CSME_KVM", SPT_PMC_BIT_CSME_KVM},
	{"CSME_PMT",
SPT_PMC_BIT_CSME_PMT}, 110 {"CSME_CLINK", SPT_PMC_BIT_CSME_CLINK}, 111 {"CSME_PTIO", SPT_PMC_BIT_CSME_PTIO}, 112 {"CSME_USBR", SPT_PMC_BIT_CSME_USBR}, 113 {"CSME_SUSRAM", SPT_PMC_BIT_CSME_SUSRAM}, 114 {"CSME_SMT", SPT_PMC_BIT_CSME_SMT}, 115 {"RSVD", SPT_PMC_BIT_RSVD_1A}, 116 {"CSME_SMS2", SPT_PMC_BIT_CSME_SMS2}, 117 {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1}, 118 {"CSME_RTC", SPT_PMC_BIT_CSME_RTC}, 119 {"CSME_PSF", SPT_PMC_BIT_CSME_PSF}, 120 {} 121}; 122 123static const struct pmc_bit_map *ext_spt_pfear_map[] = { 124 /* 125 * Check intel_pmc_core_ids[] users of spt_reg_map for 126 * a list of core SoCs using this. 127 */ 128 spt_pfear_map, 129 NULL 130}; 131 132static const struct pmc_bit_map spt_ltr_show_map[] = { 133 {"SOUTHPORT_A", SPT_PMC_LTR_SPA}, 134 {"SOUTHPORT_B", SPT_PMC_LTR_SPB}, 135 {"SATA", SPT_PMC_LTR_SATA}, 136 {"GIGABIT_ETHERNET", SPT_PMC_LTR_GBE}, 137 {"XHCI", SPT_PMC_LTR_XHCI}, 138 {"Reserved", SPT_PMC_LTR_RESERVED}, 139 {"ME", SPT_PMC_LTR_ME}, 140 /* EVA is Enterprise Value Add, doesn't really exist on PCH */ 141 {"EVA", SPT_PMC_LTR_EVA}, 142 {"SOUTHPORT_C", SPT_PMC_LTR_SPC}, 143 {"HD_AUDIO", SPT_PMC_LTR_AZ}, 144 {"LPSS", SPT_PMC_LTR_LPSS}, 145 {"SOUTHPORT_D", SPT_PMC_LTR_SPD}, 146 {"SOUTHPORT_E", SPT_PMC_LTR_SPE}, 147 {"CAMERA", SPT_PMC_LTR_CAM}, 148 {"ESPI", SPT_PMC_LTR_ESPI}, 149 {"SCC", SPT_PMC_LTR_SCC}, 150 {"ISH", SPT_PMC_LTR_ISH}, 151 /* Below two cannot be used for LTR_IGNORE */ 152 {"CURRENT_PLATFORM", SPT_PMC_LTR_CUR_PLT}, 153 {"AGGREGATED_SYSTEM", SPT_PMC_LTR_CUR_ASLT}, 154 {} 155}; 156 157static const struct pmc_reg_map spt_reg_map = { 158 .pfear_sts = ext_spt_pfear_map, 159 .mphy_sts = spt_mphy_map, 160 .pll_sts = spt_pll_map, 161 .ltr_show_sts = spt_ltr_show_map, 162 .msr_sts = msr_map, 163 .slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET, 164 .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP, 165 .ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET, 166 .regmap_length = SPT_PMC_MMIO_REG_LEN, 167 .ppfear0_offset = SPT_PMC_XRAM_PPFEAR0A, 168 .ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES, 169 .pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET, 170 .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT, 171 .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED, 172 .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET, 173}; 174 175/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */ 176static const struct pmc_bit_map cnp_pfear_map[] = { 177 {"PMC", BIT(0)}, 178 {"OPI-DMI", BIT(1)}, 179 {"SPI/eSPI", BIT(2)}, 180 {"XHCI", BIT(3)}, 181 {"SPA", BIT(4)}, 182 {"SPB", BIT(5)}, 183 {"SPC", BIT(6)}, 184 {"GBE", BIT(7)}, 185 186 {"SATA", BIT(0)}, 187 {"HDA_PGD0", BIT(1)}, 188 {"HDA_PGD1", BIT(2)}, 189 {"HDA_PGD2", BIT(3)}, 190 {"HDA_PGD3", BIT(4)}, 191 {"SPD", BIT(5)}, 192 {"LPSS", BIT(6)}, 193 {"LPC", BIT(7)}, 194 195 {"SMB", BIT(0)}, 196 {"ISH", BIT(1)}, 197 {"P2SB", BIT(2)}, 198 {"NPK_VNN", BIT(3)}, 199 {"SDX", BIT(4)}, 200 {"SPE", BIT(5)}, 201 {"Fuse", BIT(6)}, 202 {"SBR8", BIT(7)}, 203 204 {"CSME_FSC", BIT(0)}, 205 {"USB3_OTG", BIT(1)}, 206 {"EXI", BIT(2)}, 207 {"CSE", BIT(3)}, 208 {"CSME_KVM", BIT(4)}, 209 {"CSME_PMT", BIT(5)}, 210 {"CSME_CLINK", BIT(6)}, 211 {"CSME_PTIO", BIT(7)}, 212 213 {"CSME_USBR", BIT(0)}, 214 {"CSME_SUSRAM", BIT(1)}, 215 {"CSME_SMT1", BIT(2)}, 216 {"CSME_SMT4", BIT(3)}, 217 {"CSME_SMS2", BIT(4)}, 218 {"CSME_SMS1", BIT(5)}, 219 {"CSME_RTC", BIT(6)}, 220 {"CSME_PSF", BIT(7)}, 221 222 {"SBR0", BIT(0)}, 223 {"SBR1", BIT(1)}, 224 {"SBR2", BIT(2)}, 225 {"SBR3", BIT(3)}, 226 {"SBR4", BIT(4)}, 227 {"SBR5", BIT(5)}, 228 {"CSME_PECI", BIT(6)}, 229 {"PSF1", BIT(7)}, 230 231 {"PSF2", 
BIT(0)}, 232 {"PSF3", BIT(1)}, 233 {"PSF4", BIT(2)}, 234 {"CNVI", BIT(3)}, 235 {"UFS0", BIT(4)}, 236 {"EMMC", BIT(5)}, 237 {"SPF", BIT(6)}, 238 {"SBR6", BIT(7)}, 239 240 {"SBR7", BIT(0)}, 241 {"NPK_AON", BIT(1)}, 242 {"HDA_PGD4", BIT(2)}, 243 {"HDA_PGD5", BIT(3)}, 244 {"HDA_PGD6", BIT(4)}, 245 {"PSF6", BIT(5)}, 246 {"PSF7", BIT(6)}, 247 {"PSF8", BIT(7)}, 248 {} 249}; 250 251static const struct pmc_bit_map *ext_cnp_pfear_map[] = { 252 /* 253 * Check intel_pmc_core_ids[] users of cnp_reg_map for 254 * a list of core SoCs using this. 255 */ 256 cnp_pfear_map, 257 NULL 258}; 259 260static const struct pmc_bit_map icl_pfear_map[] = { 261 {"RES_65", BIT(0)}, 262 {"RES_66", BIT(1)}, 263 {"RES_67", BIT(2)}, 264 {"TAM", BIT(3)}, 265 {"GBETSN", BIT(4)}, 266 {"TBTLSX", BIT(5)}, 267 {"RES_71", BIT(6)}, 268 {"RES_72", BIT(7)}, 269 {} 270}; 271 272static const struct pmc_bit_map *ext_icl_pfear_map[] = { 273 /* 274 * Check intel_pmc_core_ids[] users of icl_reg_map for 275 * a list of core SoCs using this. 276 */ 277 cnp_pfear_map, 278 icl_pfear_map, 279 NULL 280}; 281 282static const struct pmc_bit_map tgl_pfear_map[] = { 283 {"PSF9", BIT(0)}, 284 {"RES_66", BIT(1)}, 285 {"RES_67", BIT(2)}, 286 {"RES_68", BIT(3)}, 287 {"RES_69", BIT(4)}, 288 {"RES_70", BIT(5)}, 289 {"TBTLSX", BIT(6)}, 290 {} 291}; 292 293static const struct pmc_bit_map *ext_tgl_pfear_map[] = { 294 /* 295 * Check intel_pmc_core_ids[] users of tgl_reg_map for 296 * a list of core SoCs using this. 297 */ 298 cnp_pfear_map, 299 tgl_pfear_map, 300 NULL 301}; 302 303static const struct pmc_bit_map cnp_slps0_dbg0_map[] = { 304 {"AUDIO_D3", BIT(0)}, 305 {"OTG_D3", BIT(1)}, 306 {"XHCI_D3", BIT(2)}, 307 {"LPIO_D3", BIT(3)}, 308 {"SDX_D3", BIT(4)}, 309 {"SATA_D3", BIT(5)}, 310 {"UFS0_D3", BIT(6)}, 311 {"UFS1_D3", BIT(7)}, 312 {"EMMC_D3", BIT(8)}, 313 {} 314}; 315 316static const struct pmc_bit_map cnp_slps0_dbg1_map[] = { 317 {"SDIO_PLL_OFF", BIT(0)}, 318 {"USB2_PLL_OFF", BIT(1)}, 319 {"AUDIO_PLL_OFF", BIT(2)}, 320 {"OC_PLL_OFF", BIT(3)}, 321 {"MAIN_PLL_OFF", BIT(4)}, 322 {"XOSC_OFF", BIT(5)}, 323 {"LPC_CLKS_GATED", BIT(6)}, 324 {"PCIE_CLKREQS_IDLE", BIT(7)}, 325 {"AUDIO_ROSC_OFF", BIT(8)}, 326 {"HPET_XOSC_CLK_REQ", BIT(9)}, 327 {"PMC_ROSC_SLOW_CLK", BIT(10)}, 328 {"AON2_ROSC_GATED", BIT(11)}, 329 {"CLKACKS_DEASSERTED", BIT(12)}, 330 {} 331}; 332 333static const struct pmc_bit_map cnp_slps0_dbg2_map[] = { 334 {"MPHY_CORE_GATED", BIT(0)}, 335 {"CSME_GATED", BIT(1)}, 336 {"USB2_SUS_GATED", BIT(2)}, 337 {"DYN_FLEX_IO_IDLE", BIT(3)}, 338 {"GBE_NO_LINK", BIT(4)}, 339 {"THERM_SEN_DISABLED", BIT(5)}, 340 {"PCIE_LOW_POWER", BIT(6)}, 341 {"ISH_VNNAON_REQ_ACT", BIT(7)}, 342 {"ISH_VNN_REQ_ACT", BIT(8)}, 343 {"CNV_VNNAON_REQ_ACT", BIT(9)}, 344 {"CNV_VNN_REQ_ACT", BIT(10)}, 345 {"NPK_VNNON_REQ_ACT", BIT(11)}, 346 {"PMSYNC_STATE_IDLE", BIT(12)}, 347 {"ALST_GT_THRES", BIT(13)}, 348 {"PMC_ARC_PG_READY", BIT(14)}, 349 {} 350}; 351 352static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = { 353 cnp_slps0_dbg0_map, 354 cnp_slps0_dbg1_map, 355 cnp_slps0_dbg2_map, 356 NULL 357}; 358 359static const struct pmc_bit_map cnp_ltr_show_map[] = { 360 {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, 361 {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, 362 {"SATA", CNP_PMC_LTR_SATA}, 363 {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, 364 {"XHCI", CNP_PMC_LTR_XHCI}, 365 {"Reserved", CNP_PMC_LTR_RESERVED}, 366 {"ME", CNP_PMC_LTR_ME}, 367 /* EVA is Enterprise Value Add, doesn't really exist on PCH */ 368 {"EVA", CNP_PMC_LTR_EVA}, 369 {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, 370 {"HD_AUDIO", CNP_PMC_LTR_AZ}, 371 {"CNV", 
CNP_PMC_LTR_CNV}, 372 {"LPSS", CNP_PMC_LTR_LPSS}, 373 {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, 374 {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, 375 {"CAMERA", CNP_PMC_LTR_CAM}, 376 {"ESPI", CNP_PMC_LTR_ESPI}, 377 {"SCC", CNP_PMC_LTR_SCC}, 378 {"ISH", CNP_PMC_LTR_ISH}, 379 {"UFSX2", CNP_PMC_LTR_UFSX2}, 380 {"EMMC", CNP_PMC_LTR_EMMC}, 381 /* 382 * Check intel_pmc_core_ids[] users of cnp_reg_map for 383 * a list of core SoCs using this. 384 */ 385 {"WIGIG", ICL_PMC_LTR_WIGIG}, 386 {"THC0", TGL_PMC_LTR_THC0}, 387 {"THC1", TGL_PMC_LTR_THC1}, 388 /* Below two cannot be used for LTR_IGNORE */ 389 {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, 390 {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT}, 391 {} 392}; 393 394static const struct pmc_reg_map cnp_reg_map = { 395 .pfear_sts = ext_cnp_pfear_map, 396 .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, 397 .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP, 398 .slps0_dbg_maps = cnp_slps0_dbg_maps, 399 .ltr_show_sts = cnp_ltr_show_map, 400 .msr_sts = msr_map, 401 .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET, 402 .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, 403 .regmap_length = CNP_PMC_MMIO_REG_LEN, 404 .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, 405 .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES, 406 .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, 407 .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, 408 .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED, 409 .etr3_offset = ETR3_OFFSET, 410}; 411 412static const struct pmc_reg_map icl_reg_map = { 413 .pfear_sts = ext_icl_pfear_map, 414 .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, 415 .slp_s0_res_counter_step = ICL_PMC_SLP_S0_RES_COUNTER_STEP, 416 .slps0_dbg_maps = cnp_slps0_dbg_maps, 417 .ltr_show_sts = cnp_ltr_show_map, 418 .msr_sts = msr_map, 419 .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET, 420 .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, 421 .regmap_length = CNP_PMC_MMIO_REG_LEN, 422 .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, 423 .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, 424 .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, 425 .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, 426 .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED, 427 .etr3_offset = ETR3_OFFSET, 428}; 429 430static const struct pmc_bit_map tgl_clocksource_status_map[] = { 431 {"USB2PLL_OFF_STS", BIT(18)}, 432 {"PCIe/USB3.1_Gen2PLL_OFF_STS", BIT(19)}, 433 {"PCIe_Gen3PLL_OFF_STS", BIT(20)}, 434 {"OPIOPLL_OFF_STS", BIT(21)}, 435 {"OCPLL_OFF_STS", BIT(22)}, 436 {"MainPLL_OFF_STS", BIT(23)}, 437 {"MIPIPLL_OFF_STS", BIT(24)}, 438 {"Fast_XTAL_Osc_OFF_STS", BIT(25)}, 439 {"AC_Ring_Osc_OFF_STS", BIT(26)}, 440 {"MC_Ring_Osc_OFF_STS", BIT(27)}, 441 {"SATAPLL_OFF_STS", BIT(29)}, 442 {"XTAL_USB2PLL_OFF_STS", BIT(31)}, 443 {} 444}; 445 446static const struct pmc_bit_map tgl_power_gating_status_map[] = { 447 {"CSME_PG_STS", BIT(0)}, 448 {"SATA_PG_STS", BIT(1)}, 449 {"xHCI_PG_STS", BIT(2)}, 450 {"UFSX2_PG_STS", BIT(3)}, 451 {"OTG_PG_STS", BIT(5)}, 452 {"SPA_PG_STS", BIT(6)}, 453 {"SPB_PG_STS", BIT(7)}, 454 {"SPC_PG_STS", BIT(8)}, 455 {"SPD_PG_STS", BIT(9)}, 456 {"SPE_PG_STS", BIT(10)}, 457 {"SPF_PG_STS", BIT(11)}, 458 {"LSX_PG_STS", BIT(13)}, 459 {"P2SB_PG_STS", BIT(14)}, 460 {"PSF_PG_STS", BIT(15)}, 461 {"SBR_PG_STS", BIT(16)}, 462 {"OPIDMI_PG_STS", BIT(17)}, 463 {"THC0_PG_STS", BIT(18)}, 464 {"THC1_PG_STS", BIT(19)}, 465 {"GBETSN_PG_STS", BIT(20)}, 466 {"GBE_PG_STS", BIT(21)}, 467 {"LPSS_PG_STS", BIT(22)}, 468 {"MMP_UFSX2_PG_STS", BIT(23)}, 469 {"MMP_UFSX2B_PG_STS", BIT(24)}, 470 {"FIA_PG_STS", BIT(25)}, 471 {} 472}; 473 474static const struct pmc_bit_map tgl_d3_status_map[] = { 475 
{"ADSP_D3_STS", BIT(0)}, 476 {"SATA_D3_STS", BIT(1)}, 477 {"xHCI0_D3_STS", BIT(2)}, 478 {"xDCI1_D3_STS", BIT(5)}, 479 {"SDX_D3_STS", BIT(6)}, 480 {"EMMC_D3_STS", BIT(7)}, 481 {"IS_D3_STS", BIT(8)}, 482 {"THC0_D3_STS", BIT(9)}, 483 {"THC1_D3_STS", BIT(10)}, 484 {"GBE_D3_STS", BIT(11)}, 485 {"GBE_TSN_D3_STS", BIT(12)}, 486 {} 487}; 488 489static const struct pmc_bit_map tgl_vnn_req_status_map[] = { 490 {"GPIO_COM0_VNN_REQ_STS", BIT(1)}, 491 {"GPIO_COM1_VNN_REQ_STS", BIT(2)}, 492 {"GPIO_COM2_VNN_REQ_STS", BIT(3)}, 493 {"GPIO_COM3_VNN_REQ_STS", BIT(4)}, 494 {"GPIO_COM4_VNN_REQ_STS", BIT(5)}, 495 {"GPIO_COM5_VNN_REQ_STS", BIT(6)}, 496 {"Audio_VNN_REQ_STS", BIT(7)}, 497 {"ISH_VNN_REQ_STS", BIT(8)}, 498 {"CNVI_VNN_REQ_STS", BIT(9)}, 499 {"eSPI_VNN_REQ_STS", BIT(10)}, 500 {"Display_VNN_REQ_STS", BIT(11)}, 501 {"DTS_VNN_REQ_STS", BIT(12)}, 502 {"SMBUS_VNN_REQ_STS", BIT(14)}, 503 {"CSME_VNN_REQ_STS", BIT(15)}, 504 {"SMLINK0_VNN_REQ_STS", BIT(16)}, 505 {"SMLINK1_VNN_REQ_STS", BIT(17)}, 506 {"CLINK_VNN_REQ_STS", BIT(20)}, 507 {"DCI_VNN_REQ_STS", BIT(21)}, 508 {"ITH_VNN_REQ_STS", BIT(22)}, 509 {"CSME_VNN_REQ_STS", BIT(24)}, 510 {"GBE_VNN_REQ_STS", BIT(25)}, 511 {} 512}; 513 514static const struct pmc_bit_map tgl_vnn_misc_status_map[] = { 515 {"CPU_C10_REQ_STS_0", BIT(0)}, 516 {"PCIe_LPM_En_REQ_STS_3", BIT(3)}, 517 {"ITH_REQ_STS_5", BIT(5)}, 518 {"CNVI_REQ_STS_6", BIT(6)}, 519 {"ISH_REQ_STS_7", BIT(7)}, 520 {"USB2_SUS_PG_Sys_REQ_STS_10", BIT(10)}, 521 {"PCIe_Clk_REQ_STS_12", BIT(12)}, 522 {"MPHY_Core_DL_REQ_STS_16", BIT(16)}, 523 {"Break-even_En_REQ_STS_17", BIT(17)}, 524 {"Auto-demo_En_REQ_STS_18", BIT(18)}, 525 {"MPHY_SUS_REQ_STS_22", BIT(22)}, 526 {"xDCI_attached_REQ_STS_24", BIT(24)}, 527 {} 528}; 529 530static const struct pmc_bit_map tgl_signal_status_map[] = { 531 {"LSX_Wake0_En_STS", BIT(0)}, 532 {"LSX_Wake0_Pol_STS", BIT(1)}, 533 {"LSX_Wake1_En_STS", BIT(2)}, 534 {"LSX_Wake1_Pol_STS", BIT(3)}, 535 {"LSX_Wake2_En_STS", BIT(4)}, 536 {"LSX_Wake2_Pol_STS", BIT(5)}, 537 {"LSX_Wake3_En_STS", BIT(6)}, 538 {"LSX_Wake3_Pol_STS", BIT(7)}, 539 {"LSX_Wake4_En_STS", BIT(8)}, 540 {"LSX_Wake4_Pol_STS", BIT(9)}, 541 {"LSX_Wake5_En_STS", BIT(10)}, 542 {"LSX_Wake5_Pol_STS", BIT(11)}, 543 {"LSX_Wake6_En_STS", BIT(12)}, 544 {"LSX_Wake6_Pol_STS", BIT(13)}, 545 {"LSX_Wake7_En_STS", BIT(14)}, 546 {"LSX_Wake7_Pol_STS", BIT(15)}, 547 {"Intel_Se_IO_Wake0_En_STS", BIT(16)}, 548 {"Intel_Se_IO_Wake0_Pol_STS", BIT(17)}, 549 {"Intel_Se_IO_Wake1_En_STS", BIT(18)}, 550 {"Intel_Se_IO_Wake1_Pol_STS", BIT(19)}, 551 {"Int_Timer_SS_Wake0_En_STS", BIT(20)}, 552 {"Int_Timer_SS_Wake0_Pol_STS", BIT(21)}, 553 {"Int_Timer_SS_Wake1_En_STS", BIT(22)}, 554 {"Int_Timer_SS_Wake1_Pol_STS", BIT(23)}, 555 {"Int_Timer_SS_Wake2_En_STS", BIT(24)}, 556 {"Int_Timer_SS_Wake2_Pol_STS", BIT(25)}, 557 {"Int_Timer_SS_Wake3_En_STS", BIT(26)}, 558 {"Int_Timer_SS_Wake3_Pol_STS", BIT(27)}, 559 {"Int_Timer_SS_Wake4_En_STS", BIT(28)}, 560 {"Int_Timer_SS_Wake4_Pol_STS", BIT(29)}, 561 {"Int_Timer_SS_Wake5_En_STS", BIT(30)}, 562 {"Int_Timer_SS_Wake5_Pol_STS", BIT(31)}, 563 {} 564}; 565 566static const struct pmc_bit_map *tgl_lpm_maps[] = { 567 tgl_clocksource_status_map, 568 tgl_power_gating_status_map, 569 tgl_d3_status_map, 570 tgl_vnn_req_status_map, 571 tgl_vnn_misc_status_map, 572 tgl_signal_status_map, 573 NULL 574}; 575 576static const struct pmc_reg_map tgl_reg_map = { 577 .pfear_sts = ext_tgl_pfear_map, 578 .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, 579 .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, 580 .ltr_show_sts = 
cnp_ltr_show_map, 581 .msr_sts = msr_map, 582 .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, 583 .regmap_length = CNP_PMC_MMIO_REG_LEN, 584 .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, 585 .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, 586 .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, 587 .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, 588 .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED, 589 .lpm_num_maps = TGL_LPM_NUM_MAPS, 590 .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, 591 .lpm_sts_latch_en_offset = TGL_LPM_STS_LATCH_EN_OFFSET, 592 .lpm_en_offset = TGL_LPM_EN_OFFSET, 593 .lpm_priority_offset = TGL_LPM_PRI_OFFSET, 594 .lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET, 595 .lpm_sts = tgl_lpm_maps, 596 .lpm_status_offset = TGL_LPM_STATUS_OFFSET, 597 .lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET, 598 .etr3_offset = ETR3_OFFSET, 599}; 600 601static void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev) 602{ 603 struct pmc_dev *pmcdev = platform_get_drvdata(pdev); 604 const int num_maps = pmcdev->map->lpm_num_maps; 605 u32 lpm_size = LPM_MAX_NUM_MODES * num_maps * 4; 606 union acpi_object *out_obj; 607 struct acpi_device *adev; 608 guid_t s0ix_dsm_guid; 609 u32 *lpm_req_regs, *addr; 610 611 adev = ACPI_COMPANION(&pdev->dev); 612 if (!adev) 613 return; 614 615 guid_parse(ACPI_S0IX_DSM_UUID, &s0ix_dsm_guid); 616 617 out_obj = acpi_evaluate_dsm(adev->handle, &s0ix_dsm_guid, 0, 618 ACPI_GET_LOW_MODE_REGISTERS, NULL); 619 if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) { 620 u32 size = out_obj->buffer.length; 621 622 if (size != lpm_size) { 623 acpi_handle_debug(adev->handle, 624 "_DSM returned unexpected buffer size, have %u, expect %u\n", 625 size, lpm_size); 626 goto free_acpi_obj; 627 } 628 } else { 629 acpi_handle_debug(adev->handle, 630 "_DSM function 0 evaluation failed\n"); 631 goto free_acpi_obj; 632 } 633 634 addr = (u32 *)out_obj->buffer.pointer; 635 636 lpm_req_regs = devm_kzalloc(&pdev->dev, lpm_size * sizeof(u32), 637 GFP_KERNEL); 638 if (!lpm_req_regs) 639 goto free_acpi_obj; 640 641 memcpy(lpm_req_regs, addr, lpm_size); 642 pmcdev->lpm_req_regs = lpm_req_regs; 643 644free_acpi_obj: 645 ACPI_FREE(out_obj); 646} 647 648/* Alder Lake: PGD PFET Enable Ack Status Register(s) bitmap */ 649static const struct pmc_bit_map adl_pfear_map[] = { 650 {"SPI/eSPI", BIT(2)}, 651 {"XHCI", BIT(3)}, 652 {"SPA", BIT(4)}, 653 {"SPB", BIT(5)}, 654 {"SPC", BIT(6)}, 655 {"GBE", BIT(7)}, 656 657 {"SATA", BIT(0)}, 658 {"HDA_PGD0", BIT(1)}, 659 {"HDA_PGD1", BIT(2)}, 660 {"HDA_PGD2", BIT(3)}, 661 {"HDA_PGD3", BIT(4)}, 662 {"SPD", BIT(5)}, 663 {"LPSS", BIT(6)}, 664 665 {"SMB", BIT(0)}, 666 {"ISH", BIT(1)}, 667 {"ITH", BIT(3)}, 668 669 {"XDCI", BIT(1)}, 670 {"DCI", BIT(2)}, 671 {"CSE", BIT(3)}, 672 {"CSME_KVM", BIT(4)}, 673 {"CSME_PMT", BIT(5)}, 674 {"CSME_CLINK", BIT(6)}, 675 {"CSME_PTIO", BIT(7)}, 676 677 {"CSME_USBR", BIT(0)}, 678 {"CSME_SUSRAM", BIT(1)}, 679 {"CSME_SMT1", BIT(2)}, 680 {"CSME_SMS2", BIT(4)}, 681 {"CSME_SMS1", BIT(5)}, 682 {"CSME_RTC", BIT(6)}, 683 {"CSME_PSF", BIT(7)}, 684 685 {"CNVI", BIT(3)}, 686 687 {"HDA_PGD4", BIT(2)}, 688 {"HDA_PGD5", BIT(3)}, 689 {"HDA_PGD6", BIT(4)}, 690 {} 691}; 692 693static const struct pmc_bit_map *ext_adl_pfear_map[] = { 694 /* 695 * Check intel_pmc_core_ids[] users of cnp_reg_map for 696 * a list of core SoCs using this. 
697 */ 698 adl_pfear_map, 699 NULL 700}; 701 702static const struct pmc_bit_map adl_ltr_show_map[] = { 703 {"SOUTHPORT_A", CNP_PMC_LTR_SPA}, 704 {"SOUTHPORT_B", CNP_PMC_LTR_SPB}, 705 {"SATA", CNP_PMC_LTR_SATA}, 706 {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, 707 {"XHCI", CNP_PMC_LTR_XHCI}, 708 {"SOUTHPORT_F", ADL_PMC_LTR_SPF}, 709 {"ME", CNP_PMC_LTR_ME}, 710 /* EVA is Enterprise Value Add, doesn't really exist on PCH */ 711 {"SATA1", CNP_PMC_LTR_EVA}, 712 {"SOUTHPORT_C", CNP_PMC_LTR_SPC}, 713 {"HD_AUDIO", CNP_PMC_LTR_AZ}, 714 {"CNV", CNP_PMC_LTR_CNV}, 715 {"LPSS", CNP_PMC_LTR_LPSS}, 716 {"SOUTHPORT_D", CNP_PMC_LTR_SPD}, 717 {"SOUTHPORT_E", CNP_PMC_LTR_SPE}, 718 {"SATA2", CNP_PMC_LTR_CAM}, 719 {"ESPI", CNP_PMC_LTR_ESPI}, 720 {"SCC", CNP_PMC_LTR_SCC}, 721 {"ISH", CNP_PMC_LTR_ISH}, 722 {"UFSX2", CNP_PMC_LTR_UFSX2}, 723 {"EMMC", CNP_PMC_LTR_EMMC}, 724 /* 725 * Check intel_pmc_core_ids[] users of cnp_reg_map for 726 * a list of core SoCs using this. 727 */ 728 {"WIGIG", ICL_PMC_LTR_WIGIG}, 729 {"THC0", TGL_PMC_LTR_THC0}, 730 {"THC1", TGL_PMC_LTR_THC1}, 731 {"SOUTHPORT_G", CNP_PMC_LTR_RESERVED}, 732 733 /* Below two cannot be used for LTR_IGNORE */ 734 {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, 735 {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT}, 736 {} 737}; 738 739static const struct pmc_bit_map adl_clocksource_status_map[] = { 740 {"CLKPART1_OFF_STS", BIT(0)}, 741 {"CLKPART2_OFF_STS", BIT(1)}, 742 {"CLKPART3_OFF_STS", BIT(2)}, 743 {"CLKPART4_OFF_STS", BIT(3)}, 744 {"CLKPART5_OFF_STS", BIT(4)}, 745 {"CLKPART6_OFF_STS", BIT(5)}, 746 {"CLKPART7_OFF_STS", BIT(6)}, 747 {"CLKPART8_OFF_STS", BIT(7)}, 748 {"PCIE0PLL_OFF_STS", BIT(10)}, 749 {"PCIE1PLL_OFF_STS", BIT(11)}, 750 {"PCIE2PLL_OFF_STS", BIT(12)}, 751 {"PCIE3PLL_OFF_STS", BIT(13)}, 752 {"PCIE4PLL_OFF_STS", BIT(14)}, 753 {"PCIE5PLL_OFF_STS", BIT(15)}, 754 {"PCIE6PLL_OFF_STS", BIT(16)}, 755 {"USB2PLL_OFF_STS", BIT(18)}, 756 {"OCPLL_OFF_STS", BIT(22)}, 757 {"AUDIOPLL_OFF_STS", BIT(23)}, 758 {"GBEPLL_OFF_STS", BIT(24)}, 759 {"Fast_XTAL_Osc_OFF_STS", BIT(25)}, 760 {"AC_Ring_Osc_OFF_STS", BIT(26)}, 761 {"MC_Ring_Osc_OFF_STS", BIT(27)}, 762 {"SATAPLL_OFF_STS", BIT(29)}, 763 {"USB3PLL_OFF_STS", BIT(31)}, 764 {} 765}; 766 767static const struct pmc_bit_map adl_power_gating_status_0_map[] = { 768 {"PMC_PGD0_PG_STS", BIT(0)}, 769 {"DMI_PGD0_PG_STS", BIT(1)}, 770 {"ESPISPI_PGD0_PG_STS", BIT(2)}, 771 {"XHCI_PGD0_PG_STS", BIT(3)}, 772 {"SPA_PGD0_PG_STS", BIT(4)}, 773 {"SPB_PGD0_PG_STS", BIT(5)}, 774 {"SPC_PGD0_PG_STS", BIT(6)}, 775 {"GBE_PGD0_PG_STS", BIT(7)}, 776 {"SATA_PGD0_PG_STS", BIT(8)}, 777 {"DSP_PGD0_PG_STS", BIT(9)}, 778 {"DSP_PGD1_PG_STS", BIT(10)}, 779 {"DSP_PGD2_PG_STS", BIT(11)}, 780 {"DSP_PGD3_PG_STS", BIT(12)}, 781 {"SPD_PGD0_PG_STS", BIT(13)}, 782 {"LPSS_PGD0_PG_STS", BIT(14)}, 783 {"SMB_PGD0_PG_STS", BIT(16)}, 784 {"ISH_PGD0_PG_STS", BIT(17)}, 785 {"NPK_PGD0_PG_STS", BIT(19)}, 786 {"PECI_PGD0_PG_STS", BIT(21)}, 787 {"XDCI_PGD0_PG_STS", BIT(25)}, 788 {"EXI_PGD0_PG_STS", BIT(26)}, 789 {"CSE_PGD0_PG_STS", BIT(27)}, 790 {"KVMCC_PGD0_PG_STS", BIT(28)}, 791 {"PMT_PGD0_PG_STS", BIT(29)}, 792 {"CLINK_PGD0_PG_STS", BIT(30)}, 793 {"PTIO_PGD0_PG_STS", BIT(31)}, 794 {} 795}; 796 797static const struct pmc_bit_map adl_power_gating_status_1_map[] = { 798 {"USBR0_PGD0_PG_STS", BIT(0)}, 799 {"SMT1_PGD0_PG_STS", BIT(2)}, 800 {"CSMERTC_PGD0_PG_STS", BIT(6)}, 801 {"CSMEPSF_PGD0_PG_STS", BIT(7)}, 802 {"CNVI_PGD0_PG_STS", BIT(19)}, 803 {"DSP_PGD4_PG_STS", BIT(26)}, 804 {"SPG_PGD0_PG_STS", BIT(27)}, 805 {"SPE_PGD0_PG_STS", BIT(28)}, 806 {} 807}; 808 809static const 
struct pmc_bit_map adl_power_gating_status_2_map[] = { 810 {"THC0_PGD0_PG_STS", BIT(7)}, 811 {"THC1_PGD0_PG_STS", BIT(8)}, 812 {"SPF_PGD0_PG_STS", BIT(14)}, 813 {} 814}; 815 816static const struct pmc_bit_map adl_d3_status_0_map[] = { 817 {"ISH_D3_STS", BIT(2)}, 818 {"LPSS_D3_STS", BIT(3)}, 819 {"XDCI_D3_STS", BIT(4)}, 820 {"XHCI_D3_STS", BIT(5)}, 821 {"SPA_D3_STS", BIT(12)}, 822 {"SPB_D3_STS", BIT(13)}, 823 {"SPC_D3_STS", BIT(14)}, 824 {"SPD_D3_STS", BIT(15)}, 825 {"SPE_D3_STS", BIT(16)}, 826 {"DSP_D3_STS", BIT(19)}, 827 {"SATA_D3_STS", BIT(20)}, 828 {"DMI_D3_STS", BIT(22)}, 829 {} 830}; 831 832static const struct pmc_bit_map adl_d3_status_1_map[] = { 833 {"GBE_D3_STS", BIT(19)}, 834 {"CNVI_D3_STS", BIT(27)}, 835 {} 836}; 837 838static const struct pmc_bit_map adl_d3_status_2_map[] = { 839 {"CSMERTC_D3_STS", BIT(1)}, 840 {"CSE_D3_STS", BIT(4)}, 841 {"KVMCC_D3_STS", BIT(5)}, 842 {"USBR0_D3_STS", BIT(6)}, 843 {"SMT1_D3_STS", BIT(8)}, 844 {"PTIO_D3_STS", BIT(16)}, 845 {"PMT_D3_STS", BIT(17)}, 846 {} 847}; 848 849static const struct pmc_bit_map adl_d3_status_3_map[] = { 850 {"THC0_D3_STS", BIT(14)}, 851 {"THC1_D3_STS", BIT(15)}, 852 {} 853}; 854 855static const struct pmc_bit_map adl_vnn_req_status_0_map[] = { 856 {"ISH_VNN_REQ_STS", BIT(2)}, 857 {"ESPISPI_VNN_REQ_STS", BIT(18)}, 858 {"DSP_VNN_REQ_STS", BIT(19)}, 859 {} 860}; 861 862static const struct pmc_bit_map adl_vnn_req_status_1_map[] = { 863 {"NPK_VNN_REQ_STS", BIT(4)}, 864 {"EXI_VNN_REQ_STS", BIT(9)}, 865 {"GBE_VNN_REQ_STS", BIT(19)}, 866 {"SMB_VNN_REQ_STS", BIT(25)}, 867 {"CNVI_VNN_REQ_STS", BIT(27)}, 868 {} 869}; 870 871static const struct pmc_bit_map adl_vnn_req_status_2_map[] = { 872 {"CSMERTC_VNN_REQ_STS", BIT(1)}, 873 {"CSE_VNN_REQ_STS", BIT(4)}, 874 {"SMT1_VNN_REQ_STS", BIT(8)}, 875 {"CLINK_VNN_REQ_STS", BIT(14)}, 876 {"GPIOCOM4_VNN_REQ_STS", BIT(20)}, 877 {"GPIOCOM3_VNN_REQ_STS", BIT(21)}, 878 {"GPIOCOM2_VNN_REQ_STS", BIT(22)}, 879 {"GPIOCOM1_VNN_REQ_STS", BIT(23)}, 880 {"GPIOCOM0_VNN_REQ_STS", BIT(24)}, 881 {} 882}; 883 884static const struct pmc_bit_map adl_vnn_req_status_3_map[] = { 885 {"GPIOCOM5_VNN_REQ_STS", BIT(11)}, 886 {} 887}; 888 889static const struct pmc_bit_map adl_vnn_misc_status_map[] = { 890 {"CPU_C10_REQ_STS", BIT(0)}, 891 {"PCIe_LPM_En_REQ_STS", BIT(3)}, 892 {"ITH_REQ_STS", BIT(5)}, 893 {"CNVI_REQ_STS", BIT(6)}, 894 {"ISH_REQ_STS", BIT(7)}, 895 {"USB2_SUS_PG_Sys_REQ_STS", BIT(10)}, 896 {"PCIe_Clk_REQ_STS", BIT(12)}, 897 {"MPHY_Core_DL_REQ_STS", BIT(16)}, 898 {"Break-even_En_REQ_STS", BIT(17)}, 899 {"MPHY_SUS_REQ_STS", BIT(22)}, 900 {"xDCI_attached_REQ_STS", BIT(24)}, 901 {} 902}; 903 904static const struct pmc_bit_map *adl_lpm_maps[] = { 905 adl_clocksource_status_map, 906 adl_power_gating_status_0_map, 907 adl_power_gating_status_1_map, 908 adl_power_gating_status_2_map, 909 adl_d3_status_0_map, 910 adl_d3_status_1_map, 911 adl_d3_status_2_map, 912 adl_d3_status_3_map, 913 adl_vnn_req_status_0_map, 914 adl_vnn_req_status_1_map, 915 adl_vnn_req_status_2_map, 916 adl_vnn_req_status_3_map, 917 adl_vnn_misc_status_map, 918 tgl_signal_status_map, 919 NULL 920}; 921 922static const struct pmc_reg_map adl_reg_map = { 923 .pfear_sts = ext_adl_pfear_map, 924 .slp_s0_offset = ADL_PMC_SLP_S0_RES_COUNTER_OFFSET, 925 .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP, 926 .ltr_show_sts = adl_ltr_show_map, 927 .msr_sts = msr_map, 928 .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, 929 .regmap_length = CNP_PMC_MMIO_REG_LEN, 930 .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, 931 .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES, 
932 .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, 933 .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, 934 .ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED, 935 .lpm_num_modes = ADL_LPM_NUM_MODES, 936 .lpm_num_maps = ADL_LPM_NUM_MAPS, 937 .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, 938 .etr3_offset = ETR3_OFFSET, 939 .lpm_sts_latch_en_offset = ADL_LPM_STATUS_LATCH_EN_OFFSET, 940 .lpm_priority_offset = ADL_LPM_PRI_OFFSET, 941 .lpm_en_offset = ADL_LPM_EN_OFFSET, 942 .lpm_residency_offset = ADL_LPM_RESIDENCY_OFFSET, 943 .lpm_sts = adl_lpm_maps, 944 .lpm_status_offset = ADL_LPM_STATUS_OFFSET, 945 .lpm_live_status_offset = ADL_LPM_LIVE_STATUS_OFFSET, 946}; 947 948static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset) 949{ 950 return readl(pmcdev->regbase + reg_offset); 951} 952 953static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset, 954 u32 val) 955{ 956 writel(val, pmcdev->regbase + reg_offset); 957} 958 959static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value) 960{ 961 return (u64)value * pmcdev->map->slp_s0_res_counter_step; 962} 963 964static int set_etr3(struct pmc_dev *pmcdev) 965{ 966 const struct pmc_reg_map *map = pmcdev->map; 967 u32 reg; 968 int err; 969 970 if (!map->etr3_offset) 971 return -EOPNOTSUPP; 972 973 mutex_lock(&pmcdev->lock); 974 975 /* check if CF9 is locked */ 976 reg = pmc_core_reg_read(pmcdev, map->etr3_offset); 977 if (reg & ETR3_CF9LOCK) { 978 err = -EACCES; 979 goto out_unlock; 980 } 981 982 /* write CF9 global reset bit */ 983 reg |= ETR3_CF9GR; 984 pmc_core_reg_write(pmcdev, map->etr3_offset, reg); 985 986 reg = pmc_core_reg_read(pmcdev, map->etr3_offset); 987 if (!(reg & ETR3_CF9GR)) { 988 err = -EIO; 989 goto out_unlock; 990 } 991 992 err = 0; 993 994out_unlock: 995 mutex_unlock(&pmcdev->lock); 996 return err; 997} 998static umode_t etr3_is_visible(struct kobject *kobj, 999 struct attribute *attr, 1000 int idx) 1001{ 1002 struct device *dev = kobj_to_dev(kobj); 1003 struct pmc_dev *pmcdev = dev_get_drvdata(dev); 1004 const struct pmc_reg_map *map = pmcdev->map; 1005 u32 reg; 1006 1007 mutex_lock(&pmcdev->lock); 1008 reg = pmc_core_reg_read(pmcdev, map->etr3_offset); 1009 mutex_unlock(&pmcdev->lock); 1010 1011 return reg & ETR3_CF9LOCK ? 
		attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
}

static ssize_t etr3_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	const struct pmc_reg_map *map = pmcdev->map;
	u32 reg;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	mutex_lock(&pmcdev->lock);

	reg = pmc_core_reg_read(pmcdev, map->etr3_offset);
	reg &= ETR3_CF9GR | ETR3_CF9LOCK;

	mutex_unlock(&pmcdev->lock);

	return sysfs_emit(buf, "0x%08x", reg);
}

static ssize_t etr3_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	int err;
	u32 reg;

	err = kstrtouint(buf, 16, &reg);
	if (err)
		return err;

	/* allow only CF9 writes */
	if (reg != ETR3_CF9GR)
		return -EINVAL;

	err = set_etr3(pmcdev);
	if (err)
		return err;

	return len;
}
static DEVICE_ATTR_RW(etr3);

static struct attribute *pmc_attrs[] = {
	&dev_attr_etr3.attr,
	NULL
};

static const struct attribute_group pmc_attr_group = {
	.attrs = pmc_attrs,
	.is_visible = etr3_is_visible,
};

static const struct attribute_group *pmc_dev_groups[] = {
	&pmc_attr_group,
	NULL
};

static int pmc_core_dev_state_get(void *data, u64 *val)
{
	struct pmc_dev *pmcdev = data;
	const struct pmc_reg_map *map = pmcdev->map;
	u32 value;

	value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
	*val = pmc_core_adjust_slp_s0_step(pmcdev, value);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");

static int pmc_core_check_read_lock_bit(struct pmc_dev *pmcdev)
{
	u32 value;

	value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
	return value & BIT(pmcdev->map->pm_read_disable_bit);
}

static void pmc_core_slps0_display(struct pmc_dev *pmcdev, struct device *dev,
				   struct seq_file *s)
{
	const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
	const struct pmc_bit_map *map;
	int offset = pmcdev->map->slps0_dbg_offset;
	u32 data;

	while (*maps) {
		map = *maps;
		data = pmc_core_reg_read(pmcdev, offset);
		offset += 4;
		while (map->name) {
			if (dev)
				dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
					 map->name,
					 data & map->bit_mask ? "Yes" : "No");
			if (s)
				seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
					   map->name,
					   data & map->bit_mask ?
"Yes" : "No"); 1116 ++map; 1117 } 1118 ++maps; 1119 } 1120} 1121 1122static int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps) 1123{ 1124 int idx; 1125 1126 for (idx = 0; maps[idx]; idx++) 1127 ;/* Nothing */ 1128 1129 return idx; 1130} 1131 1132static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev, 1133 struct seq_file *s, u32 offset, 1134 const char *str, 1135 const struct pmc_bit_map **maps) 1136{ 1137 int index, idx, len = 32, bit_mask, arr_size; 1138 u32 *lpm_regs; 1139 1140 arr_size = pmc_core_lpm_get_arr_size(maps); 1141 lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL); 1142 if (!lpm_regs) 1143 return; 1144 1145 for (index = 0; index < arr_size; index++) { 1146 lpm_regs[index] = pmc_core_reg_read(pmcdev, offset); 1147 offset += 4; 1148 } 1149 1150 for (idx = 0; idx < arr_size; idx++) { 1151 if (dev) 1152 dev_info(dev, "\nLPM_%s_%d:\t0x%x\n", str, idx, 1153 lpm_regs[idx]); 1154 if (s) 1155 seq_printf(s, "\nLPM_%s_%d:\t0x%x\n", str, idx, 1156 lpm_regs[idx]); 1157 for (index = 0; maps[idx][index].name && index < len; index++) { 1158 bit_mask = maps[idx][index].bit_mask; 1159 if (dev) 1160 dev_info(dev, "%-30s %-30d\n", 1161 maps[idx][index].name, 1162 lpm_regs[idx] & bit_mask ? 1 : 0); 1163 if (s) 1164 seq_printf(s, "%-30s %-30d\n", 1165 maps[idx][index].name, 1166 lpm_regs[idx] & bit_mask ? 1 : 0); 1167 } 1168 } 1169 1170 kfree(lpm_regs); 1171} 1172 1173static bool slps0_dbg_latch; 1174 1175static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) 1176{ 1177 return readb(pmcdev->regbase + offset); 1178} 1179 1180static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip, 1181 u8 pf_reg, const struct pmc_bit_map **pf_map) 1182{ 1183 seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n", 1184 ip, pf_map[idx][index].name, 1185 pf_map[idx][index].bit_mask & pf_reg ? 
"Off" : "On"); 1186} 1187 1188static int pmc_core_ppfear_show(struct seq_file *s, void *unused) 1189{ 1190 struct pmc_dev *pmcdev = s->private; 1191 const struct pmc_bit_map **maps = pmcdev->map->pfear_sts; 1192 u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES]; 1193 int index, iter, idx, ip = 0; 1194 1195 iter = pmcdev->map->ppfear0_offset; 1196 1197 for (index = 0; index < pmcdev->map->ppfear_buckets && 1198 index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++) 1199 pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter); 1200 1201 for (idx = 0; maps[idx]; idx++) { 1202 for (index = 0; maps[idx][index].name && 1203 index < pmcdev->map->ppfear_buckets * 8; ip++, index++) 1204 pmc_core_display_map(s, index, idx, ip, 1205 pf_regs[index / 8], maps); 1206 } 1207 1208 return 0; 1209} 1210DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear); 1211 1212/* This function should return link status, 0 means ready */ 1213static int pmc_core_mtpmc_link_status(struct pmc_dev *pmcdev) 1214{ 1215 u32 value; 1216 1217 value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET); 1218 return value & BIT(SPT_PMC_MSG_FULL_STS_BIT); 1219} 1220 1221static int pmc_core_send_msg(struct pmc_dev *pmcdev, u32 *addr_xram) 1222{ 1223 u32 dest; 1224 int timeout; 1225 1226 for (timeout = NUM_RETRIES; timeout > 0; timeout--) { 1227 if (pmc_core_mtpmc_link_status(pmcdev) == 0) 1228 break; 1229 msleep(5); 1230 } 1231 1232 if (timeout <= 0 && pmc_core_mtpmc_link_status(pmcdev)) 1233 return -EBUSY; 1234 1235 dest = (*addr_xram & MTPMC_MASK) | (1U << 1); 1236 pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest); 1237 return 0; 1238} 1239 1240static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused) 1241{ 1242 struct pmc_dev *pmcdev = s->private; 1243 const struct pmc_bit_map *map = pmcdev->map->mphy_sts; 1244 u32 mphy_core_reg_low, mphy_core_reg_high; 1245 u32 val_low, val_high; 1246 int index, err = 0; 1247 1248 if (pmcdev->pmc_xram_read_bit) { 1249 seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS."); 1250 return 0; 1251 } 1252 1253 mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16); 1254 mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16); 1255 1256 mutex_lock(&pmcdev->lock); 1257 1258 if (pmc_core_send_msg(pmcdev, &mphy_core_reg_low) != 0) { 1259 err = -EBUSY; 1260 goto out_unlock; 1261 } 1262 1263 msleep(10); 1264 val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET); 1265 1266 if (pmc_core_send_msg(pmcdev, &mphy_core_reg_high) != 0) { 1267 err = -EBUSY; 1268 goto out_unlock; 1269 } 1270 1271 msleep(10); 1272 val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET); 1273 1274 for (index = 0; index < 8 && map[index].name; index++) { 1275 seq_printf(s, "%-32s\tState: %s\n", 1276 map[index].name, 1277 map[index].bit_mask & val_low ? "Not power gated" : 1278 "Power gated"); 1279 } 1280 1281 for (index = 8; map[index].name; index++) { 1282 seq_printf(s, "%-32s\tState: %s\n", 1283 map[index].name, 1284 map[index].bit_mask & val_high ? 
"Not power gated" : 1285 "Power gated"); 1286 } 1287 1288out_unlock: 1289 mutex_unlock(&pmcdev->lock); 1290 return err; 1291} 1292DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg); 1293 1294static int pmc_core_pll_show(struct seq_file *s, void *unused) 1295{ 1296 struct pmc_dev *pmcdev = s->private; 1297 const struct pmc_bit_map *map = pmcdev->map->pll_sts; 1298 u32 mphy_common_reg, val; 1299 int index, err = 0; 1300 1301 if (pmcdev->pmc_xram_read_bit) { 1302 seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS."); 1303 return 0; 1304 } 1305 1306 mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16); 1307 mutex_lock(&pmcdev->lock); 1308 1309 if (pmc_core_send_msg(pmcdev, &mphy_common_reg) != 0) { 1310 err = -EBUSY; 1311 goto out_unlock; 1312 } 1313 1314 /* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */ 1315 msleep(10); 1316 val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET); 1317 1318 for (index = 0; map[index].name ; index++) { 1319 seq_printf(s, "%-32s\tState: %s\n", 1320 map[index].name, 1321 map[index].bit_mask & val ? "Active" : "Idle"); 1322 } 1323 1324out_unlock: 1325 mutex_unlock(&pmcdev->lock); 1326 return err; 1327} 1328DEFINE_SHOW_ATTRIBUTE(pmc_core_pll); 1329 1330static int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value) 1331{ 1332 const struct pmc_reg_map *map = pmcdev->map; 1333 u32 reg; 1334 int err = 0; 1335 1336 mutex_lock(&pmcdev->lock); 1337 1338 if (value > map->ltr_ignore_max) { 1339 err = -EINVAL; 1340 goto out_unlock; 1341 } 1342 1343 reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); 1344 reg |= BIT(value); 1345 pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg); 1346 1347out_unlock: 1348 mutex_unlock(&pmcdev->lock); 1349 1350 return err; 1351} 1352 1353static ssize_t pmc_core_ltr_ignore_write(struct file *file, 1354 const char __user *userbuf, 1355 size_t count, loff_t *ppos) 1356{ 1357 struct seq_file *s = file->private_data; 1358 struct pmc_dev *pmcdev = s->private; 1359 u32 buf_size, value; 1360 int err; 1361 1362 buf_size = min_t(u32, count, 64); 1363 1364 err = kstrtou32_from_user(userbuf, buf_size, 10, &value); 1365 if (err) 1366 return err; 1367 1368 err = pmc_core_send_ltr_ignore(pmcdev, value); 1369 1370 return err == 0 ? 
count : err; 1371} 1372 1373static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused) 1374{ 1375 return 0; 1376} 1377 1378static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file) 1379{ 1380 return single_open(file, pmc_core_ltr_ignore_show, inode->i_private); 1381} 1382 1383static const struct file_operations pmc_core_ltr_ignore_ops = { 1384 .open = pmc_core_ltr_ignore_open, 1385 .read = seq_read, 1386 .write = pmc_core_ltr_ignore_write, 1387 .llseek = seq_lseek, 1388 .release = single_release, 1389}; 1390 1391static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset) 1392{ 1393 const struct pmc_reg_map *map = pmcdev->map; 1394 u32 fd; 1395 1396 mutex_lock(&pmcdev->lock); 1397 1398 if (!reset && !slps0_dbg_latch) 1399 goto out_unlock; 1400 1401 fd = pmc_core_reg_read(pmcdev, map->slps0_dbg_offset); 1402 if (reset) 1403 fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS; 1404 else 1405 fd |= CNP_PMC_LATCH_SLPS0_EVENTS; 1406 pmc_core_reg_write(pmcdev, map->slps0_dbg_offset, fd); 1407 1408 slps0_dbg_latch = false; 1409 1410out_unlock: 1411 mutex_unlock(&pmcdev->lock); 1412} 1413 1414static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused) 1415{ 1416 struct pmc_dev *pmcdev = s->private; 1417 1418 pmc_core_slps0_dbg_latch(pmcdev, false); 1419 pmc_core_slps0_display(pmcdev, NULL, s); 1420 pmc_core_slps0_dbg_latch(pmcdev, true); 1421 1422 return 0; 1423} 1424DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg); 1425 1426static u32 convert_ltr_scale(u32 val) 1427{ 1428 /* 1429 * As per PCIE specification supporting document 1430 * ECN_LatencyTolnReporting_14Aug08.pdf the Latency 1431 * Tolerance Reporting data payload is encoded in a 1432 * 3 bit scale and 10 bit value fields. Values are 1433 * multiplied by the indicated scale to yield an absolute time 1434 * value, expressible in a range from 1 nanosecond to 1435 * 2^25*(2^10-1) = 34,326,183,936 nanoseconds. 
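	 * For example, a payload with scale 2 and value 100 encodes
	 * 100 * 1024 ns = 102,400 ns (the multipliers are tabulated below).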
	 *
	 * scale encoding is as follows:
	 *
	 * ----------------------------------------------
	 * | scale factor |       Multiplier (ns)       |
	 * ----------------------------------------------
	 * |      0       |              1              |
	 * |      1       |             32              |
	 * |      2       |            1024             |
	 * |      3       |           32768             |
	 * |      4       |          1048576            |
	 * |      5       |         33554432            |
	 * |      6       |          Invalid            |
	 * |      7       |          Invalid            |
	 * ----------------------------------------------
	 */
	if (val > 5) {
		pr_warn("Invalid LTR scale factor.\n");
		return 0;
	}

	return 1U << (5 * val);
}

static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts;
	u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
	u32 ltr_raw_data, scale, val;
	u16 snoop_ltr, nonsnoop_ltr;
	int index;

	for (index = 0; map[index].name ; index++) {
		decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
		ltr_raw_data = pmc_core_reg_read(pmcdev,
						 map[index].bit_mask);
		snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
		nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;

		if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
			scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
			val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
			decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
		}

		if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
			scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
			val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
			decoded_snoop_ltr = val * convert_ltr_scale(scale);
		}

		seq_printf(s, "%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
			   map[index].name, ltr_raw_data,
			   decoded_non_snoop_ltr,
			   decoded_snoop_ltr);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);

static inline u64 adjust_lpm_residency(struct pmc_dev *pmcdev, u32 offset,
				       const int lpm_adj_x2)
{
	u64 lpm_res = pmc_core_reg_read(pmcdev, offset);

	return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
}

static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
	u32 offset = pmcdev->map->lpm_residency_offset;
	int i, mode;

	seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");

	pmc_for_each_mode(i, mode, pmcdev) {
		seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
			   adjust_lpm_residency(pmcdev, offset + (4 * mode), lpm_adj_x2));
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);

static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
	u32 offset = pmcdev->map->lpm_status_offset;

	pmc_core_lpm_display(pmcdev, NULL, s, offset, "STATUS", maps);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);

static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
	u32 offset = pmcdev->map->lpm_live_status_offset;

	pmc_core_lpm_display(pmcdev, NULL, s, offset, "LIVE_STATUS", maps);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);

static void
pmc_core_substate_req_header_show(struct seq_file *s) 1548{ 1549 struct pmc_dev *pmcdev = s->private; 1550 int i, mode; 1551 1552 seq_printf(s, "%30s |", "Element"); 1553 pmc_for_each_mode(i, mode, pmcdev) 1554 seq_printf(s, " %9s |", pmc_lpm_modes[mode]); 1555 1556 seq_printf(s, " %9s |\n", "Status"); 1557} 1558 1559static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) 1560{ 1561 struct pmc_dev *pmcdev = s->private; 1562 const struct pmc_bit_map **maps = pmcdev->map->lpm_sts; 1563 const struct pmc_bit_map *map; 1564 const int num_maps = pmcdev->map->lpm_num_maps; 1565 u32 sts_offset = pmcdev->map->lpm_status_offset; 1566 u32 *lpm_req_regs = pmcdev->lpm_req_regs; 1567 int mp; 1568 1569 /* Display the header */ 1570 pmc_core_substate_req_header_show(s); 1571 1572 /* Loop over maps */ 1573 for (mp = 0; mp < num_maps; mp++) { 1574 u32 req_mask = 0; 1575 u32 lpm_status; 1576 int mode, idx, i, len = 32; 1577 1578 /* 1579 * Capture the requirements and create a mask so that we only 1580 * show an element if it's required for at least one of the 1581 * enabled low power modes 1582 */ 1583 pmc_for_each_mode(idx, mode, pmcdev) 1584 req_mask |= lpm_req_regs[mp + (mode * num_maps)]; 1585 1586 /* Get the last latched status for this map */ 1587 lpm_status = pmc_core_reg_read(pmcdev, sts_offset + (mp * 4)); 1588 1589 /* Loop over elements in this map */ 1590 map = maps[mp]; 1591 for (i = 0; map[i].name && i < len; i++) { 1592 u32 bit_mask = map[i].bit_mask; 1593 1594 if (!(bit_mask & req_mask)) 1595 /* 1596 * Not required for any enabled states 1597 * so don't display 1598 */ 1599 continue; 1600 1601 /* Display the element name in the first column */ 1602 seq_printf(s, "%30s |", map[i].name); 1603 1604 /* Loop over the enabled states and display if required */ 1605 pmc_for_each_mode(idx, mode, pmcdev) { 1606 if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask) 1607 seq_printf(s, " %9s |", 1608 "Required"); 1609 else 1610 seq_printf(s, " %9s |", " "); 1611 } 1612 1613 /* In Status column, show the last captured state of this agent */ 1614 if (lpm_status & bit_mask) 1615 seq_printf(s, " %9s |", "Yes"); 1616 else 1617 seq_printf(s, " %9s |", " "); 1618 1619 seq_puts(s, "\n"); 1620 } 1621 } 1622 1623 return 0; 1624} 1625DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs); 1626 1627static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused) 1628{ 1629 struct pmc_dev *pmcdev = s->private; 1630 bool c10; 1631 u32 reg; 1632 int idx, mode; 1633 1634 reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset); 1635 if (reg & LPM_STS_LATCH_MODE) { 1636 seq_puts(s, "c10"); 1637 c10 = false; 1638 } else { 1639 seq_puts(s, "[c10]"); 1640 c10 = true; 1641 } 1642 1643 pmc_for_each_mode(idx, mode, pmcdev) { 1644 if ((BIT(mode) & reg) && !c10) 1645 seq_printf(s, " [%s]", pmc_lpm_modes[mode]); 1646 else 1647 seq_printf(s, " %s", pmc_lpm_modes[mode]); 1648 } 1649 1650 seq_puts(s, " clear\n"); 1651 1652 return 0; 1653} 1654 1655static ssize_t pmc_core_lpm_latch_mode_write(struct file *file, 1656 const char __user *userbuf, 1657 size_t count, loff_t *ppos) 1658{ 1659 struct seq_file *s = file->private_data; 1660 struct pmc_dev *pmcdev = s->private; 1661 bool clear = false, c10 = false; 1662 unsigned char buf[8]; 1663 int idx, m, mode; 1664 u32 reg; 1665 1666 if (count > sizeof(buf) - 1) 1667 return -EINVAL; 1668 if (copy_from_user(buf, userbuf, count)) 1669 return -EFAULT; 1670 buf[count] = '\0'; 1671 1672 /* 1673 * Allowed strings are: 1674 * Any enabled substate, e.g. 
'S0i2.0' 1675 * 'c10' 1676 * 'clear' 1677 */ 1678 mode = sysfs_match_string(pmc_lpm_modes, buf); 1679 1680 /* Check string matches enabled mode */ 1681 pmc_for_each_mode(idx, m, pmcdev) 1682 if (mode == m) 1683 break; 1684 1685 if (mode != m || mode < 0) { 1686 if (sysfs_streq(buf, "clear")) 1687 clear = true; 1688 else if (sysfs_streq(buf, "c10")) 1689 c10 = true; 1690 else 1691 return -EINVAL; 1692 } 1693 1694 if (clear) { 1695 mutex_lock(&pmcdev->lock); 1696 1697 reg = pmc_core_reg_read(pmcdev, pmcdev->map->etr3_offset); 1698 reg |= ETR3_CLEAR_LPM_EVENTS; 1699 pmc_core_reg_write(pmcdev, pmcdev->map->etr3_offset, reg); 1700 1701 mutex_unlock(&pmcdev->lock); 1702 1703 return count; 1704 } 1705 1706 if (c10) { 1707 mutex_lock(&pmcdev->lock); 1708 1709 reg = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_sts_latch_en_offset); 1710 reg &= ~LPM_STS_LATCH_MODE; 1711 pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg); 1712 1713 mutex_unlock(&pmcdev->lock); 1714 1715 return count; 1716 } 1717 1718 /* 1719 * For LPM mode latching we set the latch enable bit and selected mode 1720 * and clear everything else. 1721 */ 1722 reg = LPM_STS_LATCH_MODE | BIT(mode); 1723 mutex_lock(&pmcdev->lock); 1724 pmc_core_reg_write(pmcdev, pmcdev->map->lpm_sts_latch_en_offset, reg); 1725 mutex_unlock(&pmcdev->lock); 1726 1727 return count; 1728} 1729DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode); 1730 1731static int pmc_core_pkgc_show(struct seq_file *s, void *unused) 1732{ 1733 struct pmc_dev *pmcdev = s->private; 1734 const struct pmc_bit_map *map = pmcdev->map->msr_sts; 1735 u64 pcstate_count; 1736 int index; 1737 1738 for (index = 0; map[index].name ; index++) { 1739 if (rdmsrl_safe(map[index].bit_mask, &pcstate_count)) 1740 continue; 1741 1742 pcstate_count *= 1000; 1743 do_div(pcstate_count, tsc_khz); 1744 seq_printf(s, "%-8s : %llu\n", map[index].name, 1745 pcstate_count); 1746 } 1747 1748 return 0; 1749} 1750DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc); 1751 1752static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order) 1753{ 1754 int i, j; 1755 1756 if (!lpm_pri) 1757 return false; 1758 /* 1759 * Each byte contains the priority level for 2 modes (7:4 and 3:0). 1760 * In a 32 bit register this allows for describing 8 modes. Store the 1761 * levels and look for values out of range. 1762 */ 1763 for (i = 0; i < 8; i++) { 1764 int level = lpm_pri & GENMASK(3, 0); 1765 1766 if (level >= LPM_MAX_NUM_MODES) 1767 return false; 1768 1769 mode_order[i] = level; 1770 lpm_pri >>= 4; 1771 } 1772 1773 /* Check that we have unique values */ 1774 for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++) 1775 for (j = i + 1; j < LPM_MAX_NUM_MODES; j++) 1776 if (mode_order[i] == mode_order[j]) 1777 return false; 1778 1779 return true; 1780} 1781 1782static void pmc_core_get_low_power_modes(struct platform_device *pdev) 1783{ 1784 struct pmc_dev *pmcdev = platform_get_drvdata(pdev); 1785 u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI; 1786 u8 mode_order[LPM_MAX_NUM_MODES]; 1787 u32 lpm_pri; 1788 u32 lpm_en; 1789 int mode, i, p; 1790 1791 /* Use LPM Maps to indicate support for substates */ 1792 if (!pmcdev->map->lpm_num_maps) 1793 return; 1794 1795 lpm_en = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_en_offset); 1796 pmcdev->num_lpm_modes = hweight32(lpm_en); 1797 1798 /* Read 32 bit LPM_PRI register */ 1799 lpm_pri = pmc_core_reg_read(pmcdev, pmcdev->map->lpm_priority_offset); 1800 1801 1802 /* 1803 * If lpm_pri value passes verification, then override the default 1804 * modes here. Otherwise stick with the default. 
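	 * (Each mode's priority level is one nibble of lpm_pri, mode 0 in
	 * bits 3:0; e.g. a value of 0x76543210 would assign mode N
	 * priority level N.)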
1805 */ 1806 if (pmc_core_pri_verify(lpm_pri, mode_order)) 1807 /* Get list of modes in priority order */ 1808 for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++) 1809 pri_order[mode_order[mode]] = mode; 1810 else 1811 dev_warn(&pdev->dev, "Assuming a default substate order for this platform\n"); 1812 1813 /* 1814 * Loop through all modes from lowest to highest priority, 1815 * and capture all enabled modes in order 1816 */ 1817 i = 0; 1818 for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) { 1819 int mode = pri_order[p]; 1820 1821 if (!(BIT(mode) & lpm_en)) 1822 continue; 1823 1824 pmcdev->lpm_en_modes[i++] = mode; 1825 } 1826} 1827 1828static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) 1829{ 1830 debugfs_remove_recursive(pmcdev->dbgfs_dir); 1831} 1832 1833static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) 1834{ 1835 struct dentry *dir; 1836 1837 dir = debugfs_create_dir("pmc_core", NULL); 1838 pmcdev->dbgfs_dir = dir; 1839 1840 debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev, 1841 &pmc_core_dev_state); 1842 1843 if (pmcdev->map->pfear_sts) 1844 debugfs_create_file("pch_ip_power_gating_status", 0444, dir, 1845 pmcdev, &pmc_core_ppfear_fops); 1846 1847 debugfs_create_file("ltr_ignore", 0644, dir, pmcdev, 1848 &pmc_core_ltr_ignore_ops); 1849 1850 debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops); 1851 1852 debugfs_create_file("package_cstate_show", 0444, dir, pmcdev, 1853 &pmc_core_pkgc_fops); 1854 1855 if (pmcdev->map->pll_sts) 1856 debugfs_create_file("pll_status", 0444, dir, pmcdev, 1857 &pmc_core_pll_fops); 1858 1859 if (pmcdev->map->mphy_sts) 1860 debugfs_create_file("mphy_core_lanes_power_gating_status", 1861 0444, dir, pmcdev, 1862 &pmc_core_mphy_pg_fops); 1863 1864 if (pmcdev->map->slps0_dbg_maps) { 1865 debugfs_create_file("slp_s0_debug_status", 0444, 1866 dir, pmcdev, 1867 &pmc_core_slps0_dbg_fops); 1868 1869 debugfs_create_bool("slp_s0_dbg_latch", 0644, 1870 dir, &slps0_dbg_latch); 1871 } 1872 1873 if (pmcdev->map->lpm_en_offset) { 1874 debugfs_create_file("substate_residencies", 0444, 1875 pmcdev->dbgfs_dir, pmcdev, 1876 &pmc_core_substate_res_fops); 1877 } 1878 1879 if (pmcdev->map->lpm_status_offset) { 1880 debugfs_create_file("substate_status_registers", 0444, 1881 pmcdev->dbgfs_dir, pmcdev, 1882 &pmc_core_substate_sts_regs_fops); 1883 debugfs_create_file("substate_live_status_registers", 0444, 1884 pmcdev->dbgfs_dir, pmcdev, 1885 &pmc_core_substate_l_sts_regs_fops); 1886 debugfs_create_file("lpm_latch_mode", 0644, 1887 pmcdev->dbgfs_dir, pmcdev, 1888 &pmc_core_lpm_latch_mode_fops); 1889 } 1890 1891 if (pmcdev->lpm_req_regs) { 1892 debugfs_create_file("substate_requirements", 0444, 1893 pmcdev->dbgfs_dir, pmcdev, 1894 &pmc_core_substate_req_regs_fops); 1895 } 1896} 1897 1898static const struct x86_cpu_id intel_pmc_core_ids[] = { 1899 X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &spt_reg_map), 1900 X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &spt_reg_map), 1901 X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &spt_reg_map), 1902 X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &spt_reg_map), 1903 X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &cnp_reg_map), 1904 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_reg_map), 1905 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_reg_map), 1906 X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &cnp_reg_map), 1907 X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &cnp_reg_map), 1908 X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_reg_map), 1909 X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_reg_map), 1910 X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &tgl_reg_map), 1911 
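	/* Jasper Lake (ATOM_TREMONT_L) reuses the Ice Lake register map */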
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map), 1912 X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map), 1913 X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &tgl_reg_map), 1914 X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &tgl_reg_map), 1915 X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_reg_map), 1916 X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &tgl_reg_map), 1917 {} 1918}; 1919 1920MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids); 1921 1922static const struct pci_device_id pmc_pci_ids[] = { 1923 { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) }, 1924 { } 1925}; 1926 1927/* 1928 * This quirk can be used on those platforms where 1929 * the platform BIOS enforces 24Mhz crystal to shutdown 1930 * before PMC can assert SLP_S0#. 1931 */ 1932static bool xtal_ignore; 1933static int quirk_xtal_ignore(const struct dmi_system_id *id) 1934{ 1935 xtal_ignore = true; 1936 return 0; 1937} 1938 1939static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev) 1940{ 1941 u32 value; 1942 1943 value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset); 1944 /* 24MHz Crystal Shutdown Qualification Disable */ 1945 value |= SPT_PMC_VRIC1_XTALSDQDIS; 1946 /* Low Voltage Mode Enable */ 1947 value &= ~SPT_PMC_VRIC1_SLPS0LVEN; 1948 pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value); 1949} 1950 1951static const struct dmi_system_id pmc_core_dmi_table[] = { 1952 { 1953 .callback = quirk_xtal_ignore, 1954 .ident = "HP Elite x2 1013 G3", 1955 .matches = { 1956 DMI_MATCH(DMI_SYS_VENDOR, "HP"), 1957 DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"), 1958 }, 1959 }, 1960 {} 1961}; 1962 1963static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev) 1964{ 1965 dmi_check_system(pmc_core_dmi_table); 1966 1967 if (xtal_ignore) 1968 pmc_core_xtal_ignore(pmcdev); 1969} 1970 1971static int pmc_core_probe(struct platform_device *pdev) 1972{ 1973 static bool device_initialized; 1974 struct pmc_dev *pmcdev; 1975 const struct x86_cpu_id *cpu_id; 1976 u64 slp_s0_addr; 1977 1978 if (device_initialized) 1979 return -ENODEV; 1980 1981 pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL); 1982 if (!pmcdev) 1983 return -ENOMEM; 1984 1985 platform_set_drvdata(pdev, pmcdev); 1986 1987 cpu_id = x86_match_cpu(intel_pmc_core_ids); 1988 if (!cpu_id) 1989 return -ENODEV; 1990 1991 pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data; 1992 1993 /* 1994 * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here 1995 * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap 1996 * in this case. 1997 */ 1998 if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids)) 1999 pmcdev->map = &cnp_reg_map; 2000 2001 if (lpit_read_residency_count_address(&slp_s0_addr)) { 2002 pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT; 2003 2004 if (page_is_ram(PHYS_PFN(pmcdev->base_addr))) 2005 return -ENODEV; 2006 } else { 2007 pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset; 2008 } 2009 2010 pmcdev->regbase = ioremap(pmcdev->base_addr, 2011 pmcdev->map->regmap_length); 2012 if (!pmcdev->regbase) 2013 return -ENOMEM; 2014 2015 mutex_init(&pmcdev->lock); 2016 2017 pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(pmcdev); 2018 pmc_core_get_low_power_modes(pdev); 2019 pmc_core_do_dmi_quirks(pmcdev); 2020 2021 if (pmcdev->map == &tgl_reg_map) 2022 pmc_core_get_tgl_lpm_reqs(pdev); 2023 2024 /* 2025 * On TGL and ADL, due to a hardware limitation, the GBE LTR blocks PC10 2026 * when a cable is attached. Tell the PMC to ignore it. 
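	 * (LTR index 3 corresponds to GIGABIT_ETHERNET in the ltr_show
	 * maps above.)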
	 */
	if (pmcdev->map == &tgl_reg_map || pmcdev->map == &adl_reg_map) {
		dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
		pmc_core_send_ltr_ignore(pmcdev, 3);
	}

	pmc_core_dbgfs_register(pmcdev);

	device_initialized = true;
	dev_info(&pdev->dev, " initialized\n");

	return 0;
}

static int pmc_core_remove(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);

	pmc_core_dbgfs_unregister(pmcdev);
	platform_set_drvdata(pdev, NULL);
	mutex_destroy(&pmcdev->lock);
	iounmap(pmcdev->regbase);
	return 0;
}

static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");

static __maybe_unused int pmc_core_suspend(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);

	pmcdev->check_counters = false;

	/* No warnings on S0ix failures */
	if (!warn_on_s0ix_failures)
		return 0;

	/* Check if the suspend will actually use S0ix */
	if (pm_suspend_via_firmware())
		return 0;

	/* Save PC10 residency for checking later */
	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
		return -EIO;

	/* Save S0ix residency for checking later */
	if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
		return -EIO;

	pmcdev->check_counters = true;
	return 0;
}

static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
{
	u64 pc10_counter;

	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
		return false;

	if (pc10_counter == pmcdev->pc10_counter)
		return true;

	return false;
}

static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
{
	u64 s0ix_counter;

	if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
		return false;

	if (s0ix_counter == pmcdev->s0ix_counter)
		return true;

	return false;
}

static __maybe_unused int pmc_core_resume(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
	int offset = pmcdev->map->lpm_status_offset;

	if (!pmcdev->check_counters)
		return 0;

	if (!pmc_core_is_s0ix_failed(pmcdev))
		return 0;

	if (pmc_core_is_pc10_failed(pmcdev)) {
		/* S0ix failed because of PC10 entry failure */
		dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
			 pmcdev->pc10_counter);
		return 0;
	}

	/* The really interesting case - S0ix failed - let's ask the PMC why. */
	dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
		 pmcdev->s0ix_counter);
	if (pmcdev->map->slps0_dbg_maps)
		pmc_core_slps0_display(pmcdev, dev, NULL);
	if (pmcdev->map->lpm_sts)
		pmc_core_lpm_display(pmcdev, dev, NULL, offset, "STATUS", maps);

	return 0;
}

static const struct dev_pm_ops pmc_core_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
};

static const struct acpi_device_id pmc_core_acpi_ids[] = {
	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80 */
	{ }
};
MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);

static struct platform_driver pmc_core_driver = {
	.driver = {
		.name = "intel_pmc_core",
		.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
		.pm = &pmc_core_pm_ops,
		.dev_groups = pmc_dev_groups,
	},
	.probe = pmc_core_probe,
	.remove = pmc_core_remove,
};

module_platform_driver(pmc_core_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");
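
/*
 * Example usage of the debugfs interface created above (assuming debugfs
 * is mounted at /sys/kernel/debug):
 *
 *   # cat /sys/kernel/debug/pmc_core/slp_s0_residency_usec
 *   # cat /sys/kernel/debug/pmc_core/ltr_show
 *
 * Writing a decimal LTR index (e.g. 3 for GIGABIT_ETHERNET) to ltr_ignore
 * tells the PMC to disregard that IP's latency tolerance report:
 *
 *   # echo 3 > /sys/kernel/debug/pmc_core/ltr_ignore
 */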