processor_thermal_device_pci.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor thermal device for newer processors
 * Copyright (c) 2020, Intel Corporation.
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/thermal.h>

#include "int340x_thermal_zone.h"
#include "processor_thermal_device.h"

#define DRV_NAME "proc_thermal_pci"

struct proc_thermal_pci {
	struct pci_dev *pdev;
	struct proc_thermal_device *proc_priv;
	struct thermal_zone_device *tzone;
	struct delayed_work work;
	int stored_thres;
	int no_legacy;
};

enum proc_thermal_mmio_type {
	PROC_THERMAL_MMIO_TJMAX,
	PROC_THERMAL_MMIO_PP0_TEMP,
	PROC_THERMAL_MMIO_PP1_TEMP,
	PROC_THERMAL_MMIO_PKG_TEMP,
	PROC_THERMAL_MMIO_THRES_0,
	PROC_THERMAL_MMIO_THRES_1,
	PROC_THERMAL_MMIO_INT_ENABLE_0,
	PROC_THERMAL_MMIO_INT_ENABLE_1,
	PROC_THERMAL_MMIO_INT_STATUS_0,
	PROC_THERMAL_MMIO_INT_STATUS_1,
	PROC_THERMAL_MMIO_MAX
};

struct proc_thermal_mmio_info {
	enum proc_thermal_mmio_type mmio_type;
	u64 mmio_addr;
	u64 shift;
	u64 mask;
};

static struct proc_thermal_mmio_info proc_thermal_mmio_info[] = {
	{ PROC_THERMAL_MMIO_TJMAX, 0x599c, 16, 0xff },
	{ PROC_THERMAL_MMIO_PP0_TEMP, 0x597c, 0, 0xff },
	{ PROC_THERMAL_MMIO_PP1_TEMP, 0x5980, 0, 0xff },
	{ PROC_THERMAL_MMIO_PKG_TEMP, 0x5978, 0, 0xff },
	{ PROC_THERMAL_MMIO_THRES_0, 0x5820, 8, 0x7F },
	{ PROC_THERMAL_MMIO_THRES_1, 0x5820, 16, 0x7F },
	{ PROC_THERMAL_MMIO_INT_ENABLE_0, 0x5820, 15, 0x01 },
	{ PROC_THERMAL_MMIO_INT_ENABLE_1, 0x5820, 23, 0x01 },
	{ PROC_THERMAL_MMIO_INT_STATUS_0, 0x7200, 6, 0x01 },
	{ PROC_THERMAL_MMIO_INT_STATUS_1, 0x7200, 8, 0x01 },
};

#define B0D4_THERMAL_NOTIFY_DELAY	1000
static int notify_delay_ms = B0D4_THERMAL_NOTIFY_DELAY;

static void proc_thermal_mmio_read(struct proc_thermal_pci *pci_info,
				   enum proc_thermal_mmio_type type,
				   u32 *value)
{
	*value = ioread32(((u8 __iomem *)pci_info->proc_priv->mmio_base +
			   proc_thermal_mmio_info[type].mmio_addr));
	*value >>= proc_thermal_mmio_info[type].shift;
	*value &= proc_thermal_mmio_info[type].mask;
}

static void proc_thermal_mmio_write(struct proc_thermal_pci *pci_info,
				    enum proc_thermal_mmio_type type,
				    u32 value)
{
	u32 current_val;
	u32 mask;

	current_val = ioread32(((u8 __iomem *)pci_info->proc_priv->mmio_base +
				proc_thermal_mmio_info[type].mmio_addr));
	mask = proc_thermal_mmio_info[type].mask << proc_thermal_mmio_info[type].shift;
	current_val &= ~mask;

	value &= proc_thermal_mmio_info[type].mask;
	value <<= proc_thermal_mmio_info[type].shift;

	current_val |= value;
	iowrite32(current_val, ((u8 __iomem *)pci_info->proc_priv->mmio_base +
				proc_thermal_mmio_info[type].mmio_addr));
}

/*
 * To avoid sending too many notifications to user space, there is a
 * 1 second delay: on interrupt the threshold interrupt is disabled and
 * only re-enabled after this delayed work function has run.
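 *
 * proc_thermal_irq_handler() clears PROC_THERMAL_MMIO_INT_ENABLE_0 and
 * schedules this work with a notify_delay_ms (default 1000 ms) delay;
 * the work notifies the thermal core via thermal_zone_device_update()
 * and then sets PROC_THERMAL_MMIO_INT_ENABLE_0 again to re-arm the
 * threshold interrupt.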
 */
static void proc_thermal_threshold_work_fn(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct proc_thermal_pci *pci_info = container_of(delayed_work,
						struct proc_thermal_pci, work);
	struct thermal_zone_device *tzone = pci_info->tzone;

	if (tzone)
		thermal_zone_device_update(tzone, THERMAL_TRIP_VIOLATED);

	/* Enable interrupt flag */
	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
}

static void pkg_thermal_schedule_work(struct delayed_work *work)
{
	unsigned long ms = msecs_to_jiffies(notify_delay_ms);

	schedule_delayed_work(work, ms);
}

static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
{
	struct proc_thermal_pci *pci_info = devid;
	u32 status;

	proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_INT_STATUS_0, &status);

	/* Disable the interrupt enable flag */
	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
	pci_write_config_byte(pci_info->pdev, 0xdc, 0x01);

	pkg_thermal_schedule_work(&pci_info->work);

	return IRQ_HANDLED;
}

static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
	struct proc_thermal_pci *pci_info = tzd->devdata;
	u32 _temp;

	proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_PKG_TEMP, &_temp);
	*temp = (unsigned long)_temp * 1000;

	return 0;
}

static int sys_get_trip_temp(struct thermal_zone_device *tzd,
			     int trip, int *temp)
{
	struct proc_thermal_pci *pci_info = tzd->devdata;
	u32 _temp;

	proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_THRES_0, &_temp);
	if (!_temp) {
		*temp = THERMAL_TEMP_INVALID;
	} else {
		int tjmax;

		proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax);
		_temp = tjmax - _temp;
		*temp = (unsigned long)_temp * 1000;
	}

	return 0;
}

static int sys_get_trip_type(struct thermal_zone_device *tzd, int trip,
			     enum thermal_trip_type *type)
{
	*type = THERMAL_TRIP_PASSIVE;

	return 0;
}

static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
{
	struct proc_thermal_pci *pci_info = tzd->devdata;
	int tjmax, _temp;

	if (temp <= 0) {
		cancel_delayed_work_sync(&pci_info->work);
		proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
		proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
		thermal_zone_device_disable(tzd);
		pci_info->stored_thres = 0;
		return 0;
	}

	proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax);
	_temp = tjmax - (temp / 1000);
	if (_temp < 0)
		return -EINVAL;

	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, _temp);
	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);

	thermal_zone_device_enable(tzd);
	pci_info->stored_thres = temp;

	return 0;
}

static struct thermal_zone_device_ops tzone_ops = {
	.get_temp = sys_get_curr_temp,
	.get_trip_temp = sys_get_trip_temp,
	.get_trip_type = sys_get_trip_type,
	.set_trip_temp = sys_set_trip_temp,
};

static struct thermal_zone_params tzone_params = {
	.governor_name = "user_space",
	.no_hwmon = true,
};

static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct proc_thermal_device *proc_priv;
	struct proc_thermal_pci *pci_info;
	int irq_flag = 0, irq, ret;
	proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL);
	if (!proc_priv)
		return -ENOMEM;

	pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
	if (!pci_info)
		return -ENOMEM;

	pci_info->pdev = pdev;
	ret = pcim_enable_device(pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "error: could not enable device\n");
		return ret;
	}

	pci_set_master(pdev);

	INIT_DELAYED_WORK(&pci_info->work, proc_thermal_threshold_work_fn);

	ret = proc_thermal_add(&pdev->dev, proc_priv);
	if (ret) {
		dev_err(&pdev->dev, "error: proc_thermal_add, will continue\n");
		pci_info->no_legacy = 1;
	}

	proc_priv->priv_data = pci_info;
	pci_info->proc_priv = proc_priv;
	pci_set_drvdata(pdev, proc_priv);

	ret = proc_thermal_mmio_add(pdev, proc_priv, id->driver_data);
	if (ret)
		goto err_ret_thermal;

	pci_info->tzone = thermal_zone_device_register("TCPU_PCI", 1, 1, pci_info,
						       &tzone_ops,
						       &tzone_params, 0, 0);
	if (IS_ERR(pci_info->tzone)) {
		ret = PTR_ERR(pci_info->tzone);
		goto err_ret_mmio;
	}

	/* request and enable interrupt */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to allocate vectors!\n");
		goto err_ret_tzone;
	}
	if (!pdev->msi_enabled && !pdev->msix_enabled)
		irq_flag = IRQF_SHARED;

	irq = pci_irq_vector(pdev, 0);
	ret = devm_request_threaded_irq(&pdev->dev, irq,
					proc_thermal_irq_handler, NULL,
					irq_flag, KBUILD_MODNAME, pci_info);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ %d failed\n", pdev->irq);
		goto err_free_vectors;
	}

	return 0;

err_free_vectors:
	pci_free_irq_vectors(pdev);
err_ret_tzone:
	thermal_zone_device_unregister(pci_info->tzone);
err_ret_mmio:
	proc_thermal_mmio_remove(pdev, proc_priv);
err_ret_thermal:
	if (!pci_info->no_legacy)
		proc_thermal_remove(proc_priv);
	pci_disable_device(pdev);

	return ret;
}

static void proc_thermal_pci_remove(struct pci_dev *pdev)
{
	struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev);
	struct proc_thermal_pci *pci_info = proc_priv->priv_data;

	cancel_delayed_work_sync(&pci_info->work);

	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);

	devm_free_irq(&pdev->dev, pdev->irq, pci_info);
	pci_free_irq_vectors(pdev);

	thermal_zone_device_unregister(pci_info->tzone);
	proc_thermal_mmio_remove(pdev, pci_info->proc_priv);
	if (!pci_info->no_legacy)
		proc_thermal_remove(proc_priv);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int proc_thermal_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct proc_thermal_device *proc_priv;
	struct proc_thermal_pci *pci_info;

	proc_priv = pci_get_drvdata(pdev);
	pci_info = proc_priv->priv_data;

	if (!pci_info->no_legacy)
		return proc_thermal_suspend(dev);

	return 0;
}

static int proc_thermal_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct proc_thermal_device *proc_priv;
	struct proc_thermal_pci *pci_info;

	proc_priv = pci_get_drvdata(pdev);
	pci_info = proc_priv->priv_data;

	if (pci_info->stored_thres) {
		proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0,
					pci_info->stored_thres / 1000);
		proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
	}

	if (!pci_info->no_legacy)
		return proc_thermal_resume(dev);

	return 0;
}
#else
#define proc_thermal_pci_suspend NULL
#define proc_thermal_pci_resume NULL
#endif

static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
			 proc_thermal_pci_resume);

static const struct pci_device_id proc_thermal_pci_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
	{ PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
	{ PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
	{ },
};

MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);

static struct pci_driver proc_thermal_pci_driver = {
	.name		= DRV_NAME,
	.probe		= proc_thermal_pci_probe,
	.remove		= proc_thermal_pci_remove,
	.id_table	= proc_thermal_pci_ids,
	.driver.pm	= &proc_thermal_pci_pm,
};

static int __init proc_thermal_init(void)
{
	return pci_register_driver(&proc_thermal_pci_driver);
}

static void __exit proc_thermal_exit(void)
{
	pci_unregister_driver(&proc_thermal_pci_driver);
}

module_init(proc_thermal_init);
module_exit(proc_thermal_exit);

MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
MODULE_LICENSE("GPL v2");