vsec.c (10408B)
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Vendor Specific Extended Capabilities auxiliary bus driver
 *
 * Copyright (c) 2021, Intel Corporation.
 * All Rights Reserved.
 *
 * Author: David E. Box <david.e.box@linux.intel.com>
 *
 * This driver discovers and creates auxiliary devices for Intel defined PCIe
 * "Vendor Specific" and "Designated Vendor Specific" Extended Capabilities,
 * VSEC and DVSEC respectively. The driver supports features on specific PCIe
 * endpoints that exist primarily to expose them.
 */

#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>

#include "vsec.h"

/* Intel DVSEC offsets */
#define INTEL_DVSEC_ENTRIES		0xA
#define INTEL_DVSEC_SIZE		0xB
#define INTEL_DVSEC_TABLE		0xC
#define INTEL_DVSEC_TABLE_BAR(x)	((x) & GENMASK(2, 0))
#define INTEL_DVSEC_TABLE_OFFSET(x)	((x) & GENMASK(31, 3))
#define TABLE_OFFSET_SHIFT		3

static DEFINE_IDA(intel_vsec_ida);
static DEFINE_IDA(intel_vsec_sdsi_ida);

/**
 * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
 * @rev:         Revision ID of the VSEC/DVSEC register space
 * @length:      Length of the VSEC/DVSEC register space
 * @id:          ID of the feature
 * @num_entries: Number of instances of the feature
 * @entry_size:  Size of the discovery table for each feature
 * @tbir:        BAR containing the discovery tables
 * @offset:      BAR offset of start of the first discovery table
 */
struct intel_vsec_header {
	u8	rev;
	u16	length;
	u16	id;
	u8	num_entries;
	u8	entry_size;
	u8	tbir;
	u32	offset;
};

/* Platform specific data */
struct intel_vsec_platform_info {
	struct intel_vsec_header **capabilities;
	unsigned long quirks;
};

enum intel_vsec_id {
	VSEC_ID_TELEMETRY	= 2,
	VSEC_ID_WATCHER		= 3,
	VSEC_ID_CRASHLOG	= 4,
	VSEC_ID_SDSI		= 65,
};

static enum intel_vsec_id intel_vsec_allow_list[] = {
	VSEC_ID_TELEMETRY,
	VSEC_ID_WATCHER,
	VSEC_ID_CRASHLOG,
	VSEC_ID_SDSI,
};

static const char *intel_vsec_name(enum intel_vsec_id id)
{
	switch (id) {
	case VSEC_ID_TELEMETRY:
		return "telemetry";

	case VSEC_ID_WATCHER:
		return "watcher";

	case VSEC_ID_CRASHLOG:
		return "crashlog";

	case VSEC_ID_SDSI:
		return "sdsi";

	default:
		return NULL;
	}
}

static bool intel_vsec_allowed(u16 id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_vsec_allow_list); i++)
		if (intel_vsec_allow_list[i] == id)
			return true;

	return false;
}

static bool intel_vsec_disabled(u16 id, unsigned long quirks)
{
	switch (id) {
	case VSEC_ID_WATCHER:
		return !!(quirks & VSEC_QUIRK_NO_WATCHER);

	case VSEC_ID_CRASHLOG:
		return !!(quirks & VSEC_QUIRK_NO_CRASHLOG);

	default:
		return false;
	}
}

static void intel_vsec_remove_aux(void *data)
{
	auxiliary_device_delete(data);
	auxiliary_device_uninit(data);
}

static void intel_vsec_dev_release(struct device *dev)
{
	struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);

	ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
	kfree(intel_vsec_dev->resource);
	kfree(intel_vsec_dev);
}

static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *intel_vsec_dev,
			      const char *name)
{
	struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
	int ret;

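	/*
	 * Allocate a unique id from the per-feature IDA. The id becomes part
	 * of the auxiliary device name and is released again in
	 * intel_vsec_dev_release().
	 */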
	ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
	if (ret < 0) {
		kfree(intel_vsec_dev);
		return ret;
	}

	auxdev->id = ret;
	auxdev->name = name;
	auxdev->dev.parent = &pdev->dev;
	auxdev->dev.release = intel_vsec_dev_release;

	ret = auxiliary_device_init(auxdev);
	if (ret < 0) {
		ida_free(intel_vsec_dev->ida, auxdev->id);
		kfree(intel_vsec_dev->resource);
		kfree(intel_vsec_dev);
		return ret;
	}

	ret = auxiliary_device_add(auxdev);
	if (ret < 0) {
		auxiliary_device_uninit(auxdev);
		return ret;
	}

	return devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux, auxdev);
}

static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
			      unsigned long quirks)
{
	struct intel_vsec_device *intel_vsec_dev;
	struct resource *res, *tmp;
	int i;

	if (!intel_vsec_allowed(header->id) || intel_vsec_disabled(header->id, quirks))
		return -EINVAL;

	if (!header->num_entries) {
		dev_dbg(&pdev->dev, "Invalid 0 entry count for header id %d\n", header->id);
		return -EINVAL;
	}

	if (!header->entry_size) {
		dev_dbg(&pdev->dev, "Invalid 0 entry size for header id %d\n", header->id);
		return -EINVAL;
	}

	intel_vsec_dev = kzalloc(sizeof(*intel_vsec_dev), GFP_KERNEL);
	if (!intel_vsec_dev)
		return -ENOMEM;

	res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL);
	if (!res) {
		kfree(intel_vsec_dev);
		return -ENOMEM;
	}

	if (quirks & VSEC_QUIRK_TABLE_SHIFT)
		header->offset >>= TABLE_OFFSET_SHIFT;

	/*
	 * The DVSEC/VSEC contains the starting offset and count for a block of
	 * discovery tables. Create a resource array of these tables to the
	 * auxiliary device driver.
	 */
	for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) {
		tmp->start = pdev->resource[header->tbir].start +
			     header->offset + i * (header->entry_size * sizeof(u32));
		tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1;
		tmp->flags = IORESOURCE_MEM;
	}

	intel_vsec_dev->pcidev = pdev;
	intel_vsec_dev->resource = res;
	intel_vsec_dev->num_resources = header->num_entries;
	intel_vsec_dev->quirks = quirks;

	if (header->id == VSEC_ID_SDSI)
		intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
	else
		intel_vsec_dev->ida = &intel_vsec_ida;

	return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id));
}

static bool intel_vsec_walk_header(struct pci_dev *pdev, unsigned long quirks,
				   struct intel_vsec_header **header)
{
	bool have_devices = false;
	int ret;

	for ( ; *header; header++) {
		ret = intel_vsec_add_dev(pdev, *header, quirks);
		if (ret)
			dev_info(&pdev->dev, "Could not add device for DVSEC id %d\n",
				 (*header)->id);
		else
			have_devices = true;
	}

	return have_devices;
}

static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
{
	bool have_devices = false;
	int pos = 0;

	do {
		struct intel_vsec_header header;
		u32 table, hdr;
		u16 vid;
		int ret;

		pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
		if (!pos)
			break;

		pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
		vid = PCI_DVSEC_HEADER1_VID(hdr);
		if (vid != PCI_VENDOR_ID_INTEL)
			continue;

		/* Support only revision 1 */
		header.rev = PCI_DVSEC_HEADER1_REV(hdr);
		if (header.rev != 1) {
			dev_info(&pdev->dev, "Unsupported DVSEC revision %d\n", header.rev);
			continue;
		}

		header.length = PCI_DVSEC_HEADER1_LEN(hdr);

		pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
		pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);

		header.tbir = INTEL_DVSEC_TABLE_BAR(table);
		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);

		pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
		header.id = PCI_DVSEC_HEADER2_ID(hdr);

		ret = intel_vsec_add_dev(pdev, &header, quirks);
		if (ret)
			continue;

		have_devices = true;
	} while (true);

	return have_devices;
}

static bool intel_vsec_walk_vsec(struct pci_dev *pdev, unsigned long quirks)
{
	bool have_devices = false;
	int pos = 0;

	do {
		struct intel_vsec_header header;
		u32 table, hdr;
		int ret;

		pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_VNDR);
		if (!pos)
			break;

		pci_read_config_dword(pdev, pos + PCI_VNDR_HEADER, &hdr);

		/* Support only revision 1 */
		header.rev = PCI_VNDR_HEADER_REV(hdr);
		if (header.rev != 1) {
			dev_info(&pdev->dev, "Unsupported VSEC revision %d\n", header.rev);
			continue;
		}

		header.id = PCI_VNDR_HEADER_ID(hdr);
		header.length = PCI_VNDR_HEADER_LEN(hdr);

		/* entry, size, and table offset are the same as DVSEC */
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
		pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);

		header.tbir = INTEL_DVSEC_TABLE_BAR(table);
		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
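
		/*
		 * Register an auxiliary device for this capability; a failure
		 * here only skips this capability, the remaining VSECs are
		 * still scanned.
		 */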
		ret = intel_vsec_add_dev(pdev, &header, quirks);
		if (ret)
			continue;

		have_devices = true;
	} while (true);

	return have_devices;
}

static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_vsec_platform_info *info;
	bool have_devices = false;
	unsigned long quirks = 0;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	info = (struct intel_vsec_platform_info *)id->driver_data;
	if (info)
		quirks = info->quirks;

	if (intel_vsec_walk_dvsec(pdev, quirks))
		have_devices = true;

	if (intel_vsec_walk_vsec(pdev, quirks))
		have_devices = true;

	if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) &&
	    intel_vsec_walk_header(pdev, quirks, info->capabilities))
		have_devices = true;

	if (!have_devices)
		return -ENODEV;

	return 0;
}

/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
	.quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG | VSEC_QUIRK_TABLE_SHIFT,
};

/* DG1 info */
static struct intel_vsec_header dg1_telemetry = {
	.length = 0x10,
	.id = 2,
	.num_entries = 1,
	.entry_size = 3,
	.tbir = 0,
	.offset = 0x466000,
};

static struct intel_vsec_header *dg1_capabilities[] = {
	&dg1_telemetry,
	NULL
};

static const struct intel_vsec_platform_info dg1_info = {
	.capabilities = dg1_capabilities,
	.quirks = VSEC_QUIRK_NO_DVSEC,
};

#define PCI_DEVICE_ID_INTEL_VSEC_ADL		0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1		0x490e
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM		0x09a7
#define PCI_DEVICE_ID_INTEL_VSEC_TGL		0x9a0d
static const struct pci_device_id intel_vsec_pci_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, NULL) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
	{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);

static struct pci_driver intel_vsec_pci_driver = {
	.name = "intel_vsec",
	.id_table = intel_vsec_pci_ids,
	.probe = intel_vsec_pci_probe,
};
module_pci_driver(intel_vsec_pci_driver);

MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Intel Extended Capabilities auxiliary bus driver");
MODULE_LICENSE("GPL v2");