amdgpu_debugfs.c (46798B)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h"
#include "amdgpu_rap.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_fw_attestation.h"
#include "amdgpu_umr.h"

#include "amdgpu_reset.h"
#include "amdgpu_psp_ta.h"

#if defined(CONFIG_DEBUG_FS)

/**
 * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
 *
 * @read: True if reading
 * @f: open file handle
 * @buf: User buffer to write/read to
 * @size: Number of bytes to write/read
 * @pos: Offset to seek to
 *
 * This debugfs entry has special meaning on the offset being sought.
 * Various bits have different meanings:
 *
 * Bit 62: Indicates a GRBM bank switch is needed
 * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is
 *	   zero)
 * Bits 24..33: The SE or ME selector if needed
 * Bits 34..43: The SH (or SA) or PIPE selector if needed
 * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
 *
 * Bit 23: Indicates that the PM power gating lock should be held
 *	   This is necessary to read registers that might be
 *	   unreliable during a power gating transition.
 *
 * The lower bits are the BYTE offset of the register to read. This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;

	if (size & 0x3 || *pos & 0x3 ||
	    ((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else if (*pos & (1ULL << 61)) {

		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;

		use_ring = true;
	} else {
		use_bank = use_ring = false;
	}

	*pos &= (1UL << 22) - 1;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (read) {
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
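/*
 * Example (illustrative, not part of the driver): a userspace sketch of
 * reading one GFX register through the amdgpu_regs file with a GRBM bank
 * select, following the bit layout documented above. It assumes debugfs is
 * mounted at /sys/kernel/debug and uses card 0; REG_BYTE_OFF is a
 * placeholder for a dword-aligned register byte offset supplied by the
 * caller.
 *
 *	uint64_t off = (1ULL << 62) |		// request a GRBM bank switch
 *		       (0x3FFULL << 24) |	// SE: 0x3FF selects broadcast
 *		       (0x3FFULL << 34) |	// SH/SA: broadcast
 *		       (0x3FFULL << 44) |	// INSTANCE: broadcast
 *		       REG_BYTE_OFF;		// low bits: register offset
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
 *	uint32_t value;
 *	pread(fd, &value, sizeof(value), off);	// reads one 32-bit register
 */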
/*
 * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
 */
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}

/*
 * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
 */
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}

static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
{
	struct amdgpu_debugfs_regs2_data *rd;

	rd = kzalloc(sizeof *rd, GFP_KERNEL);
	if (!rd)
		return -ENOMEM;
	rd->adev = file_inode(file)->i_private;
	file->private_data = rd;
	mutex_init(&rd->lock);

	return 0;
}

static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
{
	struct amdgpu_debugfs_regs2_data *rd = file->private_data;

	mutex_destroy(&rd->lock);
	kfree(file->private_data);
	return 0;
}

static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
{
	struct amdgpu_debugfs_regs2_data *rd = f->private_data;
	struct amdgpu_device *adev = rd->adev;
	ssize_t result = 0;
	int r;
	uint32_t value;

	if (size & 0x3 || offset & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	mutex_lock(&rd->lock);

	if (rd->id.use_grbm) {
		if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
		    (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			mutex_unlock(&rd->lock);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
					rd->id.grbm.sh,
					rd->id.grbm.instance);
	}

	if (rd->id.use_srbm) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
					    rd->id.srbm.queue, rd->id.srbm.vmid);
	}

	if (rd->id.pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		if (!write_en) {
			value = RREG32(offset >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value);
		}
		if (r) {
			result = r;
			goto end;
		}
		offset += 4;
		size -= 4;
		result += 4;
		buf += 4;
	}
end:
	if (rd->id.use_grbm) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (rd->id.use_srbm) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (rd->id.pg_lock)
		mutex_unlock(&adev->pm.mutex);

	mutex_unlock(&rd->lock);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
{
	struct amdgpu_debugfs_regs2_data *rd = f->private_data;
	int r;

	switch (cmd) {
	case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
		mutex_lock(&rd->lock);
		r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id);
		mutex_unlock(&rd->lock);
		return r ? -EINVAL : 0;
	default:
		return -EINVAL;
	}
	return 0;
}

static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
{
	return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
}

static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
{
	return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
}
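/*
 * Example (illustrative): userspace drives amdgpu_regs2 by first setting the
 * bank-select state with the SET_STATE ioctl, then doing plain pread/pwrite
 * at the register byte offset. The iocdata field names follow the UAPI
 * header; treat the exact layout as an assumption to be checked against
 * amdgpu_umr.h, and REG_BYTE_OFF as a caller-supplied placeholder.
 *
 *	struct amdgpu_debugfs_regs2_iocdata id = { 0 };
 *	id.use_grbm = 1;
 *	id.grbm.se = 0xFFFFFFFF;	// broadcast
 *	id.grbm.sh = 0xFFFFFFFF;
 *	id.grbm.instance = 0xFFFFFFFF;
 *	ioctl(fd, AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE, &id);
 *	pread(fd, &value, 4, REG_BYTE_OFF);
 */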
/**
 * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read. This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write. This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_PCIE(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
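/*
 * Example (illustrative): the PCIE file above and the DIDT and SMC files
 * below all follow the same convention: seek to the register byte offset,
 * then read or write any number of dword-aligned bytes. A sketch, with
 * PCIE_REG_BYTE_OFF as a placeholder offset:
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs_pcie", O_RDONLY);
 *	uint32_t v;
 *	pread(fd, &v, sizeof(v), PCIE_REG_BYTE_OFF);
 */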
/**
 * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read. This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write. This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_smc_read - Read from a SMC register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read. This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

/**
 * amdgpu_debugfs_regs_smc_write - Write to a SMC register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos: Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write. This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
/**
 * amdgpu_debugfs_gca_config_read - Read from gfx config data
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * This file is used to access configuration data in a somewhat
 * stable fashion. The format is a series of DWORDs with the first
 * indicating which revision it is. New content is appended to the
 * end so that older software can still read the data.
 */

static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 5;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = lower_32_bits(adev->pg_flags);
	config[no_regs++] = lower_32_bits(adev->cg_flags);

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	/* rev==4 APU flag */
	config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0;

	/* rev==5 PG/CG flag upper 32bit */
	config[no_regs++] = upper_32_bits(adev->pg_flags);
	config[no_regs++] = upper_32_bits(adev->cg_flags);

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
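/*
 * Example (illustrative): parsing the amdgpu_gca_config stream from
 * userspace. The first dword is the layout revision; fields are only ever
 * appended, so a reader checks the revision before trusting later entries.
 * The index used below is hand-counted from the layout above and should be
 * treated as an assumption.
 *
 *	uint32_t cfg[64];
 *	pread(fd, cfg, sizeof(cfg), 0);
 *	if (cfg[0] >= 2)			// rev 2 appended family
 *		printf("family %u\n", cfg[27]);	// 24 rev-0 + 3 rev-1 entries
 */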
/**
 * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The offset is treated as the BYTE address of one of the sensors
 * enumerated in amd/include/kgd_pp_interface.h under the
 * 'amd_pp_sensors' enumeration. For instance to read the UVD VCLK
 * you would use the offset 3 * 4 = 12.
 */
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (!adev->pm.dpm_enabled)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r) {
		amdgpu_virt_disable_access_debugfs(adev);
		return r;
	}

	if (size > valuesize) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return !r ? outsize : r;
}
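/*
 * Example (illustrative): reading one sensor. The seek offset is the
 * amd_pp_sensors enum value times four; AMDGPU_PP_SENSOR_GFX_SCLK is used
 * here, and the meaning/units of the returned dword are sensor-specific.
 *
 *	uint32_t sclk;
 *	pread(fd, &sclk, sizeof(sclk), AMDGPU_PP_SENSOR_GFX_SCLK * 4);
 */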
/**
 * amdgpu_debugfs_wave_read - Read WAVE STATUS data
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The offset being sought changes which wave that the status data
 * will be returned for. The bits are used as follows:
 *
 * Bits 0..6:	Byte offset into data
 * Bits 7..14:	SE selector
 * Bits 15..22:	SH/SA selector
 * Bits 23..30: CU/{WGP+SIMD} selector
 * Bits 31..36: WAVE ID selector
 * Bits 37..44: SIMD ID selector
 *
 * The returned data begins with one DWORD of version information
 * Followed by WAVE STATUS registers relevant to the GFX IP version
 * being used. See gfx_v8_0_read_wave_data() for an example output.
 */
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (!x) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
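/*
 * Example (illustrative): composing an amdgpu_wave offset for SE0/SH0/CU0,
 * SIMD 0, wave 1, then pulling the whole status block in one read. Selector
 * values are placeholders chosen for the sketch.
 *
 *	uint64_t off = (0ULL << 7) |	// SE
 *		       (0ULL << 15) |	// SH/SA
 *		       (0ULL << 23) |	// CU
 *		       (1ULL << 31) |	// wave ID
 *		       (0ULL << 37);	// SIMD ID
 *	uint32_t data[32];
 *	pread(fd, data, sizeof(data), off);
 *	// data[0] is the version dword; the rest are GFX-IP specific regs
 */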
/**
 * amdgpu_debugfs_gpr_read - Read wave gprs
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The offset being sought changes which wave that the status data
 * will be returned for. The bits are used as follows:
 *
 * Bits 0..11:	Byte offset into data
 * Bits 12..19:	SE selector
 * Bits 20..27:	SH/SA selector
 * Bits 28..35: CU/{WGP+SIMD} selector
 * Bits 36..43: WAVE ID selector
 * Bits 44..51: SIMD ID selector
 * Bits 52..59: Thread selector
 * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
 *
 * The return data comes from the SGPR or VGPR register bank for
 * the selected operational unit.
 */
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size > 4096 || size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0)
		goto err;

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0)
		goto err;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	while (size) {
		uint32_t value;

		value = data[result >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

	kfree(data);
	amdgpu_virt_disable_access_debugfs(adev);
	return result;

err:
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	kfree(data);
	return r;
}
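/*
 * Example (illustrative): reading the first 16 SGPRs of a wave via
 * amdgpu_gpr. Bank 1 selects the SGPR file (the thread field is ignored for
 * SGPR reads); all selector values are placeholders.
 *
 *	uint64_t off = (1ULL << 60) |	// bank: SGPR
 *		       (0ULL << 44) |	// SIMD
 *		       (0ULL << 36);	// wave
 *	uint32_t sgprs[16];
 *	pread(fd, sgprs, sizeof(sgprs), off);
 */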
/**
 * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos: Offset to seek to
 *
 * Write a 32-bit zero to disable or a 32-bit non-zero to enable
 */
static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
					   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		amdgpu_gfx_off_ctrl(adev, value ? true : false);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return result;
}

/**
 * amdgpu_debugfs_gfxoff_read - read gfxoff status
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 */
static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
					  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = amdgpu_get_gfx_off_status(adev, &value);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return result;
}
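/*
 * Example (illustrative): toggling GFXOFF from userspace. Writing a zero
 * dword disallows GFXOFF, a non-zero dword allows it again, and a read
 * returns the current status as reported by amdgpu_get_gfx_off_status().
 *
 *	uint32_t off = 0, status;
 *	pwrite(fd, &off, sizeof(off), 0);	// disallow GFXOFF
 *	pread(fd, &status, sizeof(status), 0);
 */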
static const struct file_operations amdgpu_debugfs_regs2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
	.read = amdgpu_debugfs_regs2_read,
	.write = amdgpu_debugfs_regs2_write,
	.open = amdgpu_debugfs_regs2_open,
	.release = amdgpu_debugfs_regs2_release,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_read,
	.write = amdgpu_debugfs_gfxoff_write,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs2_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
	&amdgpu_debugfs_gfxoff_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs2",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
	"amdgpu_gfxoff",
};

/**
 * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
 *			      register access.
 *
 * @adev: The device to attach the debugfs entries to
 */
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (!i && !IS_ERR_OR_NULL(ent))
			i_size_write(ent->d_inode, adev->rmmio_size);
	}

	return 0;
}
static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct drm_device *dev = adev_to_drm(adev);
	int r = 0, i;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* Avoid accidentally unparking the sched thread during GPU reset */
	r = down_write_killable(&adev->reset_domain->sem);
	if (r) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* hold on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* go on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	up_write(&adev->reset_domain->sem);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	struct drm_device *dev = adev_to_drm(adev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	*val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	struct drm_device *dev = adev_to_drm(adev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	*val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_benchmark(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	struct drm_device *dev = adev_to_drm(adev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	r = amdgpu_benchmark(adev, val);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct amdgpu_fpriv *fpriv = file->driver_priv;
		struct amdgpu_vm *vm = &fpriv->vm;

		seq_printf(m, "pid:%d\tProcess:%s ----------\n",
			   vm->task_info.pid, vm->task_info.process_name);
		r = amdgpu_bo_reserve(vm->root.bo, true);
		if (r)
			break;
		amdgpu_debugfs_vm_bo_info(vm, m);
		amdgpu_bo_unreserve(vm->root.bo);
	}

	mutex_unlock(&dev->filelist_mutex);

	return r;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib);
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram,
			 NULL, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt,
			 NULL, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_benchmark_fops, NULL, amdgpu_debugfs_benchmark,
			 "%lld\n");
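/*
 * Example (illustrative): the DEFINE_DEBUGFS_ATTRIBUTE files above speak
 * plain decimal text per their "%lld" format. Kicking off a benchmark run
 * from userspace, with the test number left as an assumption for the
 * reader to pick from amdgpu_benchmark():
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_benchmark", O_WRONLY);
 *	write(fd, "3\n", 2);	// benchmark test number, parsed via "%lld"
 */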
static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
					  struct dma_fence **fences)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t sync_seq, last_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = ring->fence_drv.sync_seq;

	last_seq &= drv->num_fences_mask;
	sync_seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		fences[last_seq] = fence;

	} while (last_seq != sync_seq);
}

static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
					    int length)
{
	int i;
	struct dma_fence *fence;

	for (i = 0; i < length; i++) {
		fence = fences[i];
		if (!fence)
			continue;
		dma_fence_signal(fence);
		dma_fence_put(fence);
	}
}

static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct dma_fence *fence;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->pending_list, list) {
		fence = sched->ops->run_job(s_job);
		dma_fence_put(fence);
	}
	spin_unlock(&sched->job_list_lock);
}

static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	struct drm_sched_job *s_job, *tmp;
	uint32_t preempt_seq;
	struct dma_fence *fence, **ptr;
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct drm_gpu_scheduler *sched = &ring->sched;
	bool preempted = true;

	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		return;

	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
	if (preempt_seq <= atomic_read(&drv->last_seq)) {
		preempted = false;
		goto no_preempt;
	}

	preempt_seq &= drv->num_fences_mask;
	ptr = &drv->fences[preempt_seq];
	fence = rcu_dereference_protected(*ptr, 1);

no_preempt:
	spin_lock(&sched->job_list_lock);
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
			/* remove job from ring_mirror_list */
			list_del_init(&s_job->list);
			sched->ops->free_job(s_job);
			continue;
		}
		job = to_amdgpu_job(s_job);
		if (preempted && (&job->hw_fence) == fence)
			/* mark the job as preempted */
			job->preemption_status |= AMDGPU_IB_PREEMPTED;
	}
	spin_unlock(&sched->job_list_lock);
}
static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
	int r, resched, length;
	struct amdgpu_ring *ring;
	struct dma_fence **fences = NULL;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (val >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	ring = adev->rings[val];

	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
		return -EINVAL;

	/* the last preemption failed */
	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
		return -EBUSY;

	length = ring->fence_drv.num_fences_mask + 1;
	fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Avoid accidentally unparking the sched thread during GPU reset */
	r = down_read_killable(&adev->reset_domain->sem);
	if (r)
		goto pro_end;

	/* stop the scheduler */
	kthread_park(ring->sched.thread);

	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* preempt the IB */
	r = amdgpu_ring_preempt_ib(ring);
	if (r) {
		DRM_WARN("failed to preempt ring %d\n", ring->idx);
		goto failure;
	}

	amdgpu_fence_process(ring);

	if (atomic_read(&ring->fence_drv.last_seq) !=
	    ring->fence_drv.sync_seq) {
		DRM_INFO("ring %d was preempted\n", ring->idx);

		amdgpu_ib_preempt_mark_partial_job(ring);

		/* swap out the old fences */
		amdgpu_ib_preempt_fences_swap(ring, fences);

		amdgpu_fence_driver_force_completion(ring);

		/* resubmit unfinished jobs */
		amdgpu_ib_preempt_job_recovery(&ring->sched);

		/* wait for jobs finished */
		amdgpu_fence_wait_empty(ring);

		/* signal the old fences */
		amdgpu_ib_preempt_signal_fences(fences, length);
	}

failure:
	/* restart the scheduler */
	kthread_unpark(ring->sched.thread);

	up_read(&adev->reset_domain->sem);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

pro_end:
	kfree(fences);

	return r;
}
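/*
 * Example (illustrative): requesting a mid-command-buffer preemption on a
 * ring by writing its index (decimal, per the "%llu" format below) to
 * amdgpu_preempt_ib. The write fails with -EINVAL if the ring cannot
 * preempt and -EBUSY if the previous preemption has not completed.
 *
 *	write(fd, "0\n", 2);	// preempt ring 0
 */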
static int amdgpu_debugfs_sclk_set(void *data, u64 val)
{
	int ret = 0;
	uint32_t max_freq, min_freq;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq);
	if (ret == -EOPNOTSUPP) {
		ret = 0;
		goto out;
	}
	if (ret || val > max_freq || val < min_freq) {
		ret = -EINVAL;
		goto out;
	}

	ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val);
	if (ret)
		ret = -EINVAL;

out:
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
			 amdgpu_debugfs_ib_preempt, "%llu\n");

DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
			 amdgpu_debugfs_sclk_set, "%llu\n");
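/*
 * Example (illustrative): pinning SCLK via amdgpu_force_sclk. The value is
 * written as decimal text and must fall inside the DPM range reported by
 * amdgpu_dpm_get_dpm_freq_range(); MHz units are assumed here.
 *
 *	write(fd, "1500\n", 5);	// force min == max == 1500
 */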
static ssize_t amdgpu_reset_dump_register_list_read(struct file *f,
				char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	char reg_offset[12];
	int i, ret, len = 0;

	if (*pos)
		return 0;

	memset(reg_offset, 0, 12);
	ret = down_read_killable(&adev->reset_domain->sem);
	if (ret)
		return ret;

	for (i = 0; i < adev->num_regs; i++) {
		sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]);
		up_read(&adev->reset_domain->sem);
		if (copy_to_user(buf + len, reg_offset, strlen(reg_offset)))
			return -EFAULT;

		len += strlen(reg_offset);
		ret = down_read_killable(&adev->reset_domain->sem);
		if (ret)
			return ret;
	}

	up_read(&adev->reset_domain->sem);
	*pos += len;

	return len;
}

static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
			const char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	char reg_offset[11];
	uint32_t *new, *tmp = NULL;
	int ret, i = 0, len = 0;

	do {
		memset(reg_offset, 0, 11);
		if (copy_from_user(reg_offset, buf + len,
				   min(10, ((int)size-len)))) {
			ret = -EFAULT;
			goto error_free;
		}

		new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto error_free;
		}
		tmp = new;
		if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) {
			ret = -EINVAL;
			goto error_free;
		}

		len += ret;
		i++;
	} while (len < size);

	ret = down_write_killable(&adev->reset_domain->sem);
	if (ret)
		goto error_free;

	swap(adev->reset_dump_reg_list, tmp);
	adev->num_regs = i;
	up_write(&adev->reset_domain->sem);
	ret = size;

error_free:
	kfree(tmp);
	return ret;
}

static const struct file_operations amdgpu_reset_dump_register_list = {
	.owner = THIS_MODULE,
	.read = amdgpu_reset_dump_register_list_read,
	.write = amdgpu_reset_dump_register_list_write,
	.llseek = default_llseek
};
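/*
 * Example (illustrative): programming the reset-dump register list. The
 * write parser above consumes whitespace-separated hex offsets via
 * sscanf("%X %n"); reading the file back returns one "0x..." offset per
 * line. The offsets below are placeholders.
 *
 *	const char list[] = "0x1a80 0x1a84 0x1a88\n";
 *	write(fd, list, sizeof(list) - 1);
 */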
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
	struct dentry *ent;
	int r, i;

	if (!debugfs_initialized())
		return 0;

	debugfs_create_x32("amdgpu_smu_debug", 0600, root,
			   &adev->pm.smu_debug_mask);

	ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
				  &fops_ib_preempt);
	if (IS_ERR(ent)) {
		DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
		return PTR_ERR(ent);
	}

	ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
				  &fops_sclk_set);
	if (IS_ERR(ent)) {
		DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
		return PTR_ERR(ent);
	}

	/* Register debugfs entries for amdgpu_ttm */
	amdgpu_ttm_debugfs_init(adev);
	amdgpu_debugfs_pm_init(adev);
	amdgpu_debugfs_sa_init(adev);
	amdgpu_debugfs_fence_init(adev);
	amdgpu_debugfs_gem_init(adev);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	amdgpu_debugfs_firmware_init(adev);
	amdgpu_ta_if_debugfs_init(adev);

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_device_has_dc_support(adev))
		dtn_debugfs_init(adev);
#endif

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;

		amdgpu_debugfs_ring_init(adev, ring);
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (!amdgpu_vcnfw_log)
			break;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]);
	}

	amdgpu_ras_debugfs_create_all(adev);
	amdgpu_rap_debugfs_init(adev);
	amdgpu_securedisplay_debugfs_init(adev);
	amdgpu_fw_attestation_debugfs_init(adev);

	debugfs_create_file("amdgpu_evict_vram", 0444, root, adev,
			    &amdgpu_evict_vram_fops);
	debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev,
			    &amdgpu_evict_gtt_fops);
	debugfs_create_file("amdgpu_test_ib", 0444, root, adev,
			    &amdgpu_debugfs_test_ib_fops);
	debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
			    &amdgpu_debugfs_vm_info_fops);
	debugfs_create_file("amdgpu_benchmark", 0200, root, adev,
			    &amdgpu_benchmark_fops);
	debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev,
			    &amdgpu_reset_dump_register_list);

	adev->debugfs_vbios_blob.data = adev->bios;
	adev->debugfs_vbios_blob.size = adev->bios_size;
	debugfs_create_blob("amdgpu_vbios", 0444, root,
			    &adev->debugfs_vbios_blob);

	adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
	adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
	debugfs_create_blob("amdgpu_discovery", 0444, root,
			    &adev->debugfs_discovery_blob);

	return 0;
}

#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
#endif