cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_trace.h (17395B)
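This header defines the amdgpu driver's ftrace tracepoints using the kernel's TRACE_EVENT()/DECLARE_EVENT_CLASS() macros: each definition specifies the event's arguments (TP_PROTO/TP_ARGS), the fields recorded into the trace ring buffer (TP_STRUCT__entry/TP_fast_assign), and how a record is formatted (TP_printk). For context, a rough sketch of how such an event is emitted from driver code follows; the helper function is hypothetical, while real call sites live elsewhere in the driver (the buffer-object creation path, for example, calls trace_amdgpu_bo_create()).

/* Illustrative only -- not part of amdgpu_trace.h. Including the trace
 * header makes the generated trace_<event>() helpers available; the call
 * below compiles to a static-branch check and records an event only when
 * amdgpu:amdgpu_bo_create is enabled at runtime. */
#include "amdgpu_trace.h"

static void example_report_bo(struct amdgpu_bo *bo)	/* hypothetical helper */
{
	trace_amdgpu_bo_create(bo);
}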


/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM amdgpu
#define TRACE_INCLUDE_FILE amdgpu_trace

#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
	 job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)

TRACE_EVENT(amdgpu_device_rreg,
	    TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
	    TP_ARGS(did, reg, value),
	    TP_STRUCT__entry(
				__field(unsigned, did)
				__field(uint32_t, reg)
				__field(uint32_t, value)
			    ),
	    TP_fast_assign(
			   __entry->did = did;
			   __entry->reg = reg;
			   __entry->value = value;
			   ),
	    TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
		      (unsigned long)__entry->did,
		      (unsigned long)__entry->reg,
		      (unsigned long)__entry->value)
);

TRACE_EVENT(amdgpu_device_wreg,
	    TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
	    TP_ARGS(did, reg, value),
	    TP_STRUCT__entry(
				__field(unsigned, did)
				__field(uint32_t, reg)
				__field(uint32_t, value)
			    ),
	    TP_fast_assign(
			   __entry->did = did;
			   __entry->reg = reg;
			   __entry->value = value;
			   ),
	    TP_printk("0x%04lx, 0x%08lx, 0x%08lx",
		      (unsigned long)__entry->did,
		      (unsigned long)__entry->reg,
		      (unsigned long)__entry->value)
);

TRACE_EVENT(amdgpu_iv,
	    TP_PROTO(unsigned ih, struct amdgpu_iv_entry *iv),
	    TP_ARGS(ih, iv),
	    TP_STRUCT__entry(
			     __field(unsigned, ih)
			     __field(unsigned, client_id)
			     __field(unsigned, src_id)
			     __field(unsigned, ring_id)
			     __field(unsigned, vmid)
			     __field(unsigned, vmid_src)
			     __field(uint64_t, timestamp)
			     __field(unsigned, timestamp_src)
			     __field(unsigned, pasid)
			     __array(unsigned, src_data, 4)
			    ),
	    TP_fast_assign(
			   __entry->ih = ih;
			   __entry->client_id = iv->client_id;
			   __entry->src_id = iv->src_id;
			   __entry->ring_id = iv->ring_id;
			   __entry->vmid = iv->vmid;
			   __entry->vmid_src = iv->vmid_src;
			   __entry->timestamp = iv->timestamp;
			   __entry->timestamp_src = iv->timestamp_src;
			   __entry->pasid = iv->pasid;
			   __entry->src_data[0] = iv->src_data[0];
			   __entry->src_data[1] = iv->src_data[1];
			   __entry->src_data[2] = iv->src_data[2];
			   __entry->src_data[3] = iv->src_data[3];
			   ),
	    TP_printk("ih:%u client_id:%u src_id:%u ring:%u vmid:%u "
		      "timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x",
		      __entry->ih, __entry->client_id, __entry->src_id,
		      __entry->ring_id, __entry->vmid,
		      __entry->timestamp, __entry->pasid,
		      __entry->src_data[0], __entry->src_data[1],
		      __entry->src_data[2], __entry->src_data[3])
);


TRACE_EVENT(amdgpu_bo_create,
	    TP_PROTO(struct amdgpu_bo *bo),
	    TP_ARGS(bo),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(u32, pages)
			     __field(u32, type)
			     __field(u32, prefer)
			     __field(u32, allow)
			     __field(u32, visible)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo;
			   __entry->pages = bo->tbo.resource->num_pages;
			   __entry->type = bo->tbo.resource->mem_type;
			   __entry->prefer = bo->preferred_domains;
			   __entry->allow = bo->allowed_domains;
			   __entry->visible = bo->flags;
			   ),

	    TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d",
		       __entry->bo, __entry->pages, __entry->type,
		       __entry->prefer, __entry->allow, __entry->visible)
);

TRACE_EVENT(amdgpu_cs,
	    TP_PROTO(struct amdgpu_cs_parser *p, int i),
	    TP_ARGS(p, i),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo_list *, bo_list)
			     __field(u32, ring)
			     __field(u32, dw)
			     __field(u32, fences)
			     ),

	    TP_fast_assign(
			   __entry->bo_list = p->bo_list;
			   __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
			   __entry->dw = p->job->ibs[i].length_dw;
			   __entry->fences = amdgpu_fence_count_emitted(
				to_amdgpu_ring(p->entity->rq->sched));
			   ),
	    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
		      __entry->bo_list, __entry->ring, __entry->dw,
		      __entry->fences)
);

TRACE_EVENT(amdgpu_cs_ioctl,
	    TP_PROTO(struct amdgpu_job *job),
	    TP_ARGS(job),
	    TP_STRUCT__entry(
			     __field(uint64_t, sched_job_id)
			     __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
			     __field(unsigned int, context)
			     __field(unsigned int, seqno)
			     __field(struct dma_fence *, fence)
			     __string(ring, to_amdgpu_ring(job->base.sched)->name)
			     __field(u32, num_ibs)
			     ),

	    TP_fast_assign(
			   __entry->sched_job_id = job->base.id;
			   __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
			   __entry->context = job->base.s_fence->finished.context;
			   __entry->seqno = job->base.s_fence->finished.seqno;
			   __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
			   __entry->num_ibs = job->num_ibs;
			   ),
	    TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
		      __entry->sched_job_id, __get_str(timeline), __entry->context,
		      __entry->seqno, __get_str(ring), __entry->num_ibs)
);

TRACE_EVENT(amdgpu_sched_run_job,
	    TP_PROTO(struct amdgpu_job *job),
	    TP_ARGS(job),
	    TP_STRUCT__entry(
			     __field(uint64_t, sched_job_id)
			     __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
			     __field(unsigned int, context)
			     __field(unsigned int, seqno)
			     __string(ring, to_amdgpu_ring(job->base.sched)->name)
			     __field(u32, num_ibs)
			     ),

	    TP_fast_assign(
			   __entry->sched_job_id = job->base.id;
			   __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job));
			   __entry->context = job->base.s_fence->finished.context;
			   __entry->seqno = job->base.s_fence->finished.seqno;
			   __assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
			   __entry->num_ibs = job->num_ibs;
			   ),
	    TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
		      __entry->sched_job_id, __get_str(timeline), __entry->context,
		      __entry->seqno, __get_str(ring), __entry->num_ibs)
);


TRACE_EVENT(amdgpu_vm_grab_id,
	    TP_PROTO(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job),
	    TP_ARGS(vm, ring, job),
	    TP_STRUCT__entry(
			     __field(u32, pasid)
			     __string(ring, ring->name)
			     __field(u32, ring)
			     __field(u32, vmid)
			     __field(u32, vm_hub)
			     __field(u64, pd_addr)
			     __field(u32, needs_flush)
			     ),

	    TP_fast_assign(
			   __entry->pasid = vm->pasid;
			   __assign_str(ring, ring->name);
			   __entry->vmid = job->vmid;
			   __entry->vm_hub = ring->funcs->vmhub,
			   __entry->pd_addr = job->vm_pd_addr;
			   __entry->needs_flush = job->vm_needs_flush;
			   ),
	    TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
		      __entry->pasid, __get_str(ring), __entry->vmid,
		      __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
);

TRACE_EVENT(amdgpu_vm_bo_map,
	    TP_PROTO(struct amdgpu_bo_va *bo_va,
		     struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(bo_va, mapping),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(long, start)
			     __field(long, last)
			     __field(u64, offset)
			     __field(u64, flags)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo_va ? bo_va->base.bo : NULL;
			   __entry->start = mapping->start;
			   __entry->last = mapping->last;
			   __entry->offset = mapping->offset;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
		      __entry->bo, __entry->start, __entry->last,
		      __entry->offset, __entry->flags)
);

TRACE_EVENT(amdgpu_vm_bo_unmap,
	    TP_PROTO(struct amdgpu_bo_va *bo_va,
		     struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(bo_va, mapping),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo *, bo)
			     __field(long, start)
			     __field(long, last)
			     __field(u64, offset)
			     __field(u64, flags)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo_va ? bo_va->base.bo : NULL;
			   __entry->start = mapping->start;
			   __entry->last = mapping->last;
			   __entry->offset = mapping->offset;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
		      __entry->bo, __entry->start, __entry->last,
		      __entry->offset, __entry->flags)
);

DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping),
	    TP_STRUCT__entry(
			     __field(u64, soffset)
			     __field(u64, eoffset)
			     __field(u64, flags)
			     ),

	    TP_fast_assign(
			   __entry->soffset = mapping->start;
			   __entry->eoffset = mapping->last + 1;
			   __entry->flags = mapping->flags;
			   ),
	    TP_printk("soffs=%010llx, eoffs=%010llx, flags=%llx",
		      __entry->soffset, __entry->eoffset, __entry->flags)
);

DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping)
);

DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping)
);

DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
	    TP_ARGS(mapping)
);

TRACE_EVENT(amdgpu_vm_update_ptes,
	    TP_PROTO(struct amdgpu_vm_update_params *p,
		     uint64_t start, uint64_t end,
		     unsigned int nptes, uint64_t dst,
		     uint64_t incr, uint64_t flags,
		     pid_t pid, uint64_t vm_ctx),
	TP_ARGS(p, start, end, nptes, dst, incr, flags, pid, vm_ctx),
	TP_STRUCT__entry(
			 __field(u64, start)
			 __field(u64, end)
			 __field(u64, flags)
			 __field(unsigned int, nptes)
			 __field(u64, incr)
			 __field(pid_t, pid)
			 __field(u64, vm_ctx)
			 __dynamic_array(u64, dst, nptes)
	),

	TP_fast_assign(
			unsigned int i;

			__entry->start = start;
			__entry->end = end;
			__entry->flags = flags;
			__entry->incr = incr;
			__entry->nptes = nptes;
			__entry->pid = pid;
			__entry->vm_ctx = vm_ctx;
			for (i = 0; i < nptes; ++i) {
				u64 addr = p->pages_addr ? amdgpu_vm_map_gart(
					p->pages_addr, dst) : dst;

				((u64 *)__get_dynamic_array(dst))[i] = addr;
				dst += incr;
			}
	),
	TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
		  " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
		  __entry->vm_ctx, __entry->start, __entry->end,
		  __entry->flags, __entry->incr,  __print_array(
		  __get_dynamic_array(dst), __entry->nptes, 8))
);

TRACE_EVENT(amdgpu_vm_set_ptes,
	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
		     uint32_t incr, uint64_t flags, bool immediate),
	    TP_ARGS(pe, addr, count, incr, flags, immediate),
	    TP_STRUCT__entry(
			     __field(u64, pe)
			     __field(u64, addr)
			     __field(u32, count)
			     __field(u32, incr)
			     __field(u64, flags)
			     __field(bool, immediate)
			     ),

	    TP_fast_assign(
			   __entry->pe = pe;
			   __entry->addr = addr;
			   __entry->count = count;
			   __entry->incr = incr;
			   __entry->flags = flags;
			   __entry->immediate = immediate;
			   ),
	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, "
		      "immediate=%d", __entry->pe, __entry->addr, __entry->incr,
		      __entry->flags, __entry->count, __entry->immediate)
);

TRACE_EVENT(amdgpu_vm_copy_ptes,
	    TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool immediate),
	    TP_ARGS(pe, src, count, immediate),
	    TP_STRUCT__entry(
			     __field(u64, pe)
			     __field(u64, src)
			     __field(u32, count)
			     __field(bool, immediate)
			     ),

	    TP_fast_assign(
			   __entry->pe = pe;
			   __entry->src = src;
			   __entry->count = count;
			   __entry->immediate = immediate;
			   ),
	    TP_printk("pe=%010Lx, src=%010Lx, count=%u, immediate=%d",
		      __entry->pe, __entry->src, __entry->count,
		      __entry->immediate)
);

TRACE_EVENT(amdgpu_vm_flush,
	    TP_PROTO(struct amdgpu_ring *ring, unsigned vmid,
		     uint64_t pd_addr),
	    TP_ARGS(ring, vmid, pd_addr),
	    TP_STRUCT__entry(
			     __string(ring, ring->name)
			     __field(u32, vmid)
			     __field(u32, vm_hub)
			     __field(u64, pd_addr)
			     ),

	    TP_fast_assign(
			   __assign_str(ring, ring->name);
			   __entry->vmid = vmid;
			   __entry->vm_hub = ring->funcs->vmhub;
			   __entry->pd_addr = pd_addr;
			   ),
	    TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
		      __get_str(ring), __entry->vmid,
		      __entry->vm_hub,__entry->pd_addr)
);

DECLARE_EVENT_CLASS(amdgpu_pasid,
	    TP_PROTO(unsigned pasid),
	    TP_ARGS(pasid),
	    TP_STRUCT__entry(
			     __field(unsigned, pasid)
			     ),
	    TP_fast_assign(
			   __entry->pasid = pasid;
			   ),
	    TP_printk("pasid=%u", __entry->pasid)
);

DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_allocated,
	    TP_PROTO(unsigned pasid),
	    TP_ARGS(pasid)
);

DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_freed,
	    TP_PROTO(unsigned pasid),
	    TP_ARGS(pasid)
);

TRACE_EVENT(amdgpu_bo_list_set,
	    TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
	    TP_ARGS(list, bo),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo_list *, list)
			     __field(struct amdgpu_bo *, bo)
			     __field(u64, bo_size)
			     ),

	    TP_fast_assign(
			   __entry->list = list;
			   __entry->bo = bo;
			   __entry->bo_size = amdgpu_bo_size(bo);
			   ),
	    TP_printk("list=%p, bo=%p, bo_size=%Ld",
		      __entry->list,
		      __entry->bo,
		      __entry->bo_size)
);

TRACE_EVENT(amdgpu_cs_bo_status,
	    TP_PROTO(uint64_t total_bo, uint64_t total_size),
	    TP_ARGS(total_bo, total_size),
	    TP_STRUCT__entry(
			__field(u64, total_bo)
			__field(u64, total_size)
			),

	    TP_fast_assign(
			__entry->total_bo = total_bo;
			__entry->total_size = total_size;
			),
	    TP_printk("total_bo_size=%Ld, total_bo_count=%Ld",
			__entry->total_bo, __entry->total_size)
);

TRACE_EVENT(amdgpu_bo_move,
	    TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
	    TP_ARGS(bo, new_placement, old_placement),
	    TP_STRUCT__entry(
			__field(struct amdgpu_bo *, bo)
			__field(u64, bo_size)
			__field(u32, new_placement)
			__field(u32, old_placement)
			),

	    TP_fast_assign(
			__entry->bo      = bo;
			__entry->bo_size = amdgpu_bo_size(bo);
			__entry->new_placement = new_placement;
			__entry->old_placement = old_placement;
			),
	    TP_printk("bo=%p, from=%d, to=%d, size=%Ld",
			__entry->bo, __entry->old_placement,
			__entry->new_placement, __entry->bo_size)
);

TRACE_EVENT(amdgpu_ib_pipe_sync,
	    TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
	    TP_ARGS(sched_job, fence),
	    TP_STRUCT__entry(
			     __string(ring, sched_job->base.sched->name)
			     __field(uint64_t, id)
			     __field(struct dma_fence *, fence)
			     __field(uint64_t, ctx)
			     __field(unsigned, seqno)
			     ),

	    TP_fast_assign(
			   __assign_str(ring, sched_job->base.sched->name);
			   __entry->id = sched_job->base.id;
			   __entry->fence = fence;
			   __entry->ctx = fence->context;
			   __entry->seqno = fence->seqno;
			   ),
	    TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
		      __get_str(ring), __entry->id,
		      __entry->fence, __entry->ctx,
		      __entry->seqno)
);

TRACE_EVENT(amdgpu_reset_reg_dumps,
	    TP_PROTO(uint32_t address, uint32_t value),
	    TP_ARGS(address, value),
	    TP_STRUCT__entry(
			     __field(uint32_t, address)
			     __field(uint32_t, value)
			     ),
	    TP_fast_assign(
			   __entry->address = address;
			   __entry->value = value;
			   ),
	    TP_printk("amdgpu register dump 0x%x: 0x%x",
		      __entry->address,
		      __entry->value)
);

#undef AMDGPU_JOB_GET_TIMELINE_NAME
#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu
#include <trace/define_trace.h>
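
The block after #endif only takes effect in the single translation unit that defines CREATE_TRACE_POINTS before including this header; that is what makes <trace/define_trace.h> emit the actual tracepoint instances (in the kernel tree this role is played by a small companion file, amdgpu_trace_points.c). A minimal sketch of such a companion file, under that assumption:

/* Sketch of the companion translation unit that instantiates the
 * tracepoints declared above. CREATE_TRACE_POINTS must be defined in
 * exactly one .c file before the trace header is included. */
#include "amdgpu.h"

#define CREATE_TRACE_POINTS
#include "amdgpu_trace.h"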