trace.h (45283B)
1/* SPDX-License-Identifier: GPL-2.0 */ 2#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) 3#define _TRACE_KVM_H 4 5#include <linux/tracepoint.h> 6#include <asm/vmx.h> 7#include <asm/svm.h> 8#include <asm/clocksource.h> 9#include <asm/pvclock-abi.h> 10#include <asm/sev-common.h> 11 12#undef TRACE_SYSTEM 13#define TRACE_SYSTEM kvm 14 15/* 16 * Tracepoint for guest mode entry. 17 */ 18TRACE_EVENT(kvm_entry, 19 TP_PROTO(struct kvm_vcpu *vcpu), 20 TP_ARGS(vcpu), 21 22 TP_STRUCT__entry( 23 __field( unsigned int, vcpu_id ) 24 __field( unsigned long, rip ) 25 ), 26 27 TP_fast_assign( 28 __entry->vcpu_id = vcpu->vcpu_id; 29 __entry->rip = kvm_rip_read(vcpu); 30 ), 31 32 TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip) 33); 34 35/* 36 * Tracepoint for hypercall. 37 */ 38TRACE_EVENT(kvm_hypercall, 39 TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, 40 unsigned long a2, unsigned long a3), 41 TP_ARGS(nr, a0, a1, a2, a3), 42 43 TP_STRUCT__entry( 44 __field( unsigned long, nr ) 45 __field( unsigned long, a0 ) 46 __field( unsigned long, a1 ) 47 __field( unsigned long, a2 ) 48 __field( unsigned long, a3 ) 49 ), 50 51 TP_fast_assign( 52 __entry->nr = nr; 53 __entry->a0 = a0; 54 __entry->a1 = a1; 55 __entry->a2 = a2; 56 __entry->a3 = a3; 57 ), 58 59 TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx", 60 __entry->nr, __entry->a0, __entry->a1, __entry->a2, 61 __entry->a3) 62); 63 64/* 65 * Tracepoint for hypercall. 66 */ 67TRACE_EVENT(kvm_hv_hypercall, 68 TP_PROTO(__u16 code, bool fast, __u16 var_cnt, __u16 rep_cnt, 69 __u16 rep_idx, __u64 ingpa, __u64 outgpa), 70 TP_ARGS(code, fast, var_cnt, rep_cnt, rep_idx, ingpa, outgpa), 71 72 TP_STRUCT__entry( 73 __field( __u16, rep_cnt ) 74 __field( __u16, rep_idx ) 75 __field( __u64, ingpa ) 76 __field( __u64, outgpa ) 77 __field( __u16, code ) 78 __field( __u16, var_cnt ) 79 __field( bool, fast ) 80 ), 81 82 TP_fast_assign( 83 __entry->rep_cnt = rep_cnt; 84 __entry->rep_idx = rep_idx; 85 __entry->ingpa = ingpa; 86 __entry->outgpa = outgpa; 87 __entry->code = code; 88 __entry->var_cnt = var_cnt; 89 __entry->fast = fast; 90 ), 91 92 TP_printk("code 0x%x %s var_cnt 0x%x rep_cnt 0x%x idx 0x%x in 0x%llx out 0x%llx", 93 __entry->code, __entry->fast ? "fast" : "slow", 94 __entry->var_cnt, __entry->rep_cnt, __entry->rep_idx, 95 __entry->ingpa, __entry->outgpa) 96); 97 98TRACE_EVENT(kvm_hv_hypercall_done, 99 TP_PROTO(u64 result), 100 TP_ARGS(result), 101 102 TP_STRUCT__entry( 103 __field(__u64, result) 104 ), 105 106 TP_fast_assign( 107 __entry->result = result; 108 ), 109 110 TP_printk("result 0x%llx", __entry->result) 111); 112 113/* 114 * Tracepoint for Xen hypercall. 115 */ 116TRACE_EVENT(kvm_xen_hypercall, 117 TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1, 118 unsigned long a2, unsigned long a3, unsigned long a4, 119 unsigned long a5), 120 TP_ARGS(nr, a0, a1, a2, a3, a4, a5), 121 122 TP_STRUCT__entry( 123 __field(unsigned long, nr) 124 __field(unsigned long, a0) 125 __field(unsigned long, a1) 126 __field(unsigned long, a2) 127 __field(unsigned long, a3) 128 __field(unsigned long, a4) 129 __field(unsigned long, a5) 130 ), 131 132 TP_fast_assign( 133 __entry->nr = nr; 134 __entry->a0 = a0; 135 __entry->a1 = a1; 136 __entry->a2 = a2; 137 __entry->a3 = a3; 138 __entry->a4 = a4; 139 __entry->a4 = a5; 140 ), 141 142 TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 %lx", 143 __entry->nr, __entry->a0, __entry->a1, __entry->a2, 144 __entry->a3, __entry->a4, __entry->a5) 145); 146 147 148 149/* 150 * Tracepoint for PIO. 151 */ 152 153#define KVM_PIO_IN 0 154#define KVM_PIO_OUT 1 155 156TRACE_EVENT(kvm_pio, 157 TP_PROTO(unsigned int rw, unsigned int port, unsigned int size, 158 unsigned int count, void *data), 159 TP_ARGS(rw, port, size, count, data), 160 161 TP_STRUCT__entry( 162 __field( unsigned int, rw ) 163 __field( unsigned int, port ) 164 __field( unsigned int, size ) 165 __field( unsigned int, count ) 166 __field( unsigned int, val ) 167 ), 168 169 TP_fast_assign( 170 __entry->rw = rw; 171 __entry->port = port; 172 __entry->size = size; 173 __entry->count = count; 174 if (size == 1) 175 __entry->val = *(unsigned char *)data; 176 else if (size == 2) 177 __entry->val = *(unsigned short *)data; 178 else 179 __entry->val = *(unsigned int *)data; 180 ), 181 182 TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s", 183 __entry->rw ? "write" : "read", 184 __entry->port, __entry->size, __entry->count, __entry->val, 185 __entry->count > 1 ? "(...)" : "") 186); 187 188/* 189 * Tracepoint for fast mmio. 190 */ 191TRACE_EVENT(kvm_fast_mmio, 192 TP_PROTO(u64 gpa), 193 TP_ARGS(gpa), 194 195 TP_STRUCT__entry( 196 __field(u64, gpa) 197 ), 198 199 TP_fast_assign( 200 __entry->gpa = gpa; 201 ), 202 203 TP_printk("fast mmio at gpa 0x%llx", __entry->gpa) 204); 205 206/* 207 * Tracepoint for cpuid. 208 */ 209TRACE_EVENT(kvm_cpuid, 210 TP_PROTO(unsigned int function, unsigned int index, unsigned long rax, 211 unsigned long rbx, unsigned long rcx, unsigned long rdx, 212 bool found, bool used_max_basic), 213 TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic), 214 215 TP_STRUCT__entry( 216 __field( unsigned int, function ) 217 __field( unsigned int, index ) 218 __field( unsigned long, rax ) 219 __field( unsigned long, rbx ) 220 __field( unsigned long, rcx ) 221 __field( unsigned long, rdx ) 222 __field( bool, found ) 223 __field( bool, used_max_basic ) 224 ), 225 226 TP_fast_assign( 227 __entry->function = function; 228 __entry->index = index; 229 __entry->rax = rax; 230 __entry->rbx = rbx; 231 __entry->rcx = rcx; 232 __entry->rdx = rdx; 233 __entry->found = found; 234 __entry->used_max_basic = used_max_basic; 235 ), 236 237 TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s", 238 __entry->function, __entry->index, __entry->rax, 239 __entry->rbx, __entry->rcx, __entry->rdx, 240 __entry->found ? "found" : "not found", 241 __entry->used_max_basic ? ", used max basic" : "") 242); 243 244#define AREG(x) { APIC_##x, "APIC_" #x } 245 246#define kvm_trace_symbol_apic \ 247 AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \ 248 AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \ 249 AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \ 250 AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \ 251 AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \ 252 AREG(ECTRL) 253/* 254 * Tracepoint for apic access. 255 */ 256TRACE_EVENT(kvm_apic, 257 TP_PROTO(unsigned int rw, unsigned int reg, u64 val), 258 TP_ARGS(rw, reg, val), 259 260 TP_STRUCT__entry( 261 __field( unsigned int, rw ) 262 __field( unsigned int, reg ) 263 __field( u64, val ) 264 ), 265 266 TP_fast_assign( 267 __entry->rw = rw; 268 __entry->reg = reg; 269 __entry->val = val; 270 ), 271 272 TP_printk("apic_%s %s = 0x%llx", 273 __entry->rw ? "write" : "read", 274 __print_symbolic(__entry->reg, kvm_trace_symbol_apic), 275 __entry->val) 276); 277 278#define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val) 279#define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val) 280 281#define KVM_ISA_VMX 1 282#define KVM_ISA_SVM 2 283 284#define kvm_print_exit_reason(exit_reason, isa) \ 285 (isa == KVM_ISA_VMX) ? \ 286 __print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) : \ 287 __print_symbolic(exit_reason, SVM_EXIT_REASONS), \ 288 (isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "", \ 289 (isa == KVM_ISA_VMX) ? \ 290 __print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : "" 291 292#define TRACE_EVENT_KVM_EXIT(name) \ 293TRACE_EVENT(name, \ 294 TP_PROTO(struct kvm_vcpu *vcpu, u32 isa), \ 295 TP_ARGS(vcpu, isa), \ 296 \ 297 TP_STRUCT__entry( \ 298 __field( unsigned int, exit_reason ) \ 299 __field( unsigned long, guest_rip ) \ 300 __field( u32, isa ) \ 301 __field( u64, info1 ) \ 302 __field( u64, info2 ) \ 303 __field( u32, intr_info ) \ 304 __field( u32, error_code ) \ 305 __field( unsigned int, vcpu_id ) \ 306 ), \ 307 \ 308 TP_fast_assign( \ 309 __entry->guest_rip = kvm_rip_read(vcpu); \ 310 __entry->isa = isa; \ 311 __entry->vcpu_id = vcpu->vcpu_id; \ 312 static_call(kvm_x86_get_exit_info)(vcpu, \ 313 &__entry->exit_reason, \ 314 &__entry->info1, \ 315 &__entry->info2, \ 316 &__entry->intr_info, \ 317 &__entry->error_code); \ 318 ), \ 319 \ 320 TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx " \ 321 "info2 0x%016llx intr_info 0x%08x error_code 0x%08x", \ 322 __entry->vcpu_id, \ 323 kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \ 324 __entry->guest_rip, __entry->info1, __entry->info2, \ 325 __entry->intr_info, __entry->error_code) \ 326) 327 328/* 329 * Tracepoint for kvm guest exit: 330 */ 331TRACE_EVENT_KVM_EXIT(kvm_exit); 332 333/* 334 * Tracepoint for kvm interrupt injection: 335 */ 336TRACE_EVENT(kvm_inj_virq, 337 TP_PROTO(unsigned int irq), 338 TP_ARGS(irq), 339 340 TP_STRUCT__entry( 341 __field( unsigned int, irq ) 342 ), 343 344 TP_fast_assign( 345 __entry->irq = irq; 346 ), 347 348 TP_printk("irq %u", __entry->irq) 349); 350 351#define EXS(x) { x##_VECTOR, "#" #x } 352 353#define kvm_trace_sym_exc \ 354 EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \ 355 EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \ 356 EXS(MF), EXS(AC), EXS(MC) 357 358/* 359 * Tracepoint for kvm interrupt injection: 360 */ 361TRACE_EVENT(kvm_inj_exception, 362 TP_PROTO(unsigned exception, bool has_error, unsigned error_code), 363 TP_ARGS(exception, has_error, error_code), 364 365 TP_STRUCT__entry( 366 __field( u8, exception ) 367 __field( u8, has_error ) 368 __field( u32, error_code ) 369 ), 370 371 TP_fast_assign( 372 __entry->exception = exception; 373 __entry->has_error = has_error; 374 __entry->error_code = error_code; 375 ), 376 377 TP_printk("%s (0x%x)", 378 __print_symbolic(__entry->exception, kvm_trace_sym_exc), 379 /* FIXME: don't print error_code if not present */ 380 __entry->has_error ? __entry->error_code : 0) 381); 382 383/* 384 * Tracepoint for page fault. 385 */ 386TRACE_EVENT(kvm_page_fault, 387 TP_PROTO(unsigned long fault_address, u64 error_code), 388 TP_ARGS(fault_address, error_code), 389 390 TP_STRUCT__entry( 391 __field( unsigned long, fault_address ) 392 __field( u64, error_code ) 393 ), 394 395 TP_fast_assign( 396 __entry->fault_address = fault_address; 397 __entry->error_code = error_code; 398 ), 399 400 TP_printk("address %lx error_code %llx", 401 __entry->fault_address, __entry->error_code) 402); 403 404/* 405 * Tracepoint for guest MSR access. 406 */ 407TRACE_EVENT(kvm_msr, 408 TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception), 409 TP_ARGS(write, ecx, data, exception), 410 411 TP_STRUCT__entry( 412 __field( unsigned, write ) 413 __field( u32, ecx ) 414 __field( u64, data ) 415 __field( u8, exception ) 416 ), 417 418 TP_fast_assign( 419 __entry->write = write; 420 __entry->ecx = ecx; 421 __entry->data = data; 422 __entry->exception = exception; 423 ), 424 425 TP_printk("msr_%s %x = 0x%llx%s", 426 __entry->write ? "write" : "read", 427 __entry->ecx, __entry->data, 428 __entry->exception ? " (#GP)" : "") 429); 430 431#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false) 432#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false) 433#define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true) 434#define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true) 435 436/* 437 * Tracepoint for guest CR access. 438 */ 439TRACE_EVENT(kvm_cr, 440 TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val), 441 TP_ARGS(rw, cr, val), 442 443 TP_STRUCT__entry( 444 __field( unsigned int, rw ) 445 __field( unsigned int, cr ) 446 __field( unsigned long, val ) 447 ), 448 449 TP_fast_assign( 450 __entry->rw = rw; 451 __entry->cr = cr; 452 __entry->val = val; 453 ), 454 455 TP_printk("cr_%s %x = 0x%lx", 456 __entry->rw ? "write" : "read", 457 __entry->cr, __entry->val) 458); 459 460#define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val) 461#define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val) 462 463TRACE_EVENT(kvm_pic_set_irq, 464 TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced), 465 TP_ARGS(chip, pin, elcr, imr, coalesced), 466 467 TP_STRUCT__entry( 468 __field( __u8, chip ) 469 __field( __u8, pin ) 470 __field( __u8, elcr ) 471 __field( __u8, imr ) 472 __field( bool, coalesced ) 473 ), 474 475 TP_fast_assign( 476 __entry->chip = chip; 477 __entry->pin = pin; 478 __entry->elcr = elcr; 479 __entry->imr = imr; 480 __entry->coalesced = coalesced; 481 ), 482 483 TP_printk("chip %u pin %u (%s%s)%s", 484 __entry->chip, __entry->pin, 485 (__entry->elcr & (1 << __entry->pin)) ? "level":"edge", 486 (__entry->imr & (1 << __entry->pin)) ? "|masked":"", 487 __entry->coalesced ? " (coalesced)" : "") 488); 489 490#define kvm_apic_dst_shorthand \ 491 {0x0, "dst"}, \ 492 {0x1, "self"}, \ 493 {0x2, "all"}, \ 494 {0x3, "all-but-self"} 495 496TRACE_EVENT(kvm_apic_ipi, 497 TP_PROTO(__u32 icr_low, __u32 dest_id), 498 TP_ARGS(icr_low, dest_id), 499 500 TP_STRUCT__entry( 501 __field( __u32, icr_low ) 502 __field( __u32, dest_id ) 503 ), 504 505 TP_fast_assign( 506 __entry->icr_low = icr_low; 507 __entry->dest_id = dest_id; 508 ), 509 510 TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)", 511 __entry->dest_id, (u8)__entry->icr_low, 512 __print_symbolic((__entry->icr_low >> 8 & 0x7), 513 kvm_deliver_mode), 514 (__entry->icr_low & (1<<11)) ? "logical" : "physical", 515 (__entry->icr_low & (1<<14)) ? "assert" : "de-assert", 516 (__entry->icr_low & (1<<15)) ? "level" : "edge", 517 __print_symbolic((__entry->icr_low >> 18 & 0x3), 518 kvm_apic_dst_shorthand)) 519); 520 521TRACE_EVENT(kvm_apic_accept_irq, 522 TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), 523 TP_ARGS(apicid, dm, tm, vec), 524 525 TP_STRUCT__entry( 526 __field( __u32, apicid ) 527 __field( __u16, dm ) 528 __field( __u16, tm ) 529 __field( __u8, vec ) 530 ), 531 532 TP_fast_assign( 533 __entry->apicid = apicid; 534 __entry->dm = dm; 535 __entry->tm = tm; 536 __entry->vec = vec; 537 ), 538 539 TP_printk("apicid %x vec %u (%s|%s)", 540 __entry->apicid, __entry->vec, 541 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 542 __entry->tm ? "level" : "edge") 543); 544 545TRACE_EVENT(kvm_eoi, 546 TP_PROTO(struct kvm_lapic *apic, int vector), 547 TP_ARGS(apic, vector), 548 549 TP_STRUCT__entry( 550 __field( __u32, apicid ) 551 __field( int, vector ) 552 ), 553 554 TP_fast_assign( 555 __entry->apicid = apic->vcpu->vcpu_id; 556 __entry->vector = vector; 557 ), 558 559 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 560); 561 562TRACE_EVENT(kvm_pv_eoi, 563 TP_PROTO(struct kvm_lapic *apic, int vector), 564 TP_ARGS(apic, vector), 565 566 TP_STRUCT__entry( 567 __field( __u32, apicid ) 568 __field( int, vector ) 569 ), 570 571 TP_fast_assign( 572 __entry->apicid = apic->vcpu->vcpu_id; 573 __entry->vector = vector; 574 ), 575 576 TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 577); 578 579/* 580 * Tracepoint for nested VMRUN 581 */ 582TRACE_EVENT(kvm_nested_vmrun, 583 TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, 584 __u32 event_inj, bool npt), 585 TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), 586 587 TP_STRUCT__entry( 588 __field( __u64, rip ) 589 __field( __u64, vmcb ) 590 __field( __u64, nested_rip ) 591 __field( __u32, int_ctl ) 592 __field( __u32, event_inj ) 593 __field( bool, npt ) 594 ), 595 596 TP_fast_assign( 597 __entry->rip = rip; 598 __entry->vmcb = vmcb; 599 __entry->nested_rip = nested_rip; 600 __entry->int_ctl = int_ctl; 601 __entry->event_inj = event_inj; 602 __entry->npt = npt; 603 ), 604 605 TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x " 606 "event_inj: 0x%08x npt: %s", 607 __entry->rip, __entry->vmcb, __entry->nested_rip, 608 __entry->int_ctl, __entry->event_inj, 609 __entry->npt ? "on" : "off") 610); 611 612TRACE_EVENT(kvm_nested_intercepts, 613 TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, 614 __u32 intercept1, __u32 intercept2, __u32 intercept3), 615 TP_ARGS(cr_read, cr_write, exceptions, intercept1, 616 intercept2, intercept3), 617 618 TP_STRUCT__entry( 619 __field( __u16, cr_read ) 620 __field( __u16, cr_write ) 621 __field( __u32, exceptions ) 622 __field( __u32, intercept1 ) 623 __field( __u32, intercept2 ) 624 __field( __u32, intercept3 ) 625 ), 626 627 TP_fast_assign( 628 __entry->cr_read = cr_read; 629 __entry->cr_write = cr_write; 630 __entry->exceptions = exceptions; 631 __entry->intercept1 = intercept1; 632 __entry->intercept2 = intercept2; 633 __entry->intercept3 = intercept3; 634 ), 635 636 TP_printk("cr_read: %04x cr_write: %04x excp: %08x " 637 "intercepts: %08x %08x %08x", 638 __entry->cr_read, __entry->cr_write, __entry->exceptions, 639 __entry->intercept1, __entry->intercept2, __entry->intercept3) 640); 641/* 642 * Tracepoint for #VMEXIT while nested 643 */ 644TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit); 645 646/* 647 * Tracepoint for #VMEXIT reinjected to the guest 648 */ 649TRACE_EVENT(kvm_nested_vmexit_inject, 650 TP_PROTO(__u32 exit_code, 651 __u64 exit_info1, __u64 exit_info2, 652 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa), 653 TP_ARGS(exit_code, exit_info1, exit_info2, 654 exit_int_info, exit_int_info_err, isa), 655 656 TP_STRUCT__entry( 657 __field( __u32, exit_code ) 658 __field( __u64, exit_info1 ) 659 __field( __u64, exit_info2 ) 660 __field( __u32, exit_int_info ) 661 __field( __u32, exit_int_info_err ) 662 __field( __u32, isa ) 663 ), 664 665 TP_fast_assign( 666 __entry->exit_code = exit_code; 667 __entry->exit_info1 = exit_info1; 668 __entry->exit_info2 = exit_info2; 669 __entry->exit_int_info = exit_int_info; 670 __entry->exit_int_info_err = exit_int_info_err; 671 __entry->isa = isa; 672 ), 673 674 TP_printk("reason: %s%s%s ext_inf1: 0x%016llx " 675 "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x", 676 kvm_print_exit_reason(__entry->exit_code, __entry->isa), 677 __entry->exit_info1, __entry->exit_info2, 678 __entry->exit_int_info, __entry->exit_int_info_err) 679); 680 681/* 682 * Tracepoint for nested #vmexit because of interrupt pending 683 */ 684TRACE_EVENT(kvm_nested_intr_vmexit, 685 TP_PROTO(__u64 rip), 686 TP_ARGS(rip), 687 688 TP_STRUCT__entry( 689 __field( __u64, rip ) 690 ), 691 692 TP_fast_assign( 693 __entry->rip = rip 694 ), 695 696 TP_printk("rip: 0x%016llx", __entry->rip) 697); 698 699/* 700 * Tracepoint for nested #vmexit because of interrupt pending 701 */ 702TRACE_EVENT(kvm_invlpga, 703 TP_PROTO(__u64 rip, int asid, u64 address), 704 TP_ARGS(rip, asid, address), 705 706 TP_STRUCT__entry( 707 __field( __u64, rip ) 708 __field( int, asid ) 709 __field( __u64, address ) 710 ), 711 712 TP_fast_assign( 713 __entry->rip = rip; 714 __entry->asid = asid; 715 __entry->address = address; 716 ), 717 718 TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx", 719 __entry->rip, __entry->asid, __entry->address) 720); 721 722/* 723 * Tracepoint for nested #vmexit because of interrupt pending 724 */ 725TRACE_EVENT(kvm_skinit, 726 TP_PROTO(__u64 rip, __u32 slb), 727 TP_ARGS(rip, slb), 728 729 TP_STRUCT__entry( 730 __field( __u64, rip ) 731 __field( __u32, slb ) 732 ), 733 734 TP_fast_assign( 735 __entry->rip = rip; 736 __entry->slb = slb; 737 ), 738 739 TP_printk("rip: 0x%016llx slb: 0x%08x", 740 __entry->rip, __entry->slb) 741); 742 743#define KVM_EMUL_INSN_F_CR0_PE (1 << 0) 744#define KVM_EMUL_INSN_F_EFL_VM (1 << 1) 745#define KVM_EMUL_INSN_F_CS_D (1 << 2) 746#define KVM_EMUL_INSN_F_CS_L (1 << 3) 747 748#define kvm_trace_symbol_emul_flags \ 749 { 0, "real" }, \ 750 { KVM_EMUL_INSN_F_CR0_PE \ 751 | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \ 752 { KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \ 753 { KVM_EMUL_INSN_F_CR0_PE \ 754 | KVM_EMUL_INSN_F_CS_D, "prot32" }, \ 755 { KVM_EMUL_INSN_F_CR0_PE \ 756 | KVM_EMUL_INSN_F_CS_L, "prot64" } 757 758#define kei_decode_mode(mode) ({ \ 759 u8 flags = 0xff; \ 760 switch (mode) { \ 761 case X86EMUL_MODE_REAL: \ 762 flags = 0; \ 763 break; \ 764 case X86EMUL_MODE_VM86: \ 765 flags = KVM_EMUL_INSN_F_EFL_VM; \ 766 break; \ 767 case X86EMUL_MODE_PROT16: \ 768 flags = KVM_EMUL_INSN_F_CR0_PE; \ 769 break; \ 770 case X86EMUL_MODE_PROT32: \ 771 flags = KVM_EMUL_INSN_F_CR0_PE \ 772 | KVM_EMUL_INSN_F_CS_D; \ 773 break; \ 774 case X86EMUL_MODE_PROT64: \ 775 flags = KVM_EMUL_INSN_F_CR0_PE \ 776 | KVM_EMUL_INSN_F_CS_L; \ 777 break; \ 778 } \ 779 flags; \ 780 }) 781 782TRACE_EVENT(kvm_emulate_insn, 783 TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed), 784 TP_ARGS(vcpu, failed), 785 786 TP_STRUCT__entry( 787 __field( __u64, rip ) 788 __field( __u32, csbase ) 789 __field( __u8, len ) 790 __array( __u8, insn, 15 ) 791 __field( __u8, flags ) 792 __field( __u8, failed ) 793 ), 794 795 TP_fast_assign( 796 __entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS); 797 __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr 798 - vcpu->arch.emulate_ctxt->fetch.data; 799 __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len; 800 memcpy(__entry->insn, 801 vcpu->arch.emulate_ctxt->fetch.data, 802 15); 803 __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode); 804 __entry->failed = failed; 805 ), 806 807 TP_printk("%x:%llx:%s (%s)%s", 808 __entry->csbase, __entry->rip, 809 __print_hex(__entry->insn, __entry->len), 810 __print_symbolic(__entry->flags, 811 kvm_trace_symbol_emul_flags), 812 __entry->failed ? " failed" : "" 813 ) 814 ); 815 816#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0) 817#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1) 818 819TRACE_EVENT( 820 vcpu_match_mmio, 821 TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match), 822 TP_ARGS(gva, gpa, write, gpa_match), 823 824 TP_STRUCT__entry( 825 __field(gva_t, gva) 826 __field(gpa_t, gpa) 827 __field(bool, write) 828 __field(bool, gpa_match) 829 ), 830 831 TP_fast_assign( 832 __entry->gva = gva; 833 __entry->gpa = gpa; 834 __entry->write = write; 835 __entry->gpa_match = gpa_match 836 ), 837 838 TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa, 839 __entry->write ? "Write" : "Read", 840 __entry->gpa_match ? "GPA" : "GVA") 841); 842 843TRACE_EVENT(kvm_write_tsc_offset, 844 TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset, 845 __u64 next_tsc_offset), 846 TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset), 847 848 TP_STRUCT__entry( 849 __field( unsigned int, vcpu_id ) 850 __field( __u64, previous_tsc_offset ) 851 __field( __u64, next_tsc_offset ) 852 ), 853 854 TP_fast_assign( 855 __entry->vcpu_id = vcpu_id; 856 __entry->previous_tsc_offset = previous_tsc_offset; 857 __entry->next_tsc_offset = next_tsc_offset; 858 ), 859 860 TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id, 861 __entry->previous_tsc_offset, __entry->next_tsc_offset) 862); 863 864#ifdef CONFIG_X86_64 865 866#define host_clocks \ 867 {VDSO_CLOCKMODE_NONE, "none"}, \ 868 {VDSO_CLOCKMODE_TSC, "tsc"} \ 869 870TRACE_EVENT(kvm_update_master_clock, 871 TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched), 872 TP_ARGS(use_master_clock, host_clock, offset_matched), 873 874 TP_STRUCT__entry( 875 __field( bool, use_master_clock ) 876 __field( unsigned int, host_clock ) 877 __field( bool, offset_matched ) 878 ), 879 880 TP_fast_assign( 881 __entry->use_master_clock = use_master_clock; 882 __entry->host_clock = host_clock; 883 __entry->offset_matched = offset_matched; 884 ), 885 886 TP_printk("masterclock %d hostclock %s offsetmatched %u", 887 __entry->use_master_clock, 888 __print_symbolic(__entry->host_clock, host_clocks), 889 __entry->offset_matched) 890); 891 892TRACE_EVENT(kvm_track_tsc, 893 TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched, 894 unsigned int online_vcpus, bool use_master_clock, 895 unsigned int host_clock), 896 TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock, 897 host_clock), 898 899 TP_STRUCT__entry( 900 __field( unsigned int, vcpu_id ) 901 __field( unsigned int, nr_vcpus_matched_tsc ) 902 __field( unsigned int, online_vcpus ) 903 __field( bool, use_master_clock ) 904 __field( unsigned int, host_clock ) 905 ), 906 907 TP_fast_assign( 908 __entry->vcpu_id = vcpu_id; 909 __entry->nr_vcpus_matched_tsc = nr_matched; 910 __entry->online_vcpus = online_vcpus; 911 __entry->use_master_clock = use_master_clock; 912 __entry->host_clock = host_clock; 913 ), 914 915 TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u" 916 " hostclock %s", 917 __entry->vcpu_id, __entry->use_master_clock, 918 __entry->nr_vcpus_matched_tsc, __entry->online_vcpus, 919 __print_symbolic(__entry->host_clock, host_clocks)) 920); 921 922#endif /* CONFIG_X86_64 */ 923 924/* 925 * Tracepoint for PML full VMEXIT. 926 */ 927TRACE_EVENT(kvm_pml_full, 928 TP_PROTO(unsigned int vcpu_id), 929 TP_ARGS(vcpu_id), 930 931 TP_STRUCT__entry( 932 __field( unsigned int, vcpu_id ) 933 ), 934 935 TP_fast_assign( 936 __entry->vcpu_id = vcpu_id; 937 ), 938 939 TP_printk("vcpu %d: PML full", __entry->vcpu_id) 940); 941 942TRACE_EVENT(kvm_ple_window_update, 943 TP_PROTO(unsigned int vcpu_id, unsigned int new, unsigned int old), 944 TP_ARGS(vcpu_id, new, old), 945 946 TP_STRUCT__entry( 947 __field( unsigned int, vcpu_id ) 948 __field( unsigned int, new ) 949 __field( unsigned int, old ) 950 ), 951 952 TP_fast_assign( 953 __entry->vcpu_id = vcpu_id; 954 __entry->new = new; 955 __entry->old = old; 956 ), 957 958 TP_printk("vcpu %u old %u new %u (%s)", 959 __entry->vcpu_id, __entry->old, __entry->new, 960 __entry->old < __entry->new ? "growed" : "shrinked") 961); 962 963TRACE_EVENT(kvm_pvclock_update, 964 TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock), 965 TP_ARGS(vcpu_id, pvclock), 966 967 TP_STRUCT__entry( 968 __field( unsigned int, vcpu_id ) 969 __field( __u32, version ) 970 __field( __u64, tsc_timestamp ) 971 __field( __u64, system_time ) 972 __field( __u32, tsc_to_system_mul ) 973 __field( __s8, tsc_shift ) 974 __field( __u8, flags ) 975 ), 976 977 TP_fast_assign( 978 __entry->vcpu_id = vcpu_id; 979 __entry->version = pvclock->version; 980 __entry->tsc_timestamp = pvclock->tsc_timestamp; 981 __entry->system_time = pvclock->system_time; 982 __entry->tsc_to_system_mul = pvclock->tsc_to_system_mul; 983 __entry->tsc_shift = pvclock->tsc_shift; 984 __entry->flags = pvclock->flags; 985 ), 986 987 TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, " 988 "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, " 989 "flags 0x%x }", 990 __entry->vcpu_id, 991 __entry->version, 992 __entry->tsc_timestamp, 993 __entry->system_time, 994 __entry->tsc_to_system_mul, 995 __entry->tsc_shift, 996 __entry->flags) 997); 998 999TRACE_EVENT(kvm_wait_lapic_expire, 1000 TP_PROTO(unsigned int vcpu_id, s64 delta), 1001 TP_ARGS(vcpu_id, delta), 1002 1003 TP_STRUCT__entry( 1004 __field( unsigned int, vcpu_id ) 1005 __field( s64, delta ) 1006 ), 1007 1008 TP_fast_assign( 1009 __entry->vcpu_id = vcpu_id; 1010 __entry->delta = delta; 1011 ), 1012 1013 TP_printk("vcpu %u: delta %lld (%s)", 1014 __entry->vcpu_id, 1015 __entry->delta, 1016 __entry->delta < 0 ? "early" : "late") 1017); 1018 1019TRACE_EVENT(kvm_smm_transition, 1020 TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering), 1021 TP_ARGS(vcpu_id, smbase, entering), 1022 1023 TP_STRUCT__entry( 1024 __field( unsigned int, vcpu_id ) 1025 __field( u64, smbase ) 1026 __field( bool, entering ) 1027 ), 1028 1029 TP_fast_assign( 1030 __entry->vcpu_id = vcpu_id; 1031 __entry->smbase = smbase; 1032 __entry->entering = entering; 1033 ), 1034 1035 TP_printk("vcpu %u: %s SMM, smbase 0x%llx", 1036 __entry->vcpu_id, 1037 __entry->entering ? "entering" : "leaving", 1038 __entry->smbase) 1039); 1040 1041/* 1042 * Tracepoint for VT-d posted-interrupts. 1043 */ 1044TRACE_EVENT(kvm_pi_irte_update, 1045 TP_PROTO(unsigned int host_irq, unsigned int vcpu_id, 1046 unsigned int gsi, unsigned int gvec, 1047 u64 pi_desc_addr, bool set), 1048 TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set), 1049 1050 TP_STRUCT__entry( 1051 __field( unsigned int, host_irq ) 1052 __field( unsigned int, vcpu_id ) 1053 __field( unsigned int, gsi ) 1054 __field( unsigned int, gvec ) 1055 __field( u64, pi_desc_addr ) 1056 __field( bool, set ) 1057 ), 1058 1059 TP_fast_assign( 1060 __entry->host_irq = host_irq; 1061 __entry->vcpu_id = vcpu_id; 1062 __entry->gsi = gsi; 1063 __entry->gvec = gvec; 1064 __entry->pi_desc_addr = pi_desc_addr; 1065 __entry->set = set; 1066 ), 1067 1068 TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, " 1069 "gvec: 0x%x, pi_desc_addr: 0x%llx", 1070 __entry->set ? "enabled and being updated" : "disabled", 1071 __entry->host_irq, 1072 __entry->vcpu_id, 1073 __entry->gsi, 1074 __entry->gvec, 1075 __entry->pi_desc_addr) 1076); 1077 1078/* 1079 * Tracepoint for kvm_hv_notify_acked_sint. 1080 */ 1081TRACE_EVENT(kvm_hv_notify_acked_sint, 1082 TP_PROTO(int vcpu_id, u32 sint), 1083 TP_ARGS(vcpu_id, sint), 1084 1085 TP_STRUCT__entry( 1086 __field(int, vcpu_id) 1087 __field(u32, sint) 1088 ), 1089 1090 TP_fast_assign( 1091 __entry->vcpu_id = vcpu_id; 1092 __entry->sint = sint; 1093 ), 1094 1095 TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint) 1096); 1097 1098/* 1099 * Tracepoint for synic_set_irq. 1100 */ 1101TRACE_EVENT(kvm_hv_synic_set_irq, 1102 TP_PROTO(int vcpu_id, u32 sint, int vector, int ret), 1103 TP_ARGS(vcpu_id, sint, vector, ret), 1104 1105 TP_STRUCT__entry( 1106 __field(int, vcpu_id) 1107 __field(u32, sint) 1108 __field(int, vector) 1109 __field(int, ret) 1110 ), 1111 1112 TP_fast_assign( 1113 __entry->vcpu_id = vcpu_id; 1114 __entry->sint = sint; 1115 __entry->vector = vector; 1116 __entry->ret = ret; 1117 ), 1118 1119 TP_printk("vcpu_id %d sint %u vector %d ret %d", 1120 __entry->vcpu_id, __entry->sint, __entry->vector, 1121 __entry->ret) 1122); 1123 1124/* 1125 * Tracepoint for kvm_hv_synic_send_eoi. 1126 */ 1127TRACE_EVENT(kvm_hv_synic_send_eoi, 1128 TP_PROTO(int vcpu_id, int vector), 1129 TP_ARGS(vcpu_id, vector), 1130 1131 TP_STRUCT__entry( 1132 __field(int, vcpu_id) 1133 __field(u32, sint) 1134 __field(int, vector) 1135 __field(int, ret) 1136 ), 1137 1138 TP_fast_assign( 1139 __entry->vcpu_id = vcpu_id; 1140 __entry->vector = vector; 1141 ), 1142 1143 TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector) 1144); 1145 1146/* 1147 * Tracepoint for synic_set_msr. 1148 */ 1149TRACE_EVENT(kvm_hv_synic_set_msr, 1150 TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host), 1151 TP_ARGS(vcpu_id, msr, data, host), 1152 1153 TP_STRUCT__entry( 1154 __field(int, vcpu_id) 1155 __field(u32, msr) 1156 __field(u64, data) 1157 __field(bool, host) 1158 ), 1159 1160 TP_fast_assign( 1161 __entry->vcpu_id = vcpu_id; 1162 __entry->msr = msr; 1163 __entry->data = data; 1164 __entry->host = host 1165 ), 1166 1167 TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d", 1168 __entry->vcpu_id, __entry->msr, __entry->data, __entry->host) 1169); 1170 1171/* 1172 * Tracepoint for stimer_set_config. 1173 */ 1174TRACE_EVENT(kvm_hv_stimer_set_config, 1175 TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host), 1176 TP_ARGS(vcpu_id, timer_index, config, host), 1177 1178 TP_STRUCT__entry( 1179 __field(int, vcpu_id) 1180 __field(int, timer_index) 1181 __field(u64, config) 1182 __field(bool, host) 1183 ), 1184 1185 TP_fast_assign( 1186 __entry->vcpu_id = vcpu_id; 1187 __entry->timer_index = timer_index; 1188 __entry->config = config; 1189 __entry->host = host; 1190 ), 1191 1192 TP_printk("vcpu_id %d timer %d config 0x%llx host %d", 1193 __entry->vcpu_id, __entry->timer_index, __entry->config, 1194 __entry->host) 1195); 1196 1197/* 1198 * Tracepoint for stimer_set_count. 1199 */ 1200TRACE_EVENT(kvm_hv_stimer_set_count, 1201 TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host), 1202 TP_ARGS(vcpu_id, timer_index, count, host), 1203 1204 TP_STRUCT__entry( 1205 __field(int, vcpu_id) 1206 __field(int, timer_index) 1207 __field(u64, count) 1208 __field(bool, host) 1209 ), 1210 1211 TP_fast_assign( 1212 __entry->vcpu_id = vcpu_id; 1213 __entry->timer_index = timer_index; 1214 __entry->count = count; 1215 __entry->host = host; 1216 ), 1217 1218 TP_printk("vcpu_id %d timer %d count %llu host %d", 1219 __entry->vcpu_id, __entry->timer_index, __entry->count, 1220 __entry->host) 1221); 1222 1223/* 1224 * Tracepoint for stimer_start(periodic timer case). 1225 */ 1226TRACE_EVENT(kvm_hv_stimer_start_periodic, 1227 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time), 1228 TP_ARGS(vcpu_id, timer_index, time_now, exp_time), 1229 1230 TP_STRUCT__entry( 1231 __field(int, vcpu_id) 1232 __field(int, timer_index) 1233 __field(u64, time_now) 1234 __field(u64, exp_time) 1235 ), 1236 1237 TP_fast_assign( 1238 __entry->vcpu_id = vcpu_id; 1239 __entry->timer_index = timer_index; 1240 __entry->time_now = time_now; 1241 __entry->exp_time = exp_time; 1242 ), 1243 1244 TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu", 1245 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1246 __entry->exp_time) 1247); 1248 1249/* 1250 * Tracepoint for stimer_start(one-shot timer case). 1251 */ 1252TRACE_EVENT(kvm_hv_stimer_start_one_shot, 1253 TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count), 1254 TP_ARGS(vcpu_id, timer_index, time_now, count), 1255 1256 TP_STRUCT__entry( 1257 __field(int, vcpu_id) 1258 __field(int, timer_index) 1259 __field(u64, time_now) 1260 __field(u64, count) 1261 ), 1262 1263 TP_fast_assign( 1264 __entry->vcpu_id = vcpu_id; 1265 __entry->timer_index = timer_index; 1266 __entry->time_now = time_now; 1267 __entry->count = count; 1268 ), 1269 1270 TP_printk("vcpu_id %d timer %d time_now %llu count %llu", 1271 __entry->vcpu_id, __entry->timer_index, __entry->time_now, 1272 __entry->count) 1273); 1274 1275/* 1276 * Tracepoint for stimer_timer_callback. 1277 */ 1278TRACE_EVENT(kvm_hv_stimer_callback, 1279 TP_PROTO(int vcpu_id, int timer_index), 1280 TP_ARGS(vcpu_id, timer_index), 1281 1282 TP_STRUCT__entry( 1283 __field(int, vcpu_id) 1284 __field(int, timer_index) 1285 ), 1286 1287 TP_fast_assign( 1288 __entry->vcpu_id = vcpu_id; 1289 __entry->timer_index = timer_index; 1290 ), 1291 1292 TP_printk("vcpu_id %d timer %d", 1293 __entry->vcpu_id, __entry->timer_index) 1294); 1295 1296/* 1297 * Tracepoint for stimer_expiration. 1298 */ 1299TRACE_EVENT(kvm_hv_stimer_expiration, 1300 TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result), 1301 TP_ARGS(vcpu_id, timer_index, direct, msg_send_result), 1302 1303 TP_STRUCT__entry( 1304 __field(int, vcpu_id) 1305 __field(int, timer_index) 1306 __field(int, direct) 1307 __field(int, msg_send_result) 1308 ), 1309 1310 TP_fast_assign( 1311 __entry->vcpu_id = vcpu_id; 1312 __entry->timer_index = timer_index; 1313 __entry->direct = direct; 1314 __entry->msg_send_result = msg_send_result; 1315 ), 1316 1317 TP_printk("vcpu_id %d timer %d direct %d send result %d", 1318 __entry->vcpu_id, __entry->timer_index, 1319 __entry->direct, __entry->msg_send_result) 1320); 1321 1322/* 1323 * Tracepoint for stimer_cleanup. 1324 */ 1325TRACE_EVENT(kvm_hv_stimer_cleanup, 1326 TP_PROTO(int vcpu_id, int timer_index), 1327 TP_ARGS(vcpu_id, timer_index), 1328 1329 TP_STRUCT__entry( 1330 __field(int, vcpu_id) 1331 __field(int, timer_index) 1332 ), 1333 1334 TP_fast_assign( 1335 __entry->vcpu_id = vcpu_id; 1336 __entry->timer_index = timer_index; 1337 ), 1338 1339 TP_printk("vcpu_id %d timer %d", 1340 __entry->vcpu_id, __entry->timer_index) 1341); 1342 1343TRACE_EVENT(kvm_apicv_inhibit_changed, 1344 TP_PROTO(int reason, bool set, unsigned long inhibits), 1345 TP_ARGS(reason, set, inhibits), 1346 1347 TP_STRUCT__entry( 1348 __field(int, reason) 1349 __field(bool, set) 1350 __field(unsigned long, inhibits) 1351 ), 1352 1353 TP_fast_assign( 1354 __entry->reason = reason; 1355 __entry->set = set; 1356 __entry->inhibits = inhibits; 1357 ), 1358 1359 TP_printk("%s reason=%u, inhibits=0x%lx", 1360 __entry->set ? "set" : "cleared", 1361 __entry->reason, __entry->inhibits) 1362); 1363 1364TRACE_EVENT(kvm_apicv_accept_irq, 1365 TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), 1366 TP_ARGS(apicid, dm, tm, vec), 1367 1368 TP_STRUCT__entry( 1369 __field( __u32, apicid ) 1370 __field( __u16, dm ) 1371 __field( __u16, tm ) 1372 __field( __u8, vec ) 1373 ), 1374 1375 TP_fast_assign( 1376 __entry->apicid = apicid; 1377 __entry->dm = dm; 1378 __entry->tm = tm; 1379 __entry->vec = vec; 1380 ), 1381 1382 TP_printk("apicid %x vec %u (%s|%s)", 1383 __entry->apicid, __entry->vec, 1384 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 1385 __entry->tm ? "level" : "edge") 1386); 1387 1388/* 1389 * Tracepoint for AMD AVIC 1390 */ 1391TRACE_EVENT(kvm_avic_incomplete_ipi, 1392 TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index), 1393 TP_ARGS(vcpu, icrh, icrl, id, index), 1394 1395 TP_STRUCT__entry( 1396 __field(u32, vcpu) 1397 __field(u32, icrh) 1398 __field(u32, icrl) 1399 __field(u32, id) 1400 __field(u32, index) 1401 ), 1402 1403 TP_fast_assign( 1404 __entry->vcpu = vcpu; 1405 __entry->icrh = icrh; 1406 __entry->icrl = icrl; 1407 __entry->id = id; 1408 __entry->index = index; 1409 ), 1410 1411 TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u", 1412 __entry->vcpu, __entry->icrh, __entry->icrl, 1413 __entry->id, __entry->index) 1414); 1415 1416TRACE_EVENT(kvm_avic_unaccelerated_access, 1417 TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec), 1418 TP_ARGS(vcpu, offset, ft, rw, vec), 1419 1420 TP_STRUCT__entry( 1421 __field(u32, vcpu) 1422 __field(u32, offset) 1423 __field(bool, ft) 1424 __field(bool, rw) 1425 __field(u32, vec) 1426 ), 1427 1428 TP_fast_assign( 1429 __entry->vcpu = vcpu; 1430 __entry->offset = offset; 1431 __entry->ft = ft; 1432 __entry->rw = rw; 1433 __entry->vec = vec; 1434 ), 1435 1436 TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x", 1437 __entry->vcpu, 1438 __entry->offset, 1439 __print_symbolic(__entry->offset, kvm_trace_symbol_apic), 1440 __entry->ft ? "trap" : "fault", 1441 __entry->rw ? "write" : "read", 1442 __entry->vec) 1443); 1444 1445TRACE_EVENT(kvm_avic_ga_log, 1446 TP_PROTO(u32 vmid, u32 vcpuid), 1447 TP_ARGS(vmid, vcpuid), 1448 1449 TP_STRUCT__entry( 1450 __field(u32, vmid) 1451 __field(u32, vcpuid) 1452 ), 1453 1454 TP_fast_assign( 1455 __entry->vmid = vmid; 1456 __entry->vcpuid = vcpuid; 1457 ), 1458 1459 TP_printk("vmid=%u, vcpuid=%u", 1460 __entry->vmid, __entry->vcpuid) 1461); 1462 1463TRACE_EVENT(kvm_avic_kick_vcpu_slowpath, 1464 TP_PROTO(u32 icrh, u32 icrl, u32 index), 1465 TP_ARGS(icrh, icrl, index), 1466 1467 TP_STRUCT__entry( 1468 __field(u32, icrh) 1469 __field(u32, icrl) 1470 __field(u32, index) 1471 ), 1472 1473 TP_fast_assign( 1474 __entry->icrh = icrh; 1475 __entry->icrl = icrl; 1476 __entry->index = index; 1477 ), 1478 1479 TP_printk("icrh:icrl=%#08x:%08x, index=%u", 1480 __entry->icrh, __entry->icrl, __entry->index) 1481); 1482 1483TRACE_EVENT(kvm_hv_timer_state, 1484 TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use), 1485 TP_ARGS(vcpu_id, hv_timer_in_use), 1486 TP_STRUCT__entry( 1487 __field(unsigned int, vcpu_id) 1488 __field(unsigned int, hv_timer_in_use) 1489 ), 1490 TP_fast_assign( 1491 __entry->vcpu_id = vcpu_id; 1492 __entry->hv_timer_in_use = hv_timer_in_use; 1493 ), 1494 TP_printk("vcpu_id %x hv_timer %x", 1495 __entry->vcpu_id, 1496 __entry->hv_timer_in_use) 1497); 1498 1499/* 1500 * Tracepoint for kvm_hv_flush_tlb. 1501 */ 1502TRACE_EVENT(kvm_hv_flush_tlb, 1503 TP_PROTO(u64 processor_mask, u64 address_space, u64 flags), 1504 TP_ARGS(processor_mask, address_space, flags), 1505 1506 TP_STRUCT__entry( 1507 __field(u64, processor_mask) 1508 __field(u64, address_space) 1509 __field(u64, flags) 1510 ), 1511 1512 TP_fast_assign( 1513 __entry->processor_mask = processor_mask; 1514 __entry->address_space = address_space; 1515 __entry->flags = flags; 1516 ), 1517 1518 TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx", 1519 __entry->processor_mask, __entry->address_space, 1520 __entry->flags) 1521); 1522 1523/* 1524 * Tracepoint for kvm_hv_flush_tlb_ex. 1525 */ 1526TRACE_EVENT(kvm_hv_flush_tlb_ex, 1527 TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags), 1528 TP_ARGS(valid_bank_mask, format, address_space, flags), 1529 1530 TP_STRUCT__entry( 1531 __field(u64, valid_bank_mask) 1532 __field(u64, format) 1533 __field(u64, address_space) 1534 __field(u64, flags) 1535 ), 1536 1537 TP_fast_assign( 1538 __entry->valid_bank_mask = valid_bank_mask; 1539 __entry->format = format; 1540 __entry->address_space = address_space; 1541 __entry->flags = flags; 1542 ), 1543 1544 TP_printk("valid_bank_mask 0x%llx format 0x%llx " 1545 "address_space 0x%llx flags 0x%llx", 1546 __entry->valid_bank_mask, __entry->format, 1547 __entry->address_space, __entry->flags) 1548); 1549 1550/* 1551 * Tracepoints for kvm_hv_send_ipi. 1552 */ 1553TRACE_EVENT(kvm_hv_send_ipi, 1554 TP_PROTO(u32 vector, u64 processor_mask), 1555 TP_ARGS(vector, processor_mask), 1556 1557 TP_STRUCT__entry( 1558 __field(u32, vector) 1559 __field(u64, processor_mask) 1560 ), 1561 1562 TP_fast_assign( 1563 __entry->vector = vector; 1564 __entry->processor_mask = processor_mask; 1565 ), 1566 1567 TP_printk("vector %x processor_mask 0x%llx", 1568 __entry->vector, __entry->processor_mask) 1569); 1570 1571TRACE_EVENT(kvm_hv_send_ipi_ex, 1572 TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask), 1573 TP_ARGS(vector, format, valid_bank_mask), 1574 1575 TP_STRUCT__entry( 1576 __field(u32, vector) 1577 __field(u64, format) 1578 __field(u64, valid_bank_mask) 1579 ), 1580 1581 TP_fast_assign( 1582 __entry->vector = vector; 1583 __entry->format = format; 1584 __entry->valid_bank_mask = valid_bank_mask; 1585 ), 1586 1587 TP_printk("vector %x format %llx valid_bank_mask 0x%llx", 1588 __entry->vector, __entry->format, 1589 __entry->valid_bank_mask) 1590); 1591 1592TRACE_EVENT(kvm_pv_tlb_flush, 1593 TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb), 1594 TP_ARGS(vcpu_id, need_flush_tlb), 1595 1596 TP_STRUCT__entry( 1597 __field( unsigned int, vcpu_id ) 1598 __field( bool, need_flush_tlb ) 1599 ), 1600 1601 TP_fast_assign( 1602 __entry->vcpu_id = vcpu_id; 1603 __entry->need_flush_tlb = need_flush_tlb; 1604 ), 1605 1606 TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id, 1607 __entry->need_flush_tlb ? "true" : "false") 1608); 1609 1610/* 1611 * Tracepoint for failed nested VMX VM-Enter. 1612 */ 1613TRACE_EVENT(kvm_nested_vmenter_failed, 1614 TP_PROTO(const char *msg, u32 err), 1615 TP_ARGS(msg, err), 1616 1617 TP_STRUCT__entry( 1618 __string(msg, msg) 1619 __field(u32, err) 1620 ), 1621 1622 TP_fast_assign( 1623 __assign_str(msg, msg); 1624 __entry->err = err; 1625 ), 1626 1627 TP_printk("%s%s", __get_str(msg), !__entry->err ? "" : 1628 __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS)) 1629); 1630 1631/* 1632 * Tracepoint for syndbg_set_msr. 1633 */ 1634TRACE_EVENT(kvm_hv_syndbg_set_msr, 1635 TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data), 1636 TP_ARGS(vcpu_id, vp_index, msr, data), 1637 1638 TP_STRUCT__entry( 1639 __field(int, vcpu_id) 1640 __field(u32, vp_index) 1641 __field(u32, msr) 1642 __field(u64, data) 1643 ), 1644 1645 TP_fast_assign( 1646 __entry->vcpu_id = vcpu_id; 1647 __entry->vp_index = vp_index; 1648 __entry->msr = msr; 1649 __entry->data = data; 1650 ), 1651 1652 TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx", 1653 __entry->vcpu_id, __entry->vp_index, __entry->msr, 1654 __entry->data) 1655); 1656 1657/* 1658 * Tracepoint for syndbg_get_msr. 1659 */ 1660TRACE_EVENT(kvm_hv_syndbg_get_msr, 1661 TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data), 1662 TP_ARGS(vcpu_id, vp_index, msr, data), 1663 1664 TP_STRUCT__entry( 1665 __field(int, vcpu_id) 1666 __field(u32, vp_index) 1667 __field(u32, msr) 1668 __field(u64, data) 1669 ), 1670 1671 TP_fast_assign( 1672 __entry->vcpu_id = vcpu_id; 1673 __entry->vp_index = vp_index; 1674 __entry->msr = msr; 1675 __entry->data = data; 1676 ), 1677 1678 TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx", 1679 __entry->vcpu_id, __entry->vp_index, __entry->msr, 1680 __entry->data) 1681); 1682 1683/* 1684 * Tracepoint for the start of VMGEXIT processing 1685 */ 1686TRACE_EVENT(kvm_vmgexit_enter, 1687 TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb), 1688 TP_ARGS(vcpu_id, ghcb), 1689 1690 TP_STRUCT__entry( 1691 __field(unsigned int, vcpu_id) 1692 __field(u64, exit_reason) 1693 __field(u64, info1) 1694 __field(u64, info2) 1695 ), 1696 1697 TP_fast_assign( 1698 __entry->vcpu_id = vcpu_id; 1699 __entry->exit_reason = ghcb->save.sw_exit_code; 1700 __entry->info1 = ghcb->save.sw_exit_info_1; 1701 __entry->info2 = ghcb->save.sw_exit_info_2; 1702 ), 1703 1704 TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx", 1705 __entry->vcpu_id, __entry->exit_reason, 1706 __entry->info1, __entry->info2) 1707); 1708 1709/* 1710 * Tracepoint for the end of VMGEXIT processing 1711 */ 1712TRACE_EVENT(kvm_vmgexit_exit, 1713 TP_PROTO(unsigned int vcpu_id, struct ghcb *ghcb), 1714 TP_ARGS(vcpu_id, ghcb), 1715 1716 TP_STRUCT__entry( 1717 __field(unsigned int, vcpu_id) 1718 __field(u64, exit_reason) 1719 __field(u64, info1) 1720 __field(u64, info2) 1721 ), 1722 1723 TP_fast_assign( 1724 __entry->vcpu_id = vcpu_id; 1725 __entry->exit_reason = ghcb->save.sw_exit_code; 1726 __entry->info1 = ghcb->save.sw_exit_info_1; 1727 __entry->info2 = ghcb->save.sw_exit_info_2; 1728 ), 1729 1730 TP_printk("vcpu %u, exit_reason %llx, exit_info1 %llx, exit_info2 %llx", 1731 __entry->vcpu_id, __entry->exit_reason, 1732 __entry->info1, __entry->info2) 1733); 1734 1735/* 1736 * Tracepoint for the start of VMGEXIT MSR procotol processing 1737 */ 1738TRACE_EVENT(kvm_vmgexit_msr_protocol_enter, 1739 TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa), 1740 TP_ARGS(vcpu_id, ghcb_gpa), 1741 1742 TP_STRUCT__entry( 1743 __field(unsigned int, vcpu_id) 1744 __field(u64, ghcb_gpa) 1745 ), 1746 1747 TP_fast_assign( 1748 __entry->vcpu_id = vcpu_id; 1749 __entry->ghcb_gpa = ghcb_gpa; 1750 ), 1751 1752 TP_printk("vcpu %u, ghcb_gpa %016llx", 1753 __entry->vcpu_id, __entry->ghcb_gpa) 1754); 1755 1756/* 1757 * Tracepoint for the end of VMGEXIT MSR procotol processing 1758 */ 1759TRACE_EVENT(kvm_vmgexit_msr_protocol_exit, 1760 TP_PROTO(unsigned int vcpu_id, u64 ghcb_gpa, int result), 1761 TP_ARGS(vcpu_id, ghcb_gpa, result), 1762 1763 TP_STRUCT__entry( 1764 __field(unsigned int, vcpu_id) 1765 __field(u64, ghcb_gpa) 1766 __field(int, result) 1767 ), 1768 1769 TP_fast_assign( 1770 __entry->vcpu_id = vcpu_id; 1771 __entry->ghcb_gpa = ghcb_gpa; 1772 __entry->result = result; 1773 ), 1774 1775 TP_printk("vcpu %u, ghcb_gpa %016llx, result %d", 1776 __entry->vcpu_id, __entry->ghcb_gpa, __entry->result) 1777); 1778 1779/* 1780 * Tracepoint for the SEV-SNP page state change processing 1781 */ 1782#define psc_operation \ 1783 {SNP_PAGE_STATE_PRIVATE, "private"}, \ 1784 {SNP_PAGE_STATE_SHARED, "shared"} \ 1785 1786TRACE_EVENT(kvm_snp_psc, 1787 TP_PROTO(unsigned int vcpu_id, u64 pfn, u64 gpa, u8 op, int level), 1788 TP_ARGS(vcpu_id, pfn, gpa, op, level), 1789 1790 TP_STRUCT__entry( 1791 __field(int, vcpu_id) 1792 __field(u64, pfn) 1793 __field(u64, gpa) 1794 __field(u8, op) 1795 __field(int, level) 1796 ), 1797 1798 TP_fast_assign( 1799 __entry->vcpu_id = vcpu_id; 1800 __entry->pfn = pfn; 1801 __entry->gpa = gpa; 1802 __entry->op = op; 1803 __entry->level = level; 1804 ), 1805 1806 TP_printk("vcpu %u, pfn %llx, gpa %llx, op %s, level %d", 1807 __entry->vcpu_id, __entry->pfn, __entry->gpa, 1808 __print_symbolic(__entry->op, psc_operation), 1809 __entry->level) 1810); 1811 1812TRACE_EVENT(kvm_sev_es_unmap_ghcb, 1813 TP_PROTO(void *ghcb_sa, u64 ghcb_sa_gpa, u32 ghcb_sa_len, u32 ghcb_sa_alloc_len, bool ghcb_sa_sync, bool ghcb_in_use, u8 ghcb_sa0, u8 ghcb_sa1), 1814 TP_ARGS(ghcb_sa, ghcb_sa_gpa, ghcb_sa_len, ghcb_sa_alloc_len, ghcb_sa_sync, ghcb_in_use, ghcb_sa0, ghcb_sa1), 1815 1816 TP_STRUCT__entry( 1817 __field(u64, ghcb_sa_hva) 1818 __field(u64, ghcb_sa_gpa) 1819 __field(u32, ghcb_sa_len) 1820 __field(u32, ghcb_sa_alloc_len) 1821 __field(bool, ghcb_sa_sync) 1822 __field(bool, ghcb_in_use) 1823 __field(u8, ghcb_sa0) 1824 __field(u8, ghcb_sa1) 1825 ), 1826 1827 TP_fast_assign( 1828 __entry->ghcb_sa_hva = (u64)ghcb_sa; 1829 __entry->ghcb_sa_gpa = ghcb_sa_gpa; 1830 __entry->ghcb_sa_len = ghcb_sa_len; 1831 __entry->ghcb_sa_alloc_len = ghcb_sa_alloc_len; 1832 __entry->ghcb_sa_sync = ghcb_sa_sync; 1833 __entry->ghcb_in_use = ghcb_in_use; 1834 __entry->ghcb_sa0 = ghcb_sa0; 1835 __entry->ghcb_sa1 = ghcb_sa1; 1836 ), 1837 1838 TP_printk("ghcb_sa_hva %016llx, ghcb_gpa %016llx, ghcb_sa_len 0x%x, ghcb_sa_alloc_len 0x%x, ghcb_sa_sync %d, ghcb_in_use %d, ghcb_sa0 0x%x, ghcb_sa1 0x%x", 1839 __entry->ghcb_sa_hva, __entry->ghcb_sa_gpa, __entry->ghcb_sa_len, 1840 __entry->ghcb_sa_alloc_len, __entry->ghcb_sa_sync, __entry->ghcb_in_use, 1841 __entry->ghcb_sa0, __entry->ghcb_sa1) 1842); 1843 1844#endif /* _TRACE_KVM_H */ 1845 1846#undef TRACE_INCLUDE_PATH 1847#define TRACE_INCLUDE_PATH ../../arch/x86/kvm 1848#undef TRACE_INCLUDE_FILE 1849#define TRACE_INCLUDE_FILE trace 1850 1851/* This part must be outside protection */ 1852#include <trace/define_trace.h>