book3s_hv_rmhandlers.S (73051B)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
#define NAPPING_UNSPLIT	3

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR0	(SFS-56)
#define STACK_SLOT_DAWRX0	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
#define STACK_SLOT_AMR		(SFS-80)
#define STACK_SLOT_UAMOR	(SFS-88)
#define STACK_SLOT_FSCR		(SFS-96)

/*
 * Use the last LPID (all implemented LPID bits = 1) for partition switching.
 * This is reserved in the LPID allocator. POWER7 only implements 0x3ff, but
 * we write 0xfff into the LPID SPR anyway, which seems to work and just
 * ignores the top bits.
 */
#define LPID_RSVD	0xfff

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl	kvmhv_load_host_pmu

	/*
	 * Reload DEC. HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	extsw	r0, r0
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from Linux offline idle code.
 * Relocation is off
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
_GLOBAL(idle_kvm_start_guest)
	mfcr	r5
	mflr	r0
	std	r5, 8(r1)	// Save CR in caller's frame
	std	r0, 16(r1)	// Save LR in caller's frame
	// Create frame on emergency stack
	ld	r4, PACAEMERGSP(r13)
	stdu	r1, -SWITCH_FRAME_SIZE(r4)
	// Switch to new frame on emergency stack
	mr	r1, r4
	std	r3, 32(r1)	// Save SRR1 wakeup value
	SAVE_NVGPRS(r1)

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* kvm cede / napping does not come through here */
	lbz	r0,HSTATE_NAPPING(r13)
	twnei	r0,0

	b	1f

kvm_unsplit_wakeup:
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

1:

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	// About to go to guest, clear saved SRR1
	li	r0, 0
	std	r0, 32(r1)

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	lis	r6,0x7fff	/* MAX_INT@h */
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f

	/*
	 * Jump to idle_return_gpr_loss, which returns to the
	 * idle_kvm_start_guest caller.
	 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	// Return SRR1 wakeup value, or 0 if we went into the guest
	ld	r3, 32(r1)
	REST_NVGPRS(r1)
	ld	r1, 0(r1)	// Switch back to caller stack
	ld	r0, 16(r1)	// Reload LR
	ld	r5, 8(r1)	// Reload CR
	mtlr	r0
	mtcr	r5
	blr

53:
	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest.  The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them.  This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * So check whether an HMI is pending and handle it before napping.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, NAPPING_UNSPLIT
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f

	lwz	r7,KVM_LPID(r9)
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

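	/*
	 * SDR1 was changed above while the reserved LPID was in the LPID
	 * register; the guest's real LPID is only installed once its
	 * partition page table pointer is in place (see the LPID_RSVD
	 * comment at the top of this file).
	 */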
	/* See if we need to flush the TLB. */
	mr	r3, r9			/* kvm pointer */
	lhz	r4, PACAPACAINDEX(r13)	/* physical cpu number */
	li	r5, 0			/* nested vcpu pointer */
	bl	kvmppc_check_need_tlb_flush
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)
	cmpld	r7, r6
	beq	38f
	or	r7, r7, r6
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR0
	mfspr	r7, SPRN_DAWRX0
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR0(r1)
	std	r7, STACK_SLOT_DAWRX0(r1)
	std	r8, STACK_SLOT_IAMR(r1)
	mfspr	r5, SPRN_FSCR
	std	r5, STACK_SLOT_FSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	mfspr	r5, SPRN_AMR
	std	r5, STACK_SLOT_AMR(r1)
	mfspr	r6, SPRN_UAMOR
	std	r6, STACK_SLOT_UAMOR(r1)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	nop
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Load guest PMU registers; r4 = vcpu pointer here */
	mr	r3, r4
	bl	kvmhv_load_guest_pmu

	/* Load up FP, VMX and VSX registers */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_load_fp

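	/*
	 * Only now are the guest's non-volatile GPRs loaded: the TM
	 * restore path above documents that it trashes all non-volatile
	 * registers, so they must not be loaded any earlier.
	 */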
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
	LOAD_REG_ADDR(r5, dawr_force_enable)
	lbz	r5, 0(r5)
	cmpdi	r5, 0
	beq	1f
	ld	r5, VCPU_DAWR0(r4)
	ld	r6, VCPU_DAWRX0(r4)
	mtspr	SPRN_DAWR0, r5
	mtspr	SPRN_DAWRX0, r6
1:
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
8:

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6

	/* Restore state of CTRL run bit; the host currently has it set to 1 */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	li	r6,0
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	extsw	r3, r3
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	/* Clear out and reload the SLB */
	li	r6, 0
	slbmte	r6, r6
	PPC_SLBIA(6)
	ptesync

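	/*
	 * The host SLB has just been emptied; the host's bolted entries
	 * are reloaded from the SLB shadow area on the exit path (see
	 * "load host SLB entries" below).
	 */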
	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

deliver_guest_interrupt:	/* r4 = vcpu, r13 = paca */
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	cmpdi	r0, 0
	beq	71f
	mr	r3, r4
	bl	kvmppc_guest_entry_inject_int
	ld	r4, HSTATE_KVM_VCPU(r13)
71:
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)
	mtctr	r6
	mtxer	r7

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	mtlr	r5

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)

	ld	r0, VCPU_CR(r4)
	mtcr	r0

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b	.

secondary_too_late:
	li	r12, 0
	stw	r12, STACK_SLOT_TRAP(r1)
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_bypass

/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
976 */ 977 .globl kvmppc_interrupt_hv 978kvmppc_interrupt_hv: 979 /* 980 * Register contents: 981 * R9 = HSTATE_IN_GUEST 982 * R12 = (guest CR << 32) | interrupt vector 983 * R13 = PACA 984 * guest R12 saved in shadow VCPU SCRATCH0 985 * guest R13 saved in SPRN_SCRATCH0 986 * guest R9 saved in HSTATE_SCRATCH2 987 */ 988 /* We're now back in the host but in guest MMU context */ 989 cmpwi r9,KVM_GUEST_MODE_HOST_HV 990 beq kvmppc_bad_host_intr 991 li r9, KVM_GUEST_MODE_HOST_HV 992 stb r9, HSTATE_IN_GUEST(r13) 993 994 ld r9, HSTATE_KVM_VCPU(r13) 995 996 /* Save registers */ 997 998 std r0, VCPU_GPR(R0)(r9) 999 std r1, VCPU_GPR(R1)(r9) 1000 std r2, VCPU_GPR(R2)(r9) 1001 std r3, VCPU_GPR(R3)(r9) 1002 std r4, VCPU_GPR(R4)(r9) 1003 std r5, VCPU_GPR(R5)(r9) 1004 std r6, VCPU_GPR(R6)(r9) 1005 std r7, VCPU_GPR(R7)(r9) 1006 std r8, VCPU_GPR(R8)(r9) 1007 ld r0, HSTATE_SCRATCH2(r13) 1008 std r0, VCPU_GPR(R9)(r9) 1009 std r10, VCPU_GPR(R10)(r9) 1010 std r11, VCPU_GPR(R11)(r9) 1011 ld r3, HSTATE_SCRATCH0(r13) 1012 std r3, VCPU_GPR(R12)(r9) 1013 /* CR is in the high half of r12 */ 1014 srdi r4, r12, 32 1015 std r4, VCPU_CR(r9) 1016BEGIN_FTR_SECTION 1017 ld r3, HSTATE_CFAR(r13) 1018 std r3, VCPU_CFAR(r9) 1019END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1020BEGIN_FTR_SECTION 1021 ld r4, HSTATE_PPR(r13) 1022 std r4, VCPU_PPR(r9) 1023END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1024 1025 /* Restore R1/R2 so we can handle faults */ 1026 ld r1, HSTATE_HOST_R1(r13) 1027 ld r2, PACATOC(r13) 1028 1029 mfspr r10, SPRN_SRR0 1030 mfspr r11, SPRN_SRR1 1031 std r10, VCPU_SRR0(r9) 1032 std r11, VCPU_SRR1(r9) 1033 /* trap is in the low half of r12, clear CR from the high half */ 1034 clrldi r12, r12, 32 1035 andi. r0, r12, 2 /* need to read HSRR0/1? */ 1036 beq 1f 1037 mfspr r10, SPRN_HSRR0 1038 mfspr r11, SPRN_HSRR1 1039 clrrdi r12, r12, 2 10401: std r10, VCPU_PC(r9) 1041 std r11, VCPU_MSR(r9) 1042 1043 GET_SCRATCH0(r3) 1044 mflr r4 1045 std r3, VCPU_GPR(R13)(r9) 1046 std r4, VCPU_LR(r9) 1047 1048 stw r12,VCPU_TRAP(r9) 1049 1050 /* 1051 * Now that we have saved away SRR0/1 and HSRR0/1, 1052 * interrupts are recoverable in principle, so set MSR_RI. 1053 * This becomes important for relocation-on interrupts from 1054 * the guest, which we can get in radix mode on POWER9. 
1055 */ 1056 li r0, MSR_RI 1057 mtmsrd r0, 1 1058 1059#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1060 addi r3, r9, VCPU_TB_RMINTR 1061 mr r4, r9 1062 bl kvmhv_accumulate_time 1063 ld r5, VCPU_GPR(R5)(r9) 1064 ld r6, VCPU_GPR(R6)(r9) 1065 ld r7, VCPU_GPR(R7)(r9) 1066 ld r8, VCPU_GPR(R8)(r9) 1067#endif 1068 1069 /* Save HEIR (HV emulation assist reg) in emul_inst 1070 if this is an HEI (HV emulation interrupt, e40) */ 1071 li r3,KVM_INST_FETCH_FAILED 1072 stw r3,VCPU_LAST_INST(r9) 1073 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 1074 bne 11f 1075 mfspr r3,SPRN_HEIR 107611: stw r3,VCPU_HEIR(r9) 1077 1078 /* these are volatile across C function calls */ 1079 mfctr r3 1080 mfxer r4 1081 std r3, VCPU_CTR(r9) 1082 std r4, VCPU_XER(r9) 1083 1084 /* Save more register state */ 1085 mfdar r3 1086 mfdsisr r4 1087 std r3, VCPU_DAR(r9) 1088 stw r4, VCPU_DSISR(r9) 1089 1090 /* If this is a page table miss then see if it's theirs or ours */ 1091 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 1092 beq kvmppc_hdsi 1093 std r3, VCPU_FAULT_DAR(r9) 1094 stw r4, VCPU_FAULT_DSISR(r9) 1095 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE 1096 beq kvmppc_hisi 1097 1098 /* See if this is a leftover HDEC interrupt */ 1099 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1100 bne 2f 1101 mfspr r3,SPRN_HDEC 1102 extsw r3, r3 1103 cmpdi r3,0 1104 mr r4,r9 1105 bge fast_guest_return 11062: 1107 /* See if this is an hcall we can handle in real mode */ 1108 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL 1109 beq hcall_try_real_mode 1110 1111 /* Hypervisor doorbell - exit only if host IPI flag set */ 1112 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL 1113 bne 3f 1114 lbz r0, HSTATE_HOST_IPI(r13) 1115 cmpwi r0, 0 1116 beq maybe_reenter_guest 1117 b guest_exit_cont 11183: 1119 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */ 1120 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL 1121 bne 14f 1122 mfspr r3, SPRN_HFSCR 1123 std r3, VCPU_HFSCR(r9) 1124 b guest_exit_cont 112514: 1126 /* External interrupt ? */ 1127 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL 1128 beq kvmppc_guest_external 1129 /* See if it is a machine check */ 1130 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK 1131 beq machine_check_realmode 1132 /* Or a hypervisor maintenance interrupt */ 1133 cmpwi r12, BOOK3S_INTERRUPT_HMI 1134 beq hmi_realmode 1135 1136guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ 1137 1138#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1139 addi r3, r9, VCPU_TB_RMEXIT 1140 mr r4, r9 1141 bl kvmhv_accumulate_time 1142#endif 1143 1144 /* 1145 * Possibly flush the link stack here, before we do a blr in 1146 * kvmhv_switch_to_host. 1147 */ 11481: nop 1149 patch_site 1b patch__call_kvm_flush_link_stack 1150 1151 /* For hash guest, read the guest SLB and save it away */ 1152 li r5, 0 1153 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ 1154 mtctr r0 1155 li r6,0 1156 addi r7,r9,VCPU_SLB 11571: slbmfee r8,r6 1158 andis. r0,r8,SLB_ESID_V@h 1159 beq 2f 1160 add r8,r8,r6 /* put index in */ 1161 slbmfev r3,r6 1162 std r8,VCPU_SLB_E(r7) 1163 std r3,VCPU_SLB_V(r7) 1164 addi r7,r7,VCPU_SLB_SIZE 1165 addi r5,r5,1 11662: addi r6,r6,1 1167 bdnz 1b 1168 /* Finally clear out the SLB */ 1169 li r0,0 1170 slbmte r0,r0 1171 PPC_SLBIA(6) 1172 ptesync 1173 stw r5,VCPU_SLB_MAX(r9) 1174 1175 /* load host SLB entries */ 1176 ld r8,PACA_SLBSHADOWPTR(r13) 1177 1178 .rept SLB_NUM_BOLTED 1179 li r3, SLBSHADOW_SAVEAREA 1180 LDX_BE r5, r8, r3 1181 addi r3, r3, 8 1182 LDX_BE r6, r8, r3 1183 andis. 
r7,r5,SLB_ESID_V@h 1184 beq 1f 1185 slbmte r6,r5 11861: addi r8,r8,16 1187 .endr 1188 1189guest_bypass: 1190 stw r12, STACK_SLOT_TRAP(r1) 1191 1192 /* Save DEC */ 1193 /* Do this before kvmhv_commence_exit so we know TB is guest TB */ 1194 ld r3, HSTATE_KVM_VCORE(r13) 1195 mfspr r5,SPRN_DEC 1196 mftb r6 1197 extsw r5,r5 119816: add r5,r5,r6 1199 std r5,VCPU_DEC_EXPIRES(r9) 1200 1201 /* Increment exit count, poke other threads to exit */ 1202 mr r3, r12 1203 bl kvmhv_commence_exit 1204 nop 1205 ld r9, HSTATE_KVM_VCPU(r13) 1206 1207 /* Stop others sending VCPU interrupts to this physical CPU */ 1208 li r0, -1 1209 stw r0, VCPU_CPU(r9) 1210 stw r0, VCPU_THREAD_CPU(r9) 1211 1212 /* Save guest CTRL register, set runlatch to 1 if it was clear */ 1213 mfspr r6,SPRN_CTRLF 1214 stw r6,VCPU_CTRL(r9) 1215 andi. r0,r6,1 1216 bne 4f 1217 li r6,1 1218 mtspr SPRN_CTRLT,r6 12194: 1220 /* 1221 * Save the guest PURR/SPURR 1222 */ 1223 mfspr r5,SPRN_PURR 1224 mfspr r6,SPRN_SPURR 1225 ld r7,VCPU_PURR(r9) 1226 ld r8,VCPU_SPURR(r9) 1227 std r5,VCPU_PURR(r9) 1228 std r6,VCPU_SPURR(r9) 1229 subf r5,r7,r5 1230 subf r6,r8,r6 1231 1232 /* 1233 * Restore host PURR/SPURR and add guest times 1234 * so that the time in the guest gets accounted. 1235 */ 1236 ld r3,HSTATE_PURR(r13) 1237 ld r4,HSTATE_SPURR(r13) 1238 add r3,r3,r5 1239 add r4,r4,r6 1240 mtspr SPRN_PURR,r3 1241 mtspr SPRN_SPURR,r4 1242 1243BEGIN_FTR_SECTION 1244 b 8f 1245END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 1246 /* Save POWER8-specific registers */ 1247 mfspr r5, SPRN_IAMR 1248 mfspr r6, SPRN_PSPB 1249 mfspr r7, SPRN_FSCR 1250 std r5, VCPU_IAMR(r9) 1251 stw r6, VCPU_PSPB(r9) 1252 std r7, VCPU_FSCR(r9) 1253 mfspr r5, SPRN_IC 1254 mfspr r7, SPRN_TAR 1255 std r5, VCPU_IC(r9) 1256 std r7, VCPU_TAR(r9) 1257 mfspr r8, SPRN_EBBHR 1258 std r8, VCPU_EBBHR(r9) 1259 mfspr r5, SPRN_EBBRR 1260 mfspr r6, SPRN_BESCR 1261 mfspr r7, SPRN_PID 1262 mfspr r8, SPRN_WORT 1263 std r5, VCPU_EBBRR(r9) 1264 std r6, VCPU_BESCR(r9) 1265 stw r7, VCPU_GUEST_PID(r9) 1266 std r8, VCPU_WORT(r9) 1267 mfspr r5, SPRN_TCSCR 1268 mfspr r6, SPRN_ACOP 1269 mfspr r7, SPRN_CSIGR 1270 mfspr r8, SPRN_TACR 1271 std r5, VCPU_TCSCR(r9) 1272 std r6, VCPU_ACOP(r9) 1273 std r7, VCPU_CSIGR(r9) 1274 std r8, VCPU_TACR(r9) 1275BEGIN_FTR_SECTION 1276 ld r5, STACK_SLOT_FSCR(r1) 1277 mtspr SPRN_FSCR, r5 1278END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1279 /* 1280 * Restore various registers to 0, where non-zero values 1281 * set by the guest could disrupt the host. 
1282 */ 1283 li r0, 0 1284 mtspr SPRN_PSPB, r0 1285 mtspr SPRN_WORT, r0 1286 mtspr SPRN_TCSCR, r0 1287 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ 1288 li r0, 1 1289 sldi r0, r0, 31 1290 mtspr SPRN_MMCRS, r0 1291 1292 /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */ 1293 ld r8, STACK_SLOT_IAMR(r1) 1294 mtspr SPRN_IAMR, r8 1295 12968: /* Power7 jumps back in here */ 1297 mfspr r5,SPRN_AMR 1298 mfspr r6,SPRN_UAMOR 1299 std r5,VCPU_AMR(r9) 1300 std r6,VCPU_UAMOR(r9) 1301 ld r5,STACK_SLOT_AMR(r1) 1302 ld r6,STACK_SLOT_UAMOR(r1) 1303 mtspr SPRN_AMR, r5 1304 mtspr SPRN_UAMOR, r6 1305 1306 /* Switch DSCR back to host value */ 1307 mfspr r8, SPRN_DSCR 1308 ld r7, HSTATE_DSCR(r13) 1309 std r8, VCPU_DSCR(r9) 1310 mtspr SPRN_DSCR, r7 1311 1312 /* Save non-volatile GPRs */ 1313 std r14, VCPU_GPR(R14)(r9) 1314 std r15, VCPU_GPR(R15)(r9) 1315 std r16, VCPU_GPR(R16)(r9) 1316 std r17, VCPU_GPR(R17)(r9) 1317 std r18, VCPU_GPR(R18)(r9) 1318 std r19, VCPU_GPR(R19)(r9) 1319 std r20, VCPU_GPR(R20)(r9) 1320 std r21, VCPU_GPR(R21)(r9) 1321 std r22, VCPU_GPR(R22)(r9) 1322 std r23, VCPU_GPR(R23)(r9) 1323 std r24, VCPU_GPR(R24)(r9) 1324 std r25, VCPU_GPR(R25)(r9) 1325 std r26, VCPU_GPR(R26)(r9) 1326 std r27, VCPU_GPR(R27)(r9) 1327 std r28, VCPU_GPR(R28)(r9) 1328 std r29, VCPU_GPR(R29)(r9) 1329 std r30, VCPU_GPR(R30)(r9) 1330 std r31, VCPU_GPR(R31)(r9) 1331 1332 /* Save SPRGs */ 1333 mfspr r3, SPRN_SPRG0 1334 mfspr r4, SPRN_SPRG1 1335 mfspr r5, SPRN_SPRG2 1336 mfspr r6, SPRN_SPRG3 1337 std r3, VCPU_SPRG0(r9) 1338 std r4, VCPU_SPRG1(r9) 1339 std r5, VCPU_SPRG2(r9) 1340 std r6, VCPU_SPRG3(r9) 1341 1342 /* save FP state */ 1343 mr r3, r9 1344 bl kvmppc_save_fp 1345 1346#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1347BEGIN_FTR_SECTION 1348 b 91f 1349END_FTR_SECTION_IFCLR(CPU_FTR_TM) 1350 /* 1351 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) 1352 */ 1353 mr r3, r9 1354 ld r4, VCPU_MSR(r3) 1355 li r5, 0 /* don't preserve non-vol regs */ 1356 bl kvmppc_save_tm_hv 1357 nop 1358 ld r9, HSTATE_KVM_VCPU(r13) 135991: 1360#endif 1361 1362 /* Increment yield count if they have a VPA */ 1363 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1364 cmpdi r8, 0 1365 beq 25f 1366 li r4, LPPACA_YIELDCOUNT 1367 LWZX_BE r3, r8, r4 1368 addi r3, r3, 1 1369 STWX_BE r3, r8, r4 1370 li r3, 1 1371 stb r3, VCPU_VPA_DIRTY(r9) 137225: 1373 /* Save PMU registers if requested */ 1374 /* r8 and cr0.eq are live here */ 1375 mr r3, r9 1376 li r4, 1 1377 beq 21f /* if no VPA, save PMU stuff anyway */ 1378 lbz r4, LPPACA_PMCINUSE(r8) 137921: bl kvmhv_save_guest_pmu 1380 ld r9, HSTATE_KVM_VCPU(r13) 1381 1382 /* Restore host values of some registers */ 1383BEGIN_FTR_SECTION 1384 ld r5, STACK_SLOT_CIABR(r1) 1385 ld r6, STACK_SLOT_DAWR0(r1) 1386 ld r7, STACK_SLOT_DAWRX0(r1) 1387 mtspr SPRN_CIABR, r5 1388 /* 1389 * If the DAWR doesn't work, it's ok to write these here as 1390 * this value should always be zero 1391 */ 1392 mtspr SPRN_DAWR0, r6 1393 mtspr SPRN_DAWRX0, r7 1394END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1395 1396 /* 1397 * POWER7/POWER8 guest -> host partition switch code. 1398 * We don't have to lock against tlbies but we do 1399 * have to coordinate the hardware threads. 1400 * Here STACK_SLOT_TRAP(r1) contains the trap number. 
1401 */ 1402kvmhv_switch_to_host: 1403 /* Secondary threads wait for primary to do partition switch */ 1404 ld r5,HSTATE_KVM_VCORE(r13) 1405 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1406 lbz r3,HSTATE_PTID(r13) 1407 cmpwi r3,0 1408 beq 15f 1409 HMT_LOW 141013: lbz r3,VCORE_IN_GUEST(r5) 1411 cmpwi r3,0 1412 bne 13b 1413 HMT_MEDIUM 1414 b 16f 1415 1416 /* Primary thread waits for all the secondaries to exit guest */ 141715: lwz r3,VCORE_ENTRY_EXIT(r5) 1418 rlwinm r0,r3,32-8,0xff 1419 clrldi r3,r3,56 1420 cmpw r3,r0 1421 bne 15b 1422 isync 1423 1424 /* Did we actually switch to the guest at all? */ 1425 lbz r6, VCORE_IN_GUEST(r5) 1426 cmpwi r6, 0 1427 beq 19f 1428 1429 /* Primary thread switches back to host partition */ 1430 lwz r7,KVM_HOST_LPID(r4) 1431 ld r6,KVM_HOST_SDR1(r4) 1432 li r8,LPID_RSVD /* switch to reserved LPID */ 1433 mtspr SPRN_LPID,r8 1434 ptesync 1435 mtspr SPRN_SDR1,r6 /* switch to host page table */ 1436 mtspr SPRN_LPID,r7 1437 isync 1438 1439BEGIN_FTR_SECTION 1440 /* DPDES and VTB are shared between threads */ 1441 mfspr r7, SPRN_DPDES 1442 mfspr r8, SPRN_VTB 1443 std r7, VCORE_DPDES(r5) 1444 std r8, VCORE_VTB(r5) 1445 /* clear DPDES so we don't get guest doorbells in the host */ 1446 li r8, 0 1447 mtspr SPRN_DPDES, r8 1448END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1449 1450 /* Subtract timebase offset from timebase */ 1451 ld r8, VCORE_TB_OFFSET_APPL(r5) 1452 cmpdi r8,0 1453 beq 17f 1454 li r0, 0 1455 std r0, VCORE_TB_OFFSET_APPL(r5) 1456 mftb r6 /* current guest timebase */ 1457 subf r8,r8,r6 1458 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 1459 mftb r7 /* check if lower 24 bits overflowed */ 1460 clrldi r6,r6,40 1461 clrldi r7,r7,40 1462 cmpld r7,r6 1463 bge 17f 1464 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 1465 mtspr SPRN_TBU40,r8 1466 146717: 1468 /* 1469 * If this is an HMI, we called kvmppc_realmode_hmi_handler 1470 * above, which may or may not have already called 1471 * kvmppc_subcore_exit_guest. Fortunately, all that 1472 * kvmppc_subcore_exit_guest does is clear a flag, so calling 1473 * it again here is benign even if kvmppc_realmode_hmi_handler 1474 * has already called it. 1475 */ 1476 bl kvmppc_subcore_exit_guest 1477 nop 147830: ld r5,HSTATE_KVM_VCORE(r13) 1479 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1480 1481 /* Reset PCR */ 1482 ld r0, VCORE_PCR(r5) 1483 LOAD_REG_IMMEDIATE(r6, PCR_MASK) 1484 cmpld r0, r6 1485 beq 18f 1486 mtspr SPRN_PCR, r6 148718: 1488 /* Signal secondary CPUs to continue */ 1489 li r0, 0 1490 stb r0,VCORE_IN_GUEST(r5) 149119: lis r8,0x7fff /* MAX_INT@h */ 1492 mtspr SPRN_HDEC,r8 1493 149416: ld r8,KVM_HOST_LPCR(r4) 1495 mtspr SPRN_LPCR,r8 1496 isync 1497 1498#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING 1499 /* Finish timing, if we have a vcpu */ 1500 ld r4, HSTATE_KVM_VCPU(r13) 1501 cmpdi r4, 0 1502 li r3, 0 1503 beq 2f 1504 bl kvmhv_accumulate_time 15052: 1506#endif 1507 /* Unset guest mode */ 1508 li r0, KVM_GUEST_MODE_NONE 1509 stb r0, HSTATE_IN_GUEST(r13) 1510 1511 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */ 1512 ld r0, SFS+PPC_LR_STKOFF(r1) 1513 addi r1, r1, SFS 1514 mtlr r0 1515 blr 1516 1517.balign 32 1518.global kvm_flush_link_stack 1519kvm_flush_link_stack: 1520 /* Save LR into r0 */ 1521 mflr r0 1522 1523 /* Flush the link stack. On Power8 it's up to 32 entries in size. */ 1524 .rept 32 1525 bl .+4 1526 .endr 1527 1528 /* And on Power9 it's up to 64. 
*/ 1529BEGIN_FTR_SECTION 1530 .rept 32 1531 bl .+4 1532 .endr 1533END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1534 1535 /* Restore LR */ 1536 mtlr r0 1537 blr 1538 1539kvmppc_guest_external: 1540 /* External interrupt, first check for host_ipi. If this is 1541 * set, we know the host wants us out so let's do it now 1542 */ 1543 bl kvmppc_read_intr 1544 1545 /* 1546 * Restore the active volatile registers after returning from 1547 * a C function. 1548 */ 1549 ld r9, HSTATE_KVM_VCPU(r13) 1550 li r12, BOOK3S_INTERRUPT_EXTERNAL 1551 1552 /* 1553 * kvmppc_read_intr return codes: 1554 * 1555 * Exit to host (r3 > 0) 1556 * 1 An interrupt is pending that needs to be handled by the host 1557 * Exit guest and return to host by branching to guest_exit_cont 1558 * 1559 * 2 Passthrough that needs completion in the host 1560 * Exit guest and return to host by branching to guest_exit_cont 1561 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD 1562 * to indicate to the host to complete handling the interrupt 1563 * 1564 * Before returning to guest, we check if any CPU is heading out 1565 * to the host and if so, we head out also. If no CPUs are heading 1566 * check return values <= 0. 1567 * 1568 * Return to guest (r3 <= 0) 1569 * 0 No external interrupt is pending 1570 * -1 A guest wakeup IPI (which has now been cleared) 1571 * In either case, we return to guest to deliver any pending 1572 * guest interrupts. 1573 * 1574 * -2 A PCI passthrough external interrupt was handled 1575 * (interrupt was delivered directly to guest) 1576 * Return to guest to deliver any pending guest interrupts. 1577 */ 1578 1579 cmpdi r3, 1 1580 ble 1f 1581 1582 /* Return code = 2 */ 1583 li r12, BOOK3S_INTERRUPT_HV_RM_HARD 1584 stw r12, VCPU_TRAP(r9) 1585 b guest_exit_cont 1586 15871: /* Return code <= 1 */ 1588 cmpdi r3, 0 1589 bgt guest_exit_cont 1590 1591 /* Return code <= 0 */ 1592maybe_reenter_guest: 1593 ld r5, HSTATE_KVM_VCORE(r13) 1594 lwz r0, VCORE_ENTRY_EXIT(r5) 1595 cmpwi r0, 0x100 1596 mr r4, r9 1597 blt deliver_guest_interrupt 1598 b guest_exit_cont 1599 1600/* 1601 * Check whether an HDSI is an HPTE not found fault or something else. 1602 * If it is an HPTE not found fault that is due to the guest accessing 1603 * a page that they have mapped but which we have paged out, then 1604 * we continue on with the guest exit path. In all other cases, 1605 * reflect the HDSI to the guest as a DSI. 1606 */ 1607kvmppc_hdsi: 1608 mfspr r4, SPRN_HDAR 1609 mfspr r6, SPRN_HDSISR 1610 /* HPTE not found fault or protection fault? */ 1611 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h 1612 beq 1f /* if not, send it to the guest */ 1613 andi. r0, r11, MSR_DR /* data relocation enabled? */ 1614 beq 3f 1615 clrrdi r0, r4, 28 1616 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 1617 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT 1618 bne 7f /* if no SLB entry found */ 16194: std r4, VCPU_FAULT_DAR(r9) 1620 stw r6, VCPU_FAULT_DSISR(r9) 1621 1622 /* Search the hash table. 
*/ 1623 mr r3, r9 /* vcpu pointer */ 1624 li r7, 1 /* data fault */ 1625 bl kvmppc_hpte_hv_fault 1626 ld r9, HSTATE_KVM_VCPU(r13) 1627 ld r10, VCPU_PC(r9) 1628 ld r11, VCPU_MSR(r9) 1629 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 1630 cmpdi r3, 0 /* retry the instruction */ 1631 beq 6f 1632 cmpdi r3, -1 /* handle in kernel mode */ 1633 beq guest_exit_cont 1634 cmpdi r3, -2 /* MMIO emulation; need instr word */ 1635 beq 2f 1636 1637 /* Synthesize a DSI (or DSegI) for the guest */ 1638 ld r4, VCPU_FAULT_DAR(r9) 1639 mr r6, r3 16401: li r0, BOOK3S_INTERRUPT_DATA_STORAGE 1641 mtspr SPRN_DSISR, r6 16427: mtspr SPRN_DAR, r4 1643 mtspr SPRN_SRR0, r10 1644 mtspr SPRN_SRR1, r11 1645 mr r10, r0 1646 bl kvmppc_msr_interrupt 1647fast_interrupt_c_return: 16486: ld r7, VCPU_CTR(r9) 1649 ld r8, VCPU_XER(r9) 1650 mtctr r7 1651 mtxer r8 1652 mr r4, r9 1653 b fast_guest_return 1654 16553: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ 1656 ld r5, KVM_VRMA_SLB_V(r5) 1657 b 4b 1658 1659 /* If this is for emulated MMIO, load the instruction word */ 16602: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ 1661 1662 /* Set guest mode to 'jump over instruction' so if lwz faults 1663 * we'll just continue at the next IP. */ 1664 li r0, KVM_GUEST_MODE_SKIP 1665 stb r0, HSTATE_IN_GUEST(r13) 1666 1667 /* Do the access with MSR:DR enabled */ 1668 mfmsr r3 1669 ori r4, r3, MSR_DR /* Enable paging for data */ 1670 mtmsrd r4 1671 lwz r8, 0(r10) 1672 mtmsrd r3 1673 1674 /* Store the result */ 1675 stw r8, VCPU_LAST_INST(r9) 1676 1677 /* Unset guest mode. */ 1678 li r0, KVM_GUEST_MODE_HOST_HV 1679 stb r0, HSTATE_IN_GUEST(r13) 1680 b guest_exit_cont 1681 1682/* 1683 * Similarly for an HISI, reflect it to the guest as an ISI unless 1684 * it is an HPTE not found fault for a page that we have paged out. 1685 */ 1686kvmppc_hisi: 1687 andis. r0, r11, SRR1_ISI_NOPT@h 1688 beq 1f 1689 andi. r0, r11, MSR_IR /* instruction relocation enabled? */ 1690 beq 3f 1691 clrrdi r0, r10, 28 1692 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 1693 li r0, BOOK3S_INTERRUPT_INST_SEGMENT 1694 bne 7f /* if no SLB entry found */ 16954: 1696 /* Search the hash table. */ 1697 mr r3, r9 /* vcpu pointer */ 1698 mr r4, r10 1699 mr r6, r11 1700 li r7, 0 /* instruction fault */ 1701 bl kvmppc_hpte_hv_fault 1702 ld r9, HSTATE_KVM_VCPU(r13) 1703 ld r10, VCPU_PC(r9) 1704 ld r11, VCPU_MSR(r9) 1705 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE 1706 cmpdi r3, 0 /* retry the instruction */ 1707 beq fast_interrupt_c_return 1708 cmpdi r3, -1 /* handle in kernel mode */ 1709 beq guest_exit_cont 1710 1711 /* Synthesize an ISI (or ISegI) for the guest */ 1712 mr r11, r3 17131: li r0, BOOK3S_INTERRUPT_INST_STORAGE 17147: mtspr SPRN_SRR0, r10 1715 mtspr SPRN_SRR1, r11 1716 mr r10, r0 1717 bl kvmppc_msr_interrupt 1718 b fast_interrupt_c_return 1719 17203: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ 1721 ld r5, KVM_VRMA_SLB_V(r6) 1722 b 4b 1723 1724/* 1725 * Try to handle an hcall in real mode. 1726 * Returns to the guest if we handle it, or continues on up to 1727 * the kernel if we can't (i.e. if we don't have a handler for 1728 * it, or if the handler returns H_TOO_HARD). 1729 * 1730 * r5 - r8 contain hcall args, 1731 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca 1732 */ 1733hcall_try_real_mode: 1734 ld r3,VCPU_GPR(R3)(r9) 1735 andi. 
r0,r11,MSR_PR 1736 /* sc 1 from userspace - reflect to guest syscall */ 1737 bne sc_1_fast_return 1738 clrrdi r3,r3,2 1739 cmpldi r3,hcall_real_table_end - hcall_real_table 1740 bge guest_exit_cont 1741 /* See if this hcall is enabled for in-kernel handling */ 1742 ld r4, VCPU_KVM(r9) 1743 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */ 1744 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */ 1745 add r4, r4, r0 1746 ld r0, KVM_ENABLED_HCALLS(r4) 1747 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */ 1748 srd r0, r0, r4 1749 andi. r0, r0, 1 1750 beq guest_exit_cont 1751 /* Get pointer to handler, if any, and call it */ 1752 LOAD_REG_ADDR(r4, hcall_real_table) 1753 lwax r3,r3,r4 1754 cmpwi r3,0 1755 beq guest_exit_cont 1756 add r12,r3,r4 1757 mtctr r12 1758 mr r3,r9 /* get vcpu pointer */ 1759 ld r4,VCPU_GPR(R4)(r9) 1760 bctrl 1761 cmpdi r3,H_TOO_HARD 1762 beq hcall_real_fallback 1763 ld r4,HSTATE_KVM_VCPU(r13) 1764 std r3,VCPU_GPR(R3)(r4) 1765 ld r10,VCPU_PC(r4) 1766 ld r11,VCPU_MSR(r4) 1767 b fast_guest_return 1768 1769sc_1_fast_return: 1770 mtspr SPRN_SRR0,r10 1771 mtspr SPRN_SRR1,r11 1772 li r10, BOOK3S_INTERRUPT_SYSCALL 1773 bl kvmppc_msr_interrupt 1774 mr r4,r9 1775 b fast_guest_return 1776 1777 /* We've attempted a real mode hcall, but it's punted it back 1778 * to userspace. We need to restore some clobbered volatiles 1779 * before resuming the pass-it-to-qemu path */ 1780hcall_real_fallback: 1781 li r12,BOOK3S_INTERRUPT_SYSCALL 1782 ld r9, HSTATE_KVM_VCPU(r13) 1783 1784 b guest_exit_cont 1785 1786 .globl hcall_real_table 1787hcall_real_table: 1788 .long 0 /* 0 - unused */ 1789 .long DOTSYM(kvmppc_h_remove) - hcall_real_table 1790 .long DOTSYM(kvmppc_h_enter) - hcall_real_table 1791 .long DOTSYM(kvmppc_h_read) - hcall_real_table 1792 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table 1793 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table 1794 .long DOTSYM(kvmppc_h_protect) - hcall_real_table 1795 .long 0 /* 0x1c */ 1796 .long 0 /* 0x20 */ 1797 .long 0 /* 0x24 - H_SET_SPRG0 */ 1798 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table 1799 .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table 1800 .long 0 /* 0x30 */ 1801 .long 0 /* 0x34 */ 1802 .long 0 /* 0x38 */ 1803 .long 0 /* 0x3c */ 1804 .long 0 /* 0x40 */ 1805 .long 0 /* 0x44 */ 1806 .long 0 /* 0x48 */ 1807 .long 0 /* 0x4c */ 1808 .long 0 /* 0x50 */ 1809 .long 0 /* 0x54 */ 1810 .long 0 /* 0x58 */ 1811 .long 0 /* 0x5c */ 1812 .long 0 /* 0x60 */ 1813#ifdef CONFIG_KVM_XICS 1814 .long DOTSYM(xics_rm_h_eoi) - hcall_real_table 1815 .long DOTSYM(xics_rm_h_cppr) - hcall_real_table 1816 .long DOTSYM(xics_rm_h_ipi) - hcall_real_table 1817 .long 0 /* 0x70 - H_IPOLL */ 1818 .long DOTSYM(xics_rm_h_xirr) - hcall_real_table 1819#else 1820 .long 0 /* 0x64 - H_EOI */ 1821 .long 0 /* 0x68 - H_CPPR */ 1822 .long 0 /* 0x6c - H_IPI */ 1823 .long 0 /* 0x70 - H_IPOLL */ 1824 .long 0 /* 0x74 - H_XIRR */ 1825#endif 1826 .long 0 /* 0x78 */ 1827 .long 0 /* 0x7c */ 1828 .long 0 /* 0x80 */ 1829 .long 0 /* 0x84 */ 1830 .long 0 /* 0x88 */ 1831 .long 0 /* 0x8c */ 1832 .long 0 /* 0x90 */ 1833 .long 0 /* 0x94 */ 1834 .long 0 /* 0x98 */ 1835 .long 0 /* 0x9c */ 1836 .long 0 /* 0xa0 */ 1837 .long 0 /* 0xa4 */ 1838 .long 0 /* 0xa8 */ 1839 .long 0 /* 0xac */ 1840 .long 0 /* 0xb0 */ 1841 .long 0 /* 0xb4 */ 1842 .long 0 /* 0xb8 */ 1843 .long 0 /* 0xbc */ 1844 .long 0 /* 0xc0 */ 1845 .long 0 /* 0xc4 */ 1846 .long 0 /* 0xc8 */ 1847 .long 0 /* 0xcc */ 1848 .long 0 /* 0xd0 */ 1849 .long 0 /* 0xd4 */ 1850 .long 0 /* 0xd8 */ 1851 .long 0 /* 0xdc */ 1852 .long 
DOTSYM(kvmppc_h_cede) - hcall_real_table 1853 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table 1854 .long 0 /* 0xe8 */ 1855 .long 0 /* 0xec */ 1856 .long 0 /* 0xf0 */ 1857 .long 0 /* 0xf4 */ 1858 .long 0 /* 0xf8 */ 1859 .long 0 /* 0xfc */ 1860 .long 0 /* 0x100 */ 1861 .long 0 /* 0x104 */ 1862 .long 0 /* 0x108 */ 1863 .long 0 /* 0x10c */ 1864 .long 0 /* 0x110 */ 1865 .long 0 /* 0x114 */ 1866 .long 0 /* 0x118 */ 1867 .long 0 /* 0x11c */ 1868 .long 0 /* 0x120 */ 1869 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table 1870 .long 0 /* 0x128 */ 1871 .long 0 /* 0x12c */ 1872 .long 0 /* 0x130 */ 1873 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table 1874 .long 0 /* 0x138 */ 1875 .long 0 /* 0x13c */ 1876 .long 0 /* 0x140 */ 1877 .long 0 /* 0x144 */ 1878 .long 0 /* 0x148 */ 1879 .long 0 /* 0x14c */ 1880 .long 0 /* 0x150 */ 1881 .long 0 /* 0x154 */ 1882 .long 0 /* 0x158 */ 1883 .long 0 /* 0x15c */ 1884 .long 0 /* 0x160 */ 1885 .long 0 /* 0x164 */ 1886 .long 0 /* 0x168 */ 1887 .long 0 /* 0x16c */ 1888 .long 0 /* 0x170 */ 1889 .long 0 /* 0x174 */ 1890 .long 0 /* 0x178 */ 1891 .long 0 /* 0x17c */ 1892 .long 0 /* 0x180 */ 1893 .long 0 /* 0x184 */ 1894 .long 0 /* 0x188 */ 1895 .long 0 /* 0x18c */ 1896 .long 0 /* 0x190 */ 1897 .long 0 /* 0x194 */ 1898 .long 0 /* 0x198 */ 1899 .long 0 /* 0x19c */ 1900 .long 0 /* 0x1a0 */ 1901 .long 0 /* 0x1a4 */ 1902 .long 0 /* 0x1a8 */ 1903 .long 0 /* 0x1ac */ 1904 .long 0 /* 0x1b0 */ 1905 .long 0 /* 0x1b4 */ 1906 .long 0 /* 0x1b8 */ 1907 .long 0 /* 0x1bc */ 1908 .long 0 /* 0x1c0 */ 1909 .long 0 /* 0x1c4 */ 1910 .long 0 /* 0x1c8 */ 1911 .long 0 /* 0x1cc */ 1912 .long 0 /* 0x1d0 */ 1913 .long 0 /* 0x1d4 */ 1914 .long 0 /* 0x1d8 */ 1915 .long 0 /* 0x1dc */ 1916 .long 0 /* 0x1e0 */ 1917 .long 0 /* 0x1e4 */ 1918 .long 0 /* 0x1e8 */ 1919 .long 0 /* 0x1ec */ 1920 .long 0 /* 0x1f0 */ 1921 .long 0 /* 0x1f4 */ 1922 .long 0 /* 0x1f8 */ 1923 .long 0 /* 0x1fc */ 1924 .long 0 /* 0x200 */ 1925 .long 0 /* 0x204 */ 1926 .long 0 /* 0x208 */ 1927 .long 0 /* 0x20c */ 1928 .long 0 /* 0x210 */ 1929 .long 0 /* 0x214 */ 1930 .long 0 /* 0x218 */ 1931 .long 0 /* 0x21c */ 1932 .long 0 /* 0x220 */ 1933 .long 0 /* 0x224 */ 1934 .long 0 /* 0x228 */ 1935 .long 0 /* 0x22c */ 1936 .long 0 /* 0x230 */ 1937 .long 0 /* 0x234 */ 1938 .long 0 /* 0x238 */ 1939 .long 0 /* 0x23c */ 1940 .long 0 /* 0x240 */ 1941 .long 0 /* 0x244 */ 1942 .long 0 /* 0x248 */ 1943 .long 0 /* 0x24c */ 1944 .long 0 /* 0x250 */ 1945 .long 0 /* 0x254 */ 1946 .long 0 /* 0x258 */ 1947 .long 0 /* 0x25c */ 1948 .long 0 /* 0x260 */ 1949 .long 0 /* 0x264 */ 1950 .long 0 /* 0x268 */ 1951 .long 0 /* 0x26c */ 1952 .long 0 /* 0x270 */ 1953 .long 0 /* 0x274 */ 1954 .long 0 /* 0x278 */ 1955 .long 0 /* 0x27c */ 1956 .long 0 /* 0x280 */ 1957 .long 0 /* 0x284 */ 1958 .long 0 /* 0x288 */ 1959 .long 0 /* 0x28c */ 1960 .long 0 /* 0x290 */ 1961 .long 0 /* 0x294 */ 1962 .long 0 /* 0x298 */ 1963 .long 0 /* 0x29c */ 1964 .long 0 /* 0x2a0 */ 1965 .long 0 /* 0x2a4 */ 1966 .long 0 /* 0x2a8 */ 1967 .long 0 /* 0x2ac */ 1968 .long 0 /* 0x2b0 */ 1969 .long 0 /* 0x2b4 */ 1970 .long 0 /* 0x2b8 */ 1971 .long 0 /* 0x2bc */ 1972 .long 0 /* 0x2c0 */ 1973 .long 0 /* 0x2c4 */ 1974 .long 0 /* 0x2c8 */ 1975 .long 0 /* 0x2cc */ 1976 .long 0 /* 0x2d0 */ 1977 .long 0 /* 0x2d4 */ 1978 .long 0 /* 0x2d8 */ 1979 .long 0 /* 0x2dc */ 1980 .long 0 /* 0x2e0 */ 1981 .long 0 /* 0x2e4 */ 1982 .long 0 /* 0x2e8 */ 1983 .long 0 /* 0x2ec */ 1984 .long 0 /* 0x2f0 */ 1985 .long 0 /* 0x2f4 */ 1986 .long 0 /* 0x2f8 */ 1987#ifdef CONFIG_KVM_XICS 1988 .long DOTSYM(xics_rm_h_xirr_x) - 
hcall_real_table 1989#else 1990 .long 0 /* 0x2fc - H_XIRR_X*/ 1991#endif 1992 .long DOTSYM(kvmppc_rm_h_random) - hcall_real_table 1993 .globl hcall_real_table_end 1994hcall_real_table_end: 1995 1996_GLOBAL_TOC(kvmppc_h_set_xdabr) 1997EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) 1998 andi. r0, r5, DABRX_USER | DABRX_KERNEL 1999 beq 6f 2000 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI 2001 andc. r0, r5, r0 2002 beq 3f 20036: li r3, H_PARAMETER 2004 blr 2005 2006_GLOBAL_TOC(kvmppc_h_set_dabr) 2007EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr) 2008 li r5, DABRX_USER | DABRX_KERNEL 20093: 2010BEGIN_FTR_SECTION 2011 b 2f 2012END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2013 std r4,VCPU_DABR(r3) 2014 stw r5, VCPU_DABRX(r3) 2015 mtspr SPRN_DABRX, r5 2016 /* Work around P7 bug where DABR can get corrupted on mtspr */ 20171: mtspr SPRN_DABR,r4 2018 mfspr r5, SPRN_DABR 2019 cmpd r4, r5 2020 bne 1b 2021 isync 2022 li r3,0 2023 blr 2024 20252: 2026 LOAD_REG_ADDR(r11, dawr_force_enable) 2027 lbz r11, 0(r11) 2028 cmpdi r11, 0 2029 bne 3f 2030 li r3, H_HARDWARE 2031 blr 20323: 2033 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ 2034 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW 2035 rlwimi r5, r4, 2, DAWRX_WT 2036 clrrdi r4, r4, 3 2037 std r4, VCPU_DAWR0(r3) 2038 std r5, VCPU_DAWRX0(r3) 2039 /* 2040 * If came in through the real mode hcall handler then it is necessary 2041 * to write the registers since the return path won't. Otherwise it is 2042 * sufficient to store then in the vcpu struct as they will be loaded 2043 * next time the vcpu is run. 2044 */ 2045 mfmsr r6 2046 andi. r6, r6, MSR_DR /* in real mode? */ 2047 bne 4f 2048 mtspr SPRN_DAWR0, r4 2049 mtspr SPRN_DAWRX0, r5 20504: li r3, 0 2051 blr 2052 2053_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ 2054 ori r11,r11,MSR_EE 2055 std r11,VCPU_MSR(r3) 2056 li r0,1 2057 stb r0,VCPU_CEDED(r3) 2058 sync /* order setting ceded vs. testing prodded */ 2059 lbz r5,VCPU_PRODDED(r3) 2060 cmpwi r5,0 2061 bne kvm_cede_prodded 2062 li r12,0 /* set trap to 0 to say hcall is handled */ 2063 stw r12,VCPU_TRAP(r3) 2064 li r0,H_SUCCESS 2065 std r0,VCPU_GPR(R3)(r3) 2066 2067 /* 2068 * Set our bit in the bitmask of napping threads unless all the 2069 * other threads are already napping, in which case we send this 2070 * up to the host. 2071 */ 2072 ld r5,HSTATE_KVM_VCORE(r13) 2073 lbz r6,HSTATE_PTID(r13) 2074 lwz r8,VCORE_ENTRY_EXIT(r5) 2075 clrldi r8,r8,56 2076 li r0,1 2077 sld r0,r0,r6 2078 addi r6,r5,VCORE_NAPPING_THREADS 207931: lwarx r4,0,r6 2080 or r4,r4,r0 2081 cmpw r4,r8 2082 beq kvm_cede_exit 2083 stwcx. r4,0,r6 2084 bne 31b 2085 /* order napping_threads update vs testing entry_exit_map */ 2086 isync 2087 li r0,NAPPING_CEDE 2088 stb r0,HSTATE_NAPPING(r13) 2089 lwz r7,VCORE_ENTRY_EXIT(r5) 2090 cmpwi r7,0x100 2091 bge 33f /* another thread already exiting */ 2092 2093/* 2094 * Although not specifically required by the architecture, POWER7 2095 * preserves the following registers in nap mode, even if an SMT mode 2096 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3, 2097 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. 

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std r14, VCPU_GPR(R14)(r3)
	std r15, VCPU_GPR(R15)(r3)
	std r16, VCPU_GPR(R16)(r3)
	std r17, VCPU_GPR(R17)(r3)
	std r18, VCPU_GPR(R18)(r3)
	std r19, VCPU_GPR(R19)(r3)
	std r20, VCPU_GPR(R20)(r3)
	std r21, VCPU_GPR(R21)(r3)
	std r22, VCPU_GPR(R22)(r3)
	std r23, VCPU_GPR(R23)(r3)
	std r24, VCPU_GPR(R24)(r3)
	std r25, VCPU_GPR(R25)(r3)
	std r26, VCPU_GPR(R26)(r3)
	std r27, VCPU_GPR(R27)(r3)
	std r28, VCPU_GPR(R28)(r3)
	std r29, VCPU_GPR(R29)(r3)
	std r30, VCPU_GPR(R30)(r3)
	std r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b 91f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	ld r3, HSTATE_KVM_VCPU(r13)
	ld r4, VCPU_MSR(r3)
	li r5, 0		/* don't preserve non-vol regs */
	bl kvmppc_save_tm_hv
	nop
91:
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr r3, SPRN_DEC
	mfspr r4, SPRN_HDEC
	mftb r5
	extsw r3, r3
	extsw r4, r4
	cmpd r3, r4
	ble 67f
	mtspr SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	add r3, r3, r5
	ld r4, HSTATE_KVM_VCPU(r13)
	std r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld r4, HSTATE_KVM_VCPU(r13)
	addi r3, r4, VCPU_TB_CEDE
	bl kvmhv_accumulate_time
#endif
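The sequence just above clamps the decrementer so the thread wakes no later than the end of its timeslice, then records when the guest decrementer would have expired in timebase terms. As a plain C sketch of the arithmetic (DEC and HDEC treated as signed 32-bit values, matching the extsw instructions):

#include <stdint.h>

struct dec_state {
	int64_t dec_to_program;	/* value to program into DEC before napping */
	uint64_t dec_expires;	/* guest DEC expiry, in timebase ticks */
};

static struct dec_state clamp_dec_for_nap(int32_t dec, int32_t hdec, uint64_t tb)
{
	struct dec_state s;
	int64_t d = dec;	/* extsw: sign-extend to 64 bits */
	int64_t h = hdec;

	/* wake no later than the end of the timeslice (HDEC can't wake us) */
	s.dec_to_program = (d > h) ? h : d;
	/* remember when the guest decrementer would have hit zero */
	s.dec_expires = tb + (uint64_t)d;
	return s;
}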

	lis r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/* Go back to host stack */
	ld r1, HSTATE_HOST_R1(r13)

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	li r0,0
	mtspr SPRN_CTRLT, r0

	li r0,1
	stb r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr r5,SPRN_LPCR
	ori r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori r5, r5, LPCR_PECEDH
	rlwimi r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
	li r3, PNV_THREAD_NAP
	mtspr SPRN_LPCR,r5
	isync

	bl isa206_idle_insn_mayloss

	li r0,1
	mtspr SPRN_CTRLT, r0

	mtspr SPRN_SRR1, r3

	li r0, 0
	stb r0, PACA_FTRACE_ENABLED(r13)

	li r0, KVM_HWTHREAD_IN_KVM
	stb r0, HSTATE_HWTHREAD_STATE(r13)

	lbz r0, HSTATE_NAPPING(r13)
	cmpwi r0, NAPPING_CEDE
	beq kvm_end_cede
	cmpwi r0, NAPPING_NOVCPU
	beq kvm_novcpu_wakeup
	cmpwi r0, NAPPING_UNSPLIT
	beq kvm_unsplit_wakeup
	twi 31,0,0 /* Nap state must not be zero */

33:	mr r4, r3
	li r3, 0
	li r12, 0
	b 34f
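kvm_do_nap builds the LPCR value that controls which events can wake the napping thread: PECE0/PECE1 always (external interrupts and the decrementer), plus PECEDH on POWER8 for hypervisor doorbells, and PECEDP only when ceding (r3 carries the PECEDP bit, or zero on the no-vcpu path). A hedged C sketch of that computation; the bit values below are illustrative placeholders, the real masks live in asm/reg.h:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative placeholder values; see asm/reg.h for the real definitions. */
#define LPCR_PECE0	(1ull << 14)	/* wake on external interrupts */
#define LPCR_PECE1	(1ull << 13)	/* wake on decrementer */
#define LPCR_PECEDH	(1ull << 17)	/* wake on hypervisor doorbell (P8) */
#define LPCR_PECEDP	(1ull << 16)	/* wake on privileged (OS) doorbell (P8) */

static uint64_t nap_lpcr(uint64_t lpcr, bool power8, bool ceding)
{
	lpcr |= LPCR_PECE0 | LPCR_PECE1;
	if (power8) {
		lpcr |= LPCR_PECEDH;
		if (ceding)		/* r3 holds LPCR_PECEDP, else 0 */
			lpcr |= LPCR_PECEDP;
		else			/* rlwimi inserts a zero bit */
			lpcr &= ~LPCR_PECEDP;
	}
	return lpcr;
}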

kvm_end_cede:
	/* Woken by external or decrementer interrupt */

	/* get vcpu pointer */
	ld r4, HSTATE_KVM_VCPU(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r4, VCPU_TB_RMINTR
	bl kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b 91f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr r3, r4
	ld r4, VCPU_MSR(r3)
	li r5, 0		/* don't preserve non-vol regs */
	bl kvmppc_restore_tm_hv
	nop
	ld r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* load up FP state */
	bl kvmppc_load_fp

	/* Restore guest decrementer */
	ld r3, VCPU_DEC_EXPIRES(r4)
	mftb r7
	subf r3, r7, r3
	mtspr SPRN_DEC, r3

	/* Load NV GPRS */
	ld r14, VCPU_GPR(R14)(r4)
	ld r15, VCPU_GPR(R15)(r4)
	ld r16, VCPU_GPR(R16)(r4)
	ld r17, VCPU_GPR(R17)(r4)
	ld r18, VCPU_GPR(R18)(r4)
	ld r19, VCPU_GPR(R19)(r4)
	ld r20, VCPU_GPR(R20)(r4)
	ld r21, VCPU_GPR(R21)(r4)
	ld r22, VCPU_GPR(R22)(r4)
	ld r23, VCPU_GPR(R23)(r4)
	ld r24, VCPU_GPR(R24)(r4)
	ld r25, VCPU_GPR(R25)(r4)
	ld r26, VCPU_GPR(R26)(r4)
	ld r27, VCPU_GPR(R27)(r4)
	ld r28, VCPU_GPR(R28)(r4)
	ld r29, VCPU_GPR(R29)(r4)
	ld r30, VCPU_GPR(R30)(r4)
	ld r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason
	 * r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: it gets checked further down:
	 * should not modify r3 until this check is done.
	 */
	ld r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld r5,HSTATE_KVM_VCORE(r13)
	lbz r7,HSTATE_PTID(r13)
	li r0,1
	sld r0,r0,r7
	addi r6,r5,VCORE_NAPPING_THREADS
32:	lwarx r7,0,r6
	andc r7,r7,r0
	stwcx. r7,0,r6
	bne 32b
	li r0,0
	stb r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw r12, VCPU_TRAP(r4)
	mr r9, r4
	cmpdi r3, 0
	bgt guest_exit_cont
	b maybe_reenter_guest

	/* cede when the vcpu has already been prodded */
kvm_cede_prodded:
	li r0,0
	stb r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb r0,VCPU_CEDED(r3)
	li r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld r9, HSTATE_KVM_VCPU(r13)
	b guest_exit_cont

	/* Try to do machine check recovery in real mode */
machine_check_realmode:
	mr r3, r9		/* get vcpu pointer */
	bl kvmppc_realmode_machine_check
	nop
	/* all machine checks go to virtual mode for further handling */
	ld r9, HSTATE_KVM_VCPU(r13)
	li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	b guest_exit_cont

/*
 * Call C code to handle a HMI in real mode.
 * Only the primary thread does the call, secondary threads are handled
 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
 * r9 points to the vcpu on entry
 */
hmi_realmode:
	lbz r0, HSTATE_PTID(r13)
	cmpwi r0, 0
	bne guest_exit_cont
	bl kvmppc_realmode_hmi_handler
	ld r9, HSTATE_KVM_VCPU(r13)
	li r12, BOOK3S_INTERRUPT_HMI
	b guest_exit_cont

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi r6, 8			/* was it an external interrupt? */
	beq 7f				/* if so, see what it was */
	li r3, 0
	li r12, 0
	cmpwi r6, 6			/* was it the decrementer? */
	beq 0f
BEGIN_FTR_SECTION
	cmpwi r6, 5			/* privileged doorbell? */
	beq 0f
	cmpwi r6, 3			/* hypervisor doorbell? */
	beq 3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi r6, 0xa			/* Hypervisor maintenance ? */
	beq 4f
	li r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li r3, 1
	lbz r0, HSTATE_HOST_IPI(r13)
	cmpwi r0, 0
	bnelr
	/* if not, return -1 */
	li r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li r12, BOOK3S_INTERRUPT_HMI
	li r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr r0
	std r0, PPC_LR_STKOFF(r1)
	stdu r1, -PPC_MIN_STKFRM(r1)
	bl kvmppc_read_intr
	nop
	li r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi r3, 1
	ble 1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return to the host to complete handling the
	 * interrupt. The trap reason is expected in r12 by the guest
	 * exit code.
	 */
	li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi r1, r1, PPC_MIN_STKFRM
	mtlr r0
	blr
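kvmppc_check_wake_reason extracts the wake-reason field from SRR1 (bits 42:45 on POWER8, via the rlwinm) and turns it into the return convention described in its comment. For illustration only, a C rendering of that decode; the reason codes are the ones the assembly compares against, the trap numbers are quoted from memory of kvm_asm.h and should be treated as illustrative, and read_intr() stands in for kvmppc_read_intr:

#include <stdint.h>

#define WAKE_HV_DOORBELL	3
#define WAKE_PRIV_DOORBELL	5
#define WAKE_DECREMENTER	6
#define WAKE_EXTERNAL		8
#define WAKE_HMI		0xa

#define INT_EXTERNAL	0x500	/* as noted in the comment above */
#define INT_H_DOORBELL	0xe80	/* illustrative vector number */
#define INT_HMI		0xe60	/* illustrative vector number */
#define INT_HV_RM_HARD	0x5555	/* synthetic trap value, illustrative */

/* return convention: 0 nothing to do, 1 host must handle, -1 guest wakeup */
static int check_wake_reason_p8(uint64_t srr1, int *trap,
				int (*read_intr)(void), int host_ipi)
{
	unsigned int reason = (srr1 >> 18) & 0xf;	/* SRR1[42:45] */
	int r;

	*trap = 0;
	switch (reason) {
	case WAKE_DECREMENTER:
	case WAKE_PRIV_DOORBELL:
		return 0;			/* nothing to do */
	case WAKE_HV_DOORBELL:
		*trap = INT_H_DOORBELL;		/* doorbell is cleared with msgclr */
		return host_ipi ? 1 : -1;	/* host IPI vs guest wakeup */
	case WAKE_HMI:
		*trap = INT_HMI;
		return 1;
	case WAKE_EXTERNAL:
		r = read_intr();		/* may return -1, 0, 1 or 2 */
		*trap = INT_EXTERNAL;
		if (r > 1)			/* PCI passthrough: finish in the host */
			*trap = INT_HV_RM_HARD;
		return r;
	default:
		return 1;			/* anything else: host handles it */
	}
}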

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr r30
	mr r31,r3
	mfmsr r5
	ori r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd r8
	addi r3,r3,VCPU_FPRS
	bl store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi r3,r31,VCPU_VRS
	bl store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr r6,SPRN_VRSAVE
	stw r6,VCPU_VRSAVE(r31)
	mtlr r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr r30
	mr r31,r4
	mfmsr r9
	ori r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd r8
	addi r3,r4,VCPU_FPRS
	bl load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi r3,r31,VCPU_VRS
	bl load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz r7,VCPU_VRSAVE(r31)
	mtspr SPRN_VRSAVE,r7
	mtlr r30
	mr r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct and r4 containing
 * the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * If r5 == 0, this can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
_GLOBAL_TOC(kvmppc_save_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
	/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
	b __kvmppc_save_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

	lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi r0, 0
	beq __kvmppc_save_tm

	/* The following code handles the fake_suspend = 1 case */
	mflr r0
	std r0, PPC_LR_STKOFF(r1)
	stdu r1, -TM_FRAME_SIZE(r1)

	/* Turn on TM. */
	mfmsr r8
	li r0, 1
	rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd r8

	rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
	beq 4f
BEGIN_FTR_SECTION
	bl pnv_power9_force_smt4_catch
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

	/*
	 * It's possible that treclaim. may modify registers, if we have lost
	 * track of fake-suspend state in the guest due to it using rfscv.
	 * Save and restore registers in case this occurs.
	 */
	mfspr r3, SPRN_DSCR
	mfspr r4, SPRN_XER
	mfspr r5, SPRN_AMR
	/* SPRN_TAR would need to be saved here if the kernel ever used it */
	mfcr r12
	SAVE_NVGPRS(r1)
	SAVE_GPR(2, r1)
	SAVE_GPR(3, r1)
	SAVE_GPR(4, r1)
	SAVE_GPR(5, r1)
	stw r12, 8(r1)
	std r1, HSTATE_HOST_R1(r13)

	/* We have to treclaim here because that's the only way to do S->N */
	li r3, TM_CAUSE_KVM_RESCHED
	TRECLAIM(R3)

	GET_PACA(r13)
	ld r1, HSTATE_HOST_R1(r13)
	REST_GPR(2, r1)
	REST_GPR(3, r1)
	REST_GPR(4, r1)
	REST_GPR(5, r1)
	lwz r12, 8(r1)
	REST_NVGPRS(r1)
	mtspr SPRN_DSCR, r3
	mtspr SPRN_XER, r4
	mtspr SPRN_AMR, r5
	mtcr r12
	HMT_MEDIUM

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 * In fact treclaim in fake suspend state doesn't modify
	 * any registers.
	 */

BEGIN_FTR_SECTION
	bl pnv_power9_force_smt4_release
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

4:
	mfspr r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li r0, PSSCR_FAKE_SUSPEND
	andc r3, r3, r0
	mtspr SPRN_PSSCR, r3

	/* Don't save TEXASR, use value from last exit in real suspend state */
	ld r9, HSTATE_KVM_VCPU(r13)
	mfspr r5, SPRN_TFHAR
	mfspr r6, SPRN_TFIAR
	std r5, VCPU_TFHAR(r9)
	std r6, VCPU_TFIAR(r9)

	addi r1, r1, TM_FRAME_SIZE
	ld r0, PPC_LR_STKOFF(r1)
	mtlr r0
	blr
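kvmppc_save_tm_hv mostly defers to __kvmppc_save_tm; the extra code above only runs on POWER9 chips that need hypervisor TM assistance and only when the vcpu was in "fake suspend" mode, in which case a treclaim is issued purely to get the thread out of suspended state and only TFHAR/TFIAR are copied back (TEXASR keeps the value from the last real-suspend exit). A high-level, hedged C sketch of that control flow; the function and enum names are illustrative, not the kernel's:

#include <stdbool.h>

enum tm_save_path {
	TM_SAVE_FULL,		/* branch to __kvmppc_save_tm */
	TM_SAVE_FAKE_SUSPEND,	/* treclaim to leave suspend; save TFHAR/TFIAR only */
};

static enum tm_save_path choose_tm_save_path(bool cpu_needs_tm_hv_assist,
					     bool fake_suspend)
{
	if (!cpu_needs_tm_hv_assist)	/* CPU_FTR_P9_TM_HV_ASSIST clear */
		return TM_SAVE_FULL;
	if (!fake_suspend)		/* HSTATE_FAKE_SUSPEND == 0 */
		return TM_SAVE_FULL;
	return TM_SAVE_FAKE_SUSPEND;	/* the long path shown above */
}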

/*
 * Restore transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct
 * and r4 containing the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
_GLOBAL_TOC(kvmppc_restore_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b __kvmppc_restore_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	mflr r0
	std r0, PPC_LR_STKOFF(r1)

	li r0, 0
	stb r0, HSTATE_FAKE_SUSPEND(r13)

	/* Turn on TM so we can restore TM SPRs */
	mfmsr r5
	li r0, 1
	rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld r5, VCPU_TFHAR(r3)
	ld r6, VCPU_TFIAR(r3)
	ld r7, VCPU_TEXASR(r3)
	mtspr SPRN_TFHAR, r5
	mtspr SPRN_TFIAR, r6
	mtspr SPRN_TEXASR, r7

	rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
	beqlr			/* TM not active in guest */

	/* Make sure the failure summary is set */
	oris r7, r7, (TEXASR_FS)@h
	mtspr SPRN_TEXASR, r7

	cmpwi r5, 1		/* check for suspended state */
	bgt 10f
	stb r5, HSTATE_FAKE_SUSPEND(r13)
	b 9f			/* and return */
10:	stdu r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	bl kvmhv_emulate_tm_rollback
	nop
	addi r1, r1, PPC_MIN_STKFRM
9:	ld r0, PPC_LR_STKOFF(r1)
	mtlr r0
	blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
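kvmppc_restore_tm_hv always reloads TFHAR/TFIAR/TEXASR (the guest can change them outside a transaction), then inspects the TS field of the guest MSR: if TM was not active it simply returns, if the guest was suspended it arms fake-suspend mode, and if the guest was mid-transaction it emulates a rollback rather than doing a real trechkpt. A sketch of that decision, assuming the 2-bit TS field where 1 = suspended and 2 = transactional:

#include <stdint.h>

enum tm_restore_action {
	TM_RESTORE_NONE,		/* TS = 00: TM not active in the guest */
	TM_RESTORE_FAKE_SUSPEND,	/* TS = 01: set HSTATE_FAKE_SUSPEND */
	TM_RESTORE_ROLLBACK,		/* TS = 10: call kvmhv_emulate_tm_rollback */
};

#define MSR_TS_SHIFT 33	/* illustrative: MSR_TS_S_LG, low bit of the TS field */

static enum tm_restore_action tm_restore_action(uint64_t guest_msr)
{
	unsigned int ts = (guest_msr >> MSR_TS_SHIFT) & 3;

	if (ts == 0)
		return TM_RESTORE_NONE;
	if (ts == 1)
		return TM_RESTORE_FAKE_SUSPEND;
	return TM_RESTORE_ROLLBACK;
}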

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr r9, r1
	std r1, PACAR1(r13)
	ld r1, PACAEMERGSP(r13)
	subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
	std r9, 0(r1)
	std r0, GPR0(r1)
	std r9, GPR1(r1)
	std r2, GPR2(r1)
	SAVE_GPRS(3, 8, r1)
	srdi r0, r12, 32
	clrldi r12, r12, 32
	std r0, _CCR(r1)
	std r12, _TRAP(r1)
	andi. r0, r12, 2
	beq 1f
	mfspr r3, SPRN_HSRR0
	mfspr r4, SPRN_HSRR1
	mfspr r5, SPRN_HDAR
	mfspr r6, SPRN_HDSISR
	b 2f
1:	mfspr r3, SPRN_SRR0
	mfspr r4, SPRN_SRR1
	mfspr r5, SPRN_DAR
	mfspr r6, SPRN_DSISR
2:	std r3, _NIP(r1)
	std r4, _MSR(r1)
	std r5, _DAR(r1)
	std r6, _DSISR(r1)
	ld r9, HSTATE_SCRATCH2(r13)
	ld r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r0)
	SAVE_GPRS(9, 12, r1)
	std r0, GPR13(r1)
	SAVE_NVGPRS(r1)
	ld r5, HSTATE_CFAR(r13)
	std r5, ORIG_GPR3(r1)
	mflr r3
	mfctr r4
	mfxer r5
	lbz r6, PACAIRQSOFTMASK(r13)
	std r3, _LINK(r1)
	std r4, _CTR(r1)
	std r5, _XER(r1)
	std r6, SOFTE(r1)
	ld r2, PACATOC(r13)
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
	std r3, STACK_FRAME_OVERHEAD-16(r1)

	/*
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
	b .

/*
 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi r0, 2 /* Check if we are in transactional state.. */
	ld r11, VCPU_INTR_MSR(r9)
	bne 1f
	/* ... if transactional, change to suspended */
	li r0, 1
1:	rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr
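kvmppc_msr_interrupt computes the MSR a synthetic interrupt should leave the guest with: it starts from VCPU_INTR_MSR and carries the TS field over, except that a guest that was in transactional state is demoted to suspended, as the architecture does on interrupt delivery. In C, roughly (TS shift value is illustrative):

#include <stdint.h>

#define TS_SHIFT	33	/* MSR_TS_S_LG; TS field is the two bits above this */
#define TS_SUSPENDED	1
#define TS_TRANSACTIONAL 2

static uint64_t msr_for_interrupt(uint64_t guest_msr, uint64_t vcpu_intr_msr)
{
	uint64_t ts = (guest_msr >> TS_SHIFT) & 3;

	if (ts == TS_TRANSACTIONAL)
		ts = TS_SUSPENDED;	/* transactional -> suspended on interrupt */

	/* take the base MSR from VCPU_INTR_MSR and splice the TS field back in */
	vcpu_intr_msr &= ~(3ull << TS_SHIFT);
	vcpu_intr_msr |= ts << TS_SHIFT;
	return vcpu_intr_msr;
}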

/*
 * void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu)
 *
 * Load up guest PMU state.  R3 points to the vcpu struct.
 */
kvmhv_load_guest_pmu:
	mr r4, r3
	mflr r0
	li r3, 1
	sldi r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr SPRN_MMCR0, r3	/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld r3, VCPU_MMCR(r4)
	andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi r5, MMCR0_PMAO
	beql kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz r6, VCPU_PMC + 8(r4)
	lwz r7, VCPU_PMC + 12(r4)
	lwz r8, VCPU_PMC + 16(r4)
	lwz r9, VCPU_PMC + 20(r4)
	mtspr SPRN_PMC1, r3
	mtspr SPRN_PMC2, r5
	mtspr SPRN_PMC3, r6
	mtspr SPRN_PMC4, r7
	mtspr SPRN_PMC5, r8
	mtspr SPRN_PMC6, r9
	ld r3, VCPU_MMCR(r4)
	ld r5, VCPU_MMCR + 8(r4)
	ld r6, VCPU_MMCRA(r4)
	ld r7, VCPU_SIAR(r4)
	ld r8, VCPU_SDAR(r4)
	mtspr SPRN_MMCR1, r5
	mtspr SPRN_MMCRA, r6
	mtspr SPRN_SIAR, r7
	mtspr SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld r5, VCPU_MMCR + 16(r4)
	ld r6, VCPU_SIER(r4)
	mtspr SPRN_MMCR2, r5
	mtspr SPRN_SIER, r6
	lwz r7, VCPU_PMC + 24(r4)
	lwz r8, VCPU_PMC + 28(r4)
	ld r9, VCPU_MMCRS(r4)
	mtspr SPRN_SPMC1, r7
	mtspr SPRN_SPMC2, r8
	mtspr SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr SPRN_MMCR0, r3
	isync
	mtlr r0
	blr

/*
 * void kvmhv_load_host_pmu(void)
 *
 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
 */
kvmhv_load_host_pmu:
	mflr r0
	lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
	cmpwi r4, 0
	beq 23f			/* skip if not */
BEGIN_FTR_SECTION
	ld r3, HSTATE_MMCR0(r13)
	andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi r4, MMCR0_PMAO
	beql kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz r3, HSTATE_PMC1(r13)
	lwz r4, HSTATE_PMC2(r13)
	lwz r5, HSTATE_PMC3(r13)
	lwz r6, HSTATE_PMC4(r13)
	lwz r8, HSTATE_PMC5(r13)
	lwz r9, HSTATE_PMC6(r13)
	mtspr SPRN_PMC1, r3
	mtspr SPRN_PMC2, r4
	mtspr SPRN_PMC3, r5
	mtspr SPRN_PMC4, r6
	mtspr SPRN_PMC5, r8
	mtspr SPRN_PMC6, r9
	ld r3, HSTATE_MMCR0(r13)
	ld r4, HSTATE_MMCR1(r13)
	ld r5, HSTATE_MMCRA(r13)
	ld r6, HSTATE_SIAR(r13)
	ld r7, HSTATE_SDAR(r13)
	mtspr SPRN_MMCR1, r4
	mtspr SPRN_MMCRA, r5
	mtspr SPRN_SIAR, r6
	mtspr SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld r8, HSTATE_MMCR2(r13)
	ld r9, HSTATE_SIER(r13)
	mtspr SPRN_MMCR2, r8
	mtspr SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr SPRN_MMCR0, r3
	isync
	mtlr r0
23:	blr

/*
 * void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use)
 *
 * Save guest PMU state into the vcpu struct.
 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
 */
kvmhv_save_guest_pmu:
	mr r9, r3
	mr r8, r4
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li r3, -1		/* set all freeze bits */
	clrrdi r3, r3, 10
	mfspr r10, SPRN_MMCR2
	mtspr SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li r3, 1
	sldi r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr r4, SPRN_MMCR0	/* save MMCR0 */
	mtspr SPRN_MMCR0, r3	/* freeze all counters, disable ints */
	mfspr r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li r7, 0
	mtspr SPRN_MMCRA, r7
	isync
	cmpwi r8, 0		/* did they ask for PMU stuff to be saved? */
	bne 21f
	std r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b 22f
21:	mfspr r5, SPRN_MMCR1
	mfspr r7, SPRN_SIAR
	mfspr r8, SPRN_SDAR
	std r4, VCPU_MMCR(r9)
	std r5, VCPU_MMCR + 8(r9)
	std r6, VCPU_MMCRA(r9)
BEGIN_FTR_SECTION
	std r10, VCPU_MMCR + 16(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std r7, VCPU_SIAR(r9)
	std r8, VCPU_SDAR(r9)
	mfspr r3, SPRN_PMC1
	mfspr r4, SPRN_PMC2
	mfspr r5, SPRN_PMC3
	mfspr r6, SPRN_PMC4
	mfspr r7, SPRN_PMC5
	mfspr r8, SPRN_PMC6
	stw r3, VCPU_PMC(r9)
	stw r4, VCPU_PMC + 4(r9)
	stw r5, VCPU_PMC + 8(r9)
	stw r6, VCPU_PMC + 12(r9)
	stw r7, VCPU_PMC + 16(r9)
	stw r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr r5, SPRN_SIER
	std r5, VCPU_SIER(r9)
	mfspr r6, SPRN_SPMC1
	mfspr r7, SPRN_SPMC2
	mfspr r8, SPRN_MMCRS
	stw r6, VCPU_PMC + 24(r9)
	stw r7, VCPU_PMC + 28(r9)
	std r8, VCPU_MMCRS(r9)
	lis r4, 0x8000
	mtspr SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:	blr
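The comment inside kvmhv_save_guest_pmu explains the POWER8 quirk: to avoid losing a performance-monitor alert, the counters are first frozen through MMCR2 (all freeze-condition bits set) before MMCR0 is read and rewritten. The li/clrrdi pair builds that mask: -1 with the low 10 bits cleared. As a one-line check in C:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* li r3, -1 ; clrrdi r3, r3, 10  => all 54 freeze-condition bits set */
	uint64_t mmcr2_freeze_all = ~0ull << 10;

	assert(mmcr2_freeze_all == 0xfffffffffffffc00ull);
	return 0;
}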

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li r3, 0
	mtspr SPRN_MMCR2, r3
	lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr SPRN_MMCR0, r3
	lis r3, 0x7fff
	ori r3, r3, 0xffff
	mtspr SPRN_PMC6, r3
	isync
	blr
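kvmppc_fix_pmao cannot rely on MMCR0[PMAO] on the affected POWER8E parts, so it manufactures a pending interrupt instead: MMCR2 is cleared, MMCR0 enables the performance-monitor exception machinery (PMXE, FCECE, PMCjCE, C56RUN), and PMC6 is preloaded with 0x7fffffff so its next tick overflows and raises the alert. A small C sketch of the values it programs; the MMCR0 bit values are quoted from memory of asm/reg.h and should be treated as illustrative:

#include <stdint.h>

#define MMCR0_PMXE	0x04000000u	/* performance monitor exception enable */
#define MMCR0_FCECE	0x02000000u	/* freeze counters on enabled condition */
#define MMCR0_PMCjCE	0x00004000u	/* PMCj count enable */
#define MMCR0_C56RUN	0x00000100u	/* PMC5/6 count regardless of run latch */

struct pmao_fixup {	/* values written to MMCR2, MMCR0 and PMC6 */
	uint64_t mmcr2;
	uint64_t mmcr0;
	uint32_t pmc6;
};

static struct pmao_fixup fix_pmao_values(void)
{
	struct pmao_fixup f;

	f.mmcr2 = 0;					/* no freeze conditions */
	f.mmcr0 = MMCR0_PMXE | MMCR0_FCECE |
		  MMCR0_PMCjCE | MMCR0_C56RUN;		/* allow the alert */
	f.pmc6  = 0x7fffffffu;				/* next increment overflows */
	return f;
}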

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld r5, HSTATE_KVM_VCORE(r13)
	ld r6, VCORE_TB_OFFSET_APPL(r5)
	mftb r5
	subf r5, r6, r5	/* subtract current timebase offset */
	std r3, VCPU_CUR_ACTIVITY(r4)
	std r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld r5, HSTATE_KVM_VCORE(r13)
	ld r8, VCORE_TB_OFFSET_APPL(r5)
	ld r5, VCPU_CUR_ACTIVITY(r4)
	ld r6, VCPU_ACTIVITY_START(r4)
	std r3, VCPU_CUR_ACTIVITY(r4)
	mftb r7
	subf r7, r8, r7	/* subtract current timebase offset */
	std r7, VCPU_ACTIVITY_START(r4)
	cmpdi r5, 0
	beqlr
	subf r3, r6, r7
	ld r8, TAS_SEQCOUNT(r5)
	cmpdi r8, 0
	addi r8, r8, 1
	std r8, TAS_SEQCOUNT(r5)
	lwsync
	ld r7, TAS_TOTAL(r5)
	add r7, r7, r3
	std r7, TAS_TOTAL(r5)
	ld r6, TAS_MIN(r5)
	ld r7, TAS_MAX(r5)
	beq 3f
	cmpd r3, r6
	bge 1f
3:	std r3, TAS_MIN(r5)
1:	cmpd r3, r7
	ble 2f
	std r3, TAS_MAX(r5)
2:	lwsync
	addi r8, r8, 1
	std r8, TAS_SEQCOUNT(r5)
	blr
#endif
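kvmhv_accumulate_time closes out the current activity and starts the new one: it computes the elapsed time in (offset-adjusted) timebase ticks and folds it into the accumulator's total/min/max under a simple sequence count so that readers can detect a torn update. A C sketch of the same update, with an illustrative struct mirroring the TAS_* offsets:

#include <stdint.h>

struct time_accum {		/* mirrors TAS_SEQCOUNT/TOTAL/MIN/MAX */
	uint64_t seqcount;
	uint64_t total;
	uint64_t min;
	uint64_t max;
};

static void accumulate_time(struct time_accum *tas, uint64_t start, uint64_t now)
{
	uint64_t delta = now - start;
	int first = (tas->seqcount == 0);	/* matches the cmpdi/beq pair */

	tas->seqcount++;			/* odd: update in progress */
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* lwsync */
	tas->total += delta;
	if (first || delta < tas->min)
		tas->min = delta;
	if (delta > tas->max)
		tas->max = delta;
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* lwsync */
	tas->seqcount++;			/* even again: update complete */
}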