misc_helper.c
/*
 * x86 misc helpers - sysemu code
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "tcg/helper-tcg.h"

void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stb(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inb(CPUX86State *env, uint32_t port)
{
    return address_space_ldub(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stw(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inw(CPUX86State *env, uint32_t port)
{
    return address_space_lduw(&address_space_io, port,
                              cpu_get_mem_attrs(env), NULL);
}

void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
{
    address_space_stl(&address_space_io, port, data,
                      cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_inl(CPUX86State *env, uint32_t port)
{
    return address_space_ldl(&address_space_io, port,
                             cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_read_crN(CPUX86State *env, int reg)
{
    target_ulong val;

    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
        } else {
            val = env->int_ctl & V_TPR_MASK;
        }
        break;
    }
    return val;
}
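
/*
 * MOV to control register.  CR0 writes may still take the SVM selective
 * write intercept, CR3/CR4 values are validated before being installed,
 * and CR8 is forwarded to the APIC TPR (or to the virtual TPR when
 * V_INTR masking is in effect).
 */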
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
{
    switch (reg) {
    case 0:
        /*
         * If we reach this point, the CR0 write intercept is disabled.
         * But we could still exit if the hypervisor has requested the
         * selective intercept for bits other than TS and MP.
         */
        if (cpu_svm_has_intercept(env, SVM_EXIT_CR0_SEL_WRITE) &&
            ((env->cr[0] ^ t0) & ~(CR0_TS_MASK | CR0_MP_MASK))) {
            cpu_vmexit(env, SVM_EXIT_CR0_SEL_WRITE, 0, GETPC());
        }
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        if ((env->efer & MSR_EFER_LMA) &&
            (t0 & ((~0ULL) << env_archcpu(env)->phys_bits))) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        if (!(env->efer & MSR_EFER_LMA)) {
            t0 &= 0xffffffffUL;
        }
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        if (t0 & cr4_reserved_bits(env)) {
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
        }
        if (((t0 ^ env->cr[4]) & CR4_LA57_MASK) &&
            (env->hflags & HF_CS64_MASK)) {
            raise_exception_ra(env, EXCP0D_GPF, GETPC());
        }
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            qemu_mutex_lock_iothread();
            cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0);
            qemu_mutex_unlock_iothread();
        }
        env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK);

        CPUState *cs = env_cpu(env);
        if (ctl_has_irq(env)) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
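
/*
 * WRMSR: ECX selects the MSR, EDX:EAX supplies the 64-bit value.
 * Writes to MSRs that are not modeled here are silently discarded.
 */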
void helper_wrmsr(CPUX86State *env)
{
    uint64_t val;
    CPUState *cs = env_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1, GETPC());

    val = ((uint32_t)env->regs[R_EAX]) |
        ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env_archcpu(env)->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;

            update_mask = 0;
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_IA32_PKRS:
        if (val & 0xFFFFFFFF00000000ull) {
            goto error;
        }
        env->pkrs = val;
        tlb_flush(cs);
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                       MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                        MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    case MSR_IA32_BNDCFGS:
        /* FIXME: #GP if reserved bits are set.  */
        /* FIXME: Extend highest implemented bit of linear address.  */
        env->msr_bndcfgs = val;
        cpu_sync_bndcs_hflags(env);
        break;
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
    return;
error:
    raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
}
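
/*
 * RDMSR: ECX selects the MSR; the 64-bit result is returned in EDX:EAX.
 * MSRs that are not modeled here read back as zero.
 */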
void helper_rdmsr(CPUX86State *env)
{
    X86CPU *x86_cpu = env_archcpu(env);
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0, GETPC());

    switch ((uint32_t)env->regs[R_ECX]) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env_archcpu(env)->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_IA32_PKRS:
        val = env->pkrs;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_SMI_COUNT:
        val = env->msr_smi_count;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
                             MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
                              MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_IA32_BNDCFGS:
        val = env->msr_bndcfgs;
        break;
    case MSR_IA32_UCODE_REV:
        val = x86_cpu->ucode_rev;
        break;
    default:
        if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
            && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
            (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    env->regs[R_EAX] = (uint32_t)(val);
    env->regs[R_EDX] = (uint32_t)(val >> 32);
}
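
/* INVLPG: drop a single page translation from this CPU's TLB. */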
void helper_flush_page(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env_cpu(env), addr);
}

static void QEMU_NORETURN do_hlt(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

void QEMU_NORETURN helper_hlt(CPUX86State *env, int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC());
    env->eip += next_eip_addend;

    do_hlt(env);
}

void helper_monitor(CPUX86State *env, target_ulong ptr)
{
    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0, GETPC());
}

void QEMU_NORETURN helper_mwait(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if ((uint32_t)env->regs[R_ECX] != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC());
    env->eip += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
        do_pause(env);
    } else {
        do_hlt(env);
    }
}
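
/*
 * For reference, helpers such as these are declared to the TCG translator
 * through DEF_HELPER_* macros in the target's helper.h.  A minimal sketch
 * for the port I/O helpers above, assuming the flag-less macro variants
 * (the real declarations may also carry TCG_CALL_NO_* flags):
 *
 *   DEF_HELPER_3(outb, void, env, i32, i32)
 *   DEF_HELPER_2(inb, tl, env, i32)
 */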