cacheinfo.c (5226B)
/*
 * cacheinfo.c - helpers to query the host about its caches
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "qemu/atomic.h"

int qemu_icache_linesize = 0;
int qemu_icache_linesize_log;
int qemu_dcache_linesize = 0;
int qemu_dcache_linesize_log;

/*
 * Operating system specific detection mechanisms.
 */

#if defined(_WIN32)

static void sys_cache_info(int *isize, int *dsize)
{
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf;
    DWORD size = 0;
    BOOL success;
    size_t i, n;

    /* Check for the required buffer size first.  Note that if the zero
       size we use for the probe results in success, then there is no
       data available; fail in that case.  */
    success = GetLogicalProcessorInformation(0, &size);
    if (success || GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
        return;
    }

    n = size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
    size = n * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
    buf = g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, n);
    if (!GetLogicalProcessorInformation(buf, &size)) {
        goto fail;
    }

    for (i = 0; i < n; i++) {
        if (buf[i].Relationship == RelationCache
            && buf[i].Cache.Level == 1) {
            switch (buf[i].Cache.Type) {
            case CacheUnified:
                *isize = *dsize = buf[i].Cache.LineSize;
                break;
            case CacheInstruction:
                *isize = buf[i].Cache.LineSize;
                break;
            case CacheData:
                *dsize = buf[i].Cache.LineSize;
                break;
            default:
                break;
            }
        }
    }
 fail:
    g_free(buf);
}

#elif defined(__APPLE__)
# include <sys/sysctl.h>
static void sys_cache_info(int *isize, int *dsize)
{
    /* There's only a single sysctl for both I/D cache line sizes.  */
    long size;
    size_t len = sizeof(size);
    if (!sysctlbyname("hw.cachelinesize", &size, &len, NULL, 0)) {
        *isize = *dsize = size;
    }
}
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
# include <sys/sysctl.h>
static void sys_cache_info(int *isize, int *dsize)
{
    /* There's only a single sysctl for both I/D cache line sizes.  */
    int size;
    size_t len = sizeof(size);
    if (!sysctlbyname("machdep.cacheline_size", &size, &len, NULL, 0)) {
        *isize = *dsize = size;
    }
}
#else
/* POSIX */

static void sys_cache_info(int *isize, int *dsize)
{
# ifdef _SC_LEVEL1_ICACHE_LINESIZE
    int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
    if (tmp_isize > 0) {
        *isize = tmp_isize;
    }
# endif
# ifdef _SC_LEVEL1_DCACHE_LINESIZE
    int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
    if (tmp_dsize > 0) {
        *dsize = tmp_dsize;
    }
# endif
}
#endif /* sys_cache_info */

/*
 * Architecture (+ OS) specific detection mechanisms.
 */

#if defined(__aarch64__)

static void arch_cache_info(int *isize, int *dsize)
{
    if (*isize == 0 || *dsize == 0) {
        uint64_t ctr;

        /* The real cache geometry is in CCSIDR_EL1/CLIDR_EL1/CSSELR_EL1,
           but (at least under Linux) these are marked protected by the
           kernel.  However, CTR_EL0 contains the minimum linesize in the
           entire hierarchy, and is used by userspace cache flushing.  */
        asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr));
        if (*isize == 0) {
            *isize = 4 << (ctr & 0xf);
        }
        if (*dsize == 0) {
            *dsize = 4 << ((ctr >> 16) & 0xf);
        }
    }
}

#elif defined(_ARCH_PPC) && defined(__linux__)
# include "elf.h"

static void arch_cache_info(int *isize, int *dsize)
{
    if (*isize == 0) {
        *isize = qemu_getauxval(AT_ICACHEBSIZE);
    }
    if (*dsize == 0) {
        *dsize = qemu_getauxval(AT_DCACHEBSIZE);
    }
}

#else
static void arch_cache_info(int *isize, int *dsize) { }
#endif /* arch_cache_info */

/*
 * ... and if all else fails ...
 */

static void fallback_cache_info(int *isize, int *dsize)
{
    /* If we can only find one of the two, assume they're the same.  */
    if (*isize) {
        if (*dsize) {
            /* Success! */
        } else {
            *dsize = *isize;
        }
    } else if (*dsize) {
        *isize = *dsize;
    } else {
#if defined(_ARCH_PPC)
        /*
         * For PPC, we're going to use the cache sizes computed for
         * flush_idcache_range.  Which means that we must use the
         * architecture minimum.
         */
        *isize = *dsize = 16;
#else
        /* Otherwise, 64 bytes is not uncommon.  */
        *isize = *dsize = 64;
#endif
    }
}

static void __attribute__((constructor)) init_cache_info(void)
{
    int isize = 0, dsize = 0;

    sys_cache_info(&isize, &dsize);
    arch_cache_info(&isize, &dsize);
    fallback_cache_info(&isize, &dsize);

    assert((isize & (isize - 1)) == 0);
    assert((dsize & (dsize - 1)) == 0);

    qemu_icache_linesize = isize;
    qemu_icache_linesize_log = ctz32(isize);
    qemu_dcache_linesize = dsize;
    qemu_dcache_linesize_log = ctz32(dsize);

    qatomic64_init();
}
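
For reference, a minimal standalone sketch of the same probe-then-fallback pattern, assuming a POSIX host that provides glibc's _SC_LEVEL1_*CACHE_LINESIZE extensions. It is not part of this file: the file name linesize.c is hypothetical, and __builtin_ctz stands in for QEMU's ctz32(). The idea is the one used above: ask the OS for the L1 line sizes, copy whichever one was found if the other is missing, fall back to 64 bytes, and derive the log2 from the power-of-two size.

/* linesize.c - illustrative only, not part of QEMU.  Build: cc -o linesize linesize.c */
#include <assert.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int isize = 0, dsize = 0;

    /* OS-level probe, as in the POSIX sys_cache_info() above. */
#ifdef _SC_LEVEL1_ICACHE_LINESIZE
    long tmp_isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
    if (tmp_isize > 0) {
        isize = (int)tmp_isize;
    }
#endif
#ifdef _SC_LEVEL1_DCACHE_LINESIZE
    long tmp_dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
    if (tmp_dsize > 0) {
        dsize = (int)tmp_dsize;
    }
#endif

    /* Same policy as fallback_cache_info(): if only one size is known,
       assume the other matches it; if neither is known, guess 64 bytes. */
    if (isize == 0) {
        isize = dsize ? dsize : 64;
    }
    if (dsize == 0) {
        dsize = isize;
    }

    /* Line sizes are expected to be powers of two, so counting trailing
       zeros gives log2, as init_cache_info() does with ctz32(). */
    assert((isize & (isize - 1)) == 0);
    assert((dsize & (dsize - 1)) == 0);
    printf("icache line: %d bytes (log2 = %d)\n", isize, __builtin_ctz(isize));
    printf("dcache line: %d bytes (log2 = %d)\n", dsize, __builtin_ctz(dsize));
    return 0;
}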