init.c (8465B)
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/cpu/init.c
 *
 * CPU init code
 *
 * Copyright (C) 2002 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/sh_bios.h>
#include <asm/setup.h>

#ifdef CONFIG_SH_FPU
#define cpu_has_fpu	1
#else
#define cpu_has_fpu	0
#endif

#ifdef CONFIG_SH_DSP
#define cpu_has_dsp	1
#else
#define cpu_has_dsp	0
#endif

/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 */
#define onchip_setup(x)				\
static int x##_disabled = !cpu_has_##x;		\
						\
static int x##_setup(char *opts)		\
{						\
        x##_disabled = 1;			\
        return 1;				\
}						\
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);
onchip_setup(dsp);

#ifdef CONFIG_SPECULATIVE_EXECUTION
#define CPUOPM		0xff2f0000
#define CPUOPM_RABD	(1 << 5)

static void speculative_execution_init(void)
{
        /* Clear RABD */
        __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);

        /* Flush the update */
        (void)__raw_readl(CPUOPM);
        ctrl_barrier();
}
#else
#define speculative_execution_init()	do { } while (0)
#endif

#ifdef CONFIG_CPU_SH4A
#define EXPMASK			0xff2f0004
#define EXPMASK_RTEDS		(1 << 0)
#define EXPMASK_BRDSSLP		(1 << 1)
#define EXPMASK_MMCAW		(1 << 4)

static void expmask_init(void)
{
        unsigned long expmask = __raw_readl(EXPMASK);

        /*
         * Future proofing.
         *
         * Disable support for slottable sleep instruction, non-nop
         * instructions in the rte delay slot, and associative writes to
         * the memory-mapped cache array.
         */
        expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);

        __raw_writel(expmask, EXPMASK);
        ctrl_barrier();
}
#else
#define expmask_init()	do { } while (0)
#endif

/* 2nd-level cache init */
void __attribute__ ((weak)) l2_cache_init(void)
{
}

/*
 * Generic first-level cache init
 */
#if !defined(CONFIG_CPU_J2)
static void cache_init(void)
{
        unsigned long ccr, flags;

        jump_to_uncached();
        ccr = __raw_readl(SH_CCR);

        /*
         * At this point we don't know whether the cache is enabled or not -
         * a bootloader may have enabled it. There are at least 2 things
         * that could be dirty in the cache at this point:
         *
         *  1. kernel command line set up by boot loader
         *  2. spilled registers from the prolog of this function
         *
         * => before re-initialising the cache, we must do a purge of the
         * whole cache out to memory for safety. As long as nothing is
         * spilled during the loop to lines that have already been done,
         * this is safe.
         *						- RPC
         */
        if (ccr & CCR_CACHE_ENABLE) {
                unsigned long ways, waysize, addrstart;

                waysize = current_cpu_data.dcache.sets;

#ifdef CCR_CACHE_ORA
                /*
                 * If the OC is already in RAM mode, we only have
                 * half of the entries to flush..
                 */
                if (ccr & CCR_CACHE_ORA)
                        waysize >>= 1;
#endif

                waysize <<= current_cpu_data.dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
                /* If EMODE is not set, we only have 1 way to flush. */
                if (!(ccr & CCR_CACHE_EMODE))
                        ways = 1;
                else
#endif
                        ways = current_cpu_data.dcache.ways;

                addrstart = CACHE_OC_ADDRESS_ARRAY;
                do {
                        unsigned long addr;

                        for (addr = addrstart;
                             addr < addrstart + waysize;
                             addr += current_cpu_data.dcache.linesz)
                                __raw_writel(0, addr);

                        addrstart += current_cpu_data.dcache.way_incr;
                } while (--ways);
        }

        /*
         * Default CCR values .. enable the caches
         * and invalidate them immediately..
         */
        flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
        /* Force EMODE if possible */
        if (current_cpu_data.dcache.ways > 1)
                flags |= CCR_CACHE_EMODE;
        else
                flags &= ~CCR_CACHE_EMODE;
#endif

#if defined(CONFIG_CACHE_WRITETHROUGH)
        /* Write-through */
        flags |= CCR_CACHE_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
        /* Write-back */
        flags |= CCR_CACHE_CB;
#else
        /* Off */
        flags &= ~CCR_CACHE_ENABLE;
#endif

        l2_cache_init();

        __raw_writel(flags, SH_CCR);
        back_to_cached();
}
#else
#define cache_init()	do { } while (0)
#endif

#define CSHAPE(totalsize, linesize, assoc) \
        ((totalsize & ~0xff) | (linesize << 4) | assoc)

#define CACHE_DESC_SHAPE(desc)	\
        CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)

static void detect_cache_shape(void)
{
        l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);

        if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
                l1i_cache_shape = l1d_cache_shape;
        else
                l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);

        if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
                l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
        else
                l2_cache_shape = -1; /* No S-cache */
}

static void fpu_init(void)
{
        /* Disable the FPU */
        if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
                printk("FPU Disabled\n");
                current_cpu_data.flags &= ~CPU_HAS_FPU;
        }

        disable_fpu();
        clear_used_math();
}

#ifdef CONFIG_SH_DSP
static void release_dsp(void)
{
        unsigned long sr;

        /* Clear SR.DSP bit */
        __asm__ __volatile__ (
                "stc\tsr, %0\n\t"
                "and\t%1, %0\n\t"
                "ldc\t%0, sr\n\t"
                : "=&r" (sr)
                : "r" (~SR_DSP)
        );
}

static void dsp_init(void)
{
        unsigned long sr;

        /*
         * Set the SR.DSP bit, wait for one instruction, and then read
         * back the SR value.
         */
        __asm__ __volatile__ (
                "stc\tsr, %0\n\t"
                "or\t%1, %0\n\t"
                "ldc\t%0, sr\n\t"
                "nop\n\t"
                "stc\tsr, %0\n\t"
                : "=&r" (sr)
                : "r" (SR_DSP)
        );

        /* If the DSP bit is still set, this CPU has a DSP */
        if (sr & SR_DSP)
                current_cpu_data.flags |= CPU_HAS_DSP;

        /* Disable the DSP */
        if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
                printk("DSP Disabled\n");
                current_cpu_data.flags &= ~CPU_HAS_DSP;
        }

        /* Now that we've determined the DSP status, clear the DSP bit. */
        release_dsp();
}
#else
static inline void dsp_init(void) { }
#endif /* CONFIG_SH_DSP */

/**
 * cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the
 * boot CPU prior to calling start_kernel(). For SMP, a combination of
 * this and start_secondary() will bring up each processor to a ready
 * state prior to hand forking the idle loop.
 *
 * We do all of the basic processor init here, including setting up
 * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
 * subsequently platform_setup()) things like determining the CPU
 * subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in cpu_probe().
 */
asmlinkage void cpu_init(void)
{
        current_thread_info()->cpu = hard_smp_processor_id();

        /* First, probe the CPU */
        cpu_probe();

        if (current_cpu_data.type == CPU_SH_NONE)
                panic("Unknown CPU");

        /* First setup the rest of the I-cache info */
        current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
                                      current_cpu_data.icache.linesz;

        current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
                                    current_cpu_data.icache.linesz;

        /* And the D-cache too */
        current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
                                      current_cpu_data.dcache.linesz;

        current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
                                    current_cpu_data.dcache.linesz;

        /* Init the cache */
        cache_init();

        if (raw_smp_processor_id() == 0) {
#ifdef CONFIG_MMU
                shm_align_mask = max_t(unsigned long,
                                       current_cpu_data.dcache.way_size - 1,
                                       PAGE_SIZE - 1);
#else
                shm_align_mask = PAGE_SIZE - 1;
#endif

                /* Boot CPU sets the cache shape */
                detect_cache_shape();
        }

        fpu_init();
        dsp_init();

        /*
         * Initialize the per-CPU ASID cache very early, since the
         * TLB flushing routines depend on this being setup.
         */
        current_cpu_data.asid_cache = NO_CONTEXT;

        current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;

        speculative_execution_init();
        expmask_init();

        /* Do the rest of the boot processor setup */
        if (raw_smp_processor_id() == 0) {
                /* Save off the BIOS VBR, if there is one */
                sh_bios_vbr_init();

                /*
                 * Setup VBR for boot CPU. Secondary CPUs do this through
                 * start_secondary().
                 */
                per_cpu_trap_init();

                /*
                 * Boot processor to setup the FP and extended state
                 * context info.
                 */
                init_thread_xstate();
        }
}
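
For reference, here is a minimal sketch of what the onchip_setup() macro above generates once the preprocessor expands "onchip_setup(fpu);", together with the CSHAPE() encoding worked through for an assumed cache geometry. The 32 KiB / 4-way / 32-byte-line numbers are illustrative assumptions, not values probed from real hardware:

/* Expansion of onchip_setup(fpu): "no" __stringify(fpu) concatenates to "nofpu" */
static int fpu_disabled = !cpu_has_fpu;

static int fpu_setup(char *opts)
{
        /* Passing "nofpu" on the kernel command line lands here */
        fpu_disabled = 1;
        return 1;	/* option consumed */
}
__setup("nofpu", fpu_setup);

/*
 * CSHAPE() packs total size, log2(line size), and associativity into one
 * word (the l1d/l1i/l2_cache_shape values exported via the ELF auxiliary
 * vector). For an assumed 32 KiB, 4-way D-cache with 32-byte lines:
 *
 *	CSHAPE(32768, ilog2(32), 4)
 *		= (32768 & ~0xff) | (5 << 4) | 4
 *		= 0x8000 | 0x50 | 0x4
 *		= 0x8054
 */

Because detect_cache_shape() runs only on the boot CPU, userspace sees a single cache-shape triple regardless of how many processors come up through start_secondary().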