process.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PARISC Architecture-dependent parts of process handling
 * based on the work for i386
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
 * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
 * Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
 * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
 * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
 * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
 * Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 */
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/nmi.h>
#include <linux/sched/hotplug.h>

#include <asm/io.h>
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

#define COMMAND_GLOBAL	F_EXTEND(0xfffe0030)
#define CMD_RESET	5	/* reset any module */

/*
** The Wright Brothers and Gecko systems have a H/W problem
** (Lasi...'nuf said) that may cause a broadcast reset to lock up
** the system. An HVERSION dependent PDC call was developed
** to perform a "safe", platform specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
** the PDC call will not return (the system will be reset).
*/
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
	/*
	 ** If the user has modified the Firmware Selftest Bitmap,
	 ** run the tests specified in the bitmap after the
	 ** system is rebooted w/PDC_DO_RESET.
	 **
	 ** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
	 **
	 ** Using "directed resets" at each processor with the MEM_TOC
	 ** vector cleared will also avoid running destructive
	 ** memory self tests. (Not implemented yet)
	 */
	if (ftc_bitmap) {
		pdc_do_firm_test_reset(ftc_bitmap);
	}
#endif
	/* set up a new led state on systems shipped with a LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* "Normal" system reset */
	pdc_do_reset();

	/* Nope...box should reset with just CMD_RESET now */
	gsc_writel(CMD_RESET, COMMAND_GLOBAL);

	/* Wait for RESET to lay us to rest. */
	while (1) ;

}

void (*chassis_power_off)(void);

/*
 * This routine is called from sys_reboot to actually turn off the
 * machine.
 */
void machine_power_off(void)
{
	/* If there is a registered power off handler, call it. */
	if (chassis_power_off)
		chassis_power_off();

	/* Put the soft power button back under hardware control.
	 * If the user had already pressed the power button, the
	 * following call will immediately power off. */
	pdc_soft_power_button(0);

	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* ipmi_poweroff may have been installed. */
	do_kernel_power_off();

	/* It seems we have no way to power the system off via
	 * software. The user has to press the button himself. */

	printk(KERN_EMERG "System shut down completed.\n"
	       "Please power this system off now.");

	/* prevent soft lockup/stalled CPU messages for endless loop. */
	rcu_sysrq_start();
	lockup_detector_soft_poweroff();
	for (;;);
}

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
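
/*
 * Illustrative sketch of how the hooks above are meant to be used; the
 * handler name below is made up, not taken from any real driver.  A
 * platform power driver that knows how to cut power would typically
 * register itself by filling in chassis_power_off:
 *
 *	static void board_cut_power(void)	// hypothetical handler
 *	{
 *		// write the board-specific soft-power register here
 *	}
 *
 *	chassis_power_off = board_cut_power;
 *
 * machine_power_off() calls the handler first; if it returns (or none
 * is registered), the code falls through to pdc_soft_power_button(),
 * the PDC chassis status call and do_kernel_power_off().
 */
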
void machine_halt(void)
{
	machine_power_off();
}

void flush_thread(void)
{
	/* Only needs to handle fpu stuff or perf monitors.
	** REVISIT: several arches implement a "lazy fpu state".
	*/
}

void release_thread(struct task_struct *dead_task)
{
}

/*
 * Idle thread support
 *
 * Detect when running on QEMU with SeaBIOS PDC Firmware and let
 * QEMU idle the host too.
 */

int running_on_qemu __ro_after_init;
EXPORT_SYMBOL(running_on_qemu);

/*
 * Called from the idle thread for the CPU which has been shutdown.
 */
void arch_cpu_idle_dead(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of. */
	(void)cpu_report_death();

	/* Ensure that the cache lines are written out. */
	flush_cache_all_local();
	flush_tlb_all_local(NULL);

	/* Let PDC firmware put CPU into firmware idle loop. */
	__pdc_cpu_rendezvous();

	pr_warn("PDC does not provide rendezvous function.\n");
#endif
	while (1);
}

void __cpuidle arch_cpu_idle(void)
{
	raw_local_irq_enable();

	/* nop on real hardware, qemu will idle sleep. */
	asm volatile("or %%r10,%%r10,%%r10\n":::);
}

static int __init parisc_idle_init(void)
{
	if (!running_on_qemu)
		cpu_idle_poll_ctrl(1);

	return 0;
}
arch_initcall(parisc_idle_init);
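
/*
 * For context, a simplified sketch of how copy_thread() below is reached
 * for kernel threads (modelled loosely on kernel_thread() in kernel/fork.c;
 * some kernel_clone_args fields are omitted here):
 *
 *	pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *	{
 *		struct kernel_clone_args args = {
 *			.flags  = ((lower_32_bits(flags) | CLONE_VM |
 *				    CLONE_UNTRACED) & ~CSIGNAL),
 *			.fn     = fn,
 *			.fn_arg = arg,
 *		};
 *
 *		return kernel_clone(&args);
 *	}
 *
 * copy_thread() stashes fn/fn_arg in the child's registers so that
 * ret_from_kernel_thread can run schedule_tail() and then call fn(fn_arg).
 */
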
/*
 * Copy architecture-specific thread state
 */
int
copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *cregs = &(p->thread.regs);
	void *stack = task_stack_page(p);

	/* We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text */
	extern void * const ret_from_kernel_thread;
	extern void * const child_return;

	if (unlikely(args->fn)) {
		/* kernel thread */
		memset(cregs, 0, sizeof(struct pt_regs));
		if (args->idle) /* idle thread */
			return 0;
		/* Must exit via ret_from_kernel_thread in order
		 * to call schedule_tail()
		 */
		cregs->ksp = (unsigned long) stack + FRAME_SIZE + PT_SZ_ALGN;
		cregs->kpc = (unsigned long) &ret_from_kernel_thread;
		/*
		 * Copy function and argument to be called from
		 * ret_from_kernel_thread.
		 */
#ifdef CONFIG_64BIT
		cregs->gr[27] = ((unsigned long *)args->fn)[3];
		cregs->gr[26] = ((unsigned long *)args->fn)[2];
#else
		cregs->gr[26] = (unsigned long) args->fn;
#endif
		cregs->gr[25] = (unsigned long) args->fn_arg;
	} else {
		/* user thread */
		/* usp must be word aligned. This also prevents users from
		 * passing in the value 1 (which is the signal for a special
		 * return for a kernel thread) */
		if (usp) {
			usp = ALIGN(usp, 4);
			if (likely(usp))
				cregs->gr[30] = usp;
		}
		cregs->ksp = (unsigned long) stack + FRAME_SIZE;
		cregs->kpc = (unsigned long) &child_return;

		/* Setup thread TLS area */
		if (clone_flags & CLONE_SETTLS)
			cregs->cr27 = tls;
	}

	return 0;
}

unsigned long
__get_wchan(struct task_struct *p)
{
	struct unwind_frame_info info;
	unsigned long ip;
	int count = 0;

	/*
	 * These bracket the sleeping functions..
	 */

	unwind_frame_init_from_blocked_task(&info, p);
	do {
		if (unwind_once(&info) < 0)
			return 0;
		if (task_is_running(p))
			return 0;
		ip = info.ip;
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < MAX_UNWIND_ENTRIES);
	return 0;
}

static inline unsigned long brk_rnd(void)
{
	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;
	return ret;
}
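
/*
 * Worked example for arch_randomize_brk(), using illustrative values only
 * (the real PAGE_SHIFT and BRK_RND_MASK depend on the kernel configuration):
 *
 *	PAGE_SHIFT   == 12		(4 KiB pages, assumed)
 *	BRK_RND_MASK == 0x7ff		(assumed)
 *	brk_rnd()    -> (rand & 0x7ff) << 12, i.e. 0 .. 0x7ff000 (~8 MiB)
 *	mm->brk      == 0x40001234, random offset 0x123000
 *	ret          == PAGE_ALIGN(0x40001234 + 0x123000) == 0x40125000
 *
 * If the addition wraps around (ret < mm->brk), the unrandomized mm->brk
 * is returned instead.
 */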