cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

unwind_orc.c (19051B)


// SPDX-License-Identifier: GPL-2.0-only
#include <linux/objtool.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>

#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)

#define orc_warn_current(args...)					\
({									\
	if (state->task == current && !state->error)			\
		orc_warn(args);						\
})

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static bool orc_init __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;

static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}
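
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 *
 * .orc_unwind_ip stores 32-bit self-relative offsets rather than absolute
 * 64-bit pointers, halving the table size and keeping it position
 * independent.  With hypothetical addresses: a slot at 0xffffffff82000000
 * holding the value -0x00edcba0 decodes, per orc_ip(), to
 *
 *	0xffffffff82000000 + (-0x00edcba0) = 0xffffffff81123460
 *
 * i.e. the text address that the corresponding ORC entry describes.
 */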

static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid = first, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address.  Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps.  They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}
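
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 *
 * The loop above is a "find the rightmost entry whose start address is
 * <= ip" search: when several entries share a start address (a weak
 * terminator next to a real entry, sorted terminator-first by
 * orc_sort_cmp() below), the rightmost one -- the real entry -- wins.
 * A minimal standalone model over plain integers:
 */
#if 0
static int find_rightmost_le(const unsigned long *tbl, int n, unsigned long key)
{
	int first = 0, last = n - 1, found = -1;

	while (first <= last) {
		int mid = first + (last - first) / 2;

		if (tbl[mid] <= key) {
			found = mid;	/* candidate; keep searching right */
			first = mid + 1;
		} else {
			last = mid - 1;
		}
	}
	return found;	/* e.g. {10, 20, 20, 30}, key 20 -> index 2 */
}
#endif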

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the ftrace entries that are static and
 * defined in ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across a ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return code in
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long caller;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		caller = (unsigned long)ftrace_regs_call;
	else
		caller = (unsigned long)ftrace_call;

	/* Prevent unlikely recursion */
	if (ip == caller)
		return NULL;

	return orc_find(caller);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = UNWIND_HINT_TYPE_CALL
};
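
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 *
 * After "call *%rax" with a NULL %rax, the CPU has already pushed the
 * return address and jumped to 0, so the frame at IP==0 looks exactly
 * like a function entry: the return address sits at *SP.  That is what
 * null_orc_entry encodes (prev_sp = sp + sizeof(long), return address at
 * *sp).  Walked by hand with hypothetical values:
 */
#if 0
	unsigned long sp = 0xffffc90000123ff8;		/* SP while at IP==0 */
	unsigned long ret = *(unsigned long *)sp;	/* caller of the NULL ptr */
	unsigned long prev_sp = sp + sizeof(long);	/* caller's SP, per sp_offset */
#endif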

/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
	.type		= UNWIND_HINT_TYPE_CALL,
	.sp_reg		= ORC_REG_BP,
	.sp_offset	= 16,
	.bp_reg		= ORC_REG_PREV_SP,
	.bp_offset	= -16,
	.end		= 0,
};

static struct orc_entry *orc_find(unsigned long ip)
{
	static struct orc_entry *orc;

	if (ip == 0)
		return &null_orc_entry;

	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely((idx >= lookup_num_blocks-1))) {
			orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (is_kernel_inittext(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	return orc_ftrace_find(ip);
}
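
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 *
 * The fast path above is a two-level lookup: a dense table with one slot
 * per LOOKUP_BLOCK_SIZE (256 bytes at the time of writing) of kernel text
 * narrows the binary search down to the few ORC entries covering that
 * block.  With hypothetical values:
 */
#if 0
	/* ip == LOOKUP_START_IP + 0x1234  ->  idx == 0x1234 / 256 == 0x12 */
	/* if orc_lookup[0x12] == 70 and orc_lookup[0x13] == 75, the answer
	 * lies in __start_orc_unwind[70..75], and __orc_find() binary-
	 * searches just those six entries instead of the whole table. */
#endif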

#ifdef CONFIG_MODULES

static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;

static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;

	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}
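
/*
 * Editor's note -- worked derivation, not part of the original file.
 *
 * The entries are self-relative, so the raw values cannot simply be
 * exchanged: moving a value into a different slot would change the
 * address it decodes to.  With delta = _b - _a (a byte delta), after the
 * swap above:
 *
 *	orc_ip(a) = a + (old *b + delta) = a + old *b + (b - a)
 *	          = b + old *b			(== orc_ip(b) before the swap)
 *	orc_ip(b) = b + (old *a - delta) = a + old *a
 *						(== orc_ip(a) before the swap)
 *
 * i.e. the "+ delta" / "- delta" rebias preserves both decoded targets.
 */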

static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be on the left
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}

void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("WARNING: Bad or missing .orc_unwind table.  Disabling unwinder.\n");
		return;
	}

	/*
	 * Note, the orc_unwind and orc_unwind_ip tables were already
	 * sorted at build time via the 'sorttable' tool.
	 * They're ready for binary search straight away, no need to sort them.
	 */

	/* Initialize the fast lookup table: */
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks-1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("WARNING: Corrupt .orc_unwind table.  Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("WARNING: Corrupt .orc_unwind table.  Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}
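
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 *
 * The loop above is the build side of orc_find()'s fast path: for each
 * LOOKUP_BLOCK_SIZE-sized block of core-kernel text, orc_lookup[i]
 * records the index of the ORC entry live at the block's first byte,
 * with a final slot covering LOOKUP_STOP_IP.  A simplified model,
 * reusing the hypothetical find_rightmost_le() from the __orc_find()
 * sketch earlier:
 */
#if 0
	for (i = 0; i < lookup_num_blocks - 1; i++)
		lookup[i] = find_rightmost_le(ip_tbl, num_entries,
					      start_ip + i * LOOKUP_BLOCK_SIZE);
	lookup[lookup_num_blocks - 1] = find_rightmost_le(ip_tbl, num_entries,
							  stop_ip);
#endif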

unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}

/*
 * Check whether the given address range lies on the stack currently being
 * walked; if not, try to switch state->stack_info to the stack (IRQ,
 * exception, entry, ...) which does contain it.
 */
static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (on_stack(info, addr, len))
		return true;

	return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
		on_stack(info, addr, len);
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}

static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}

/*
 * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
 * value from state->regs.
 *
 * Otherwise, if state->regs just points to IRET regs, and the previous frame
 * had full regs, it's safe to get the value from the previous regs.  This can
 * happen when early/late IRQ entry code gets interrupted by an NMI.
 */
static bool get_reg(struct unwind_state *state, unsigned int reg_off,
		    unsigned long *val)
{
	unsigned int reg = reg_off/8;

	if (!state->regs)
		return false;

	if (state->full_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
		return true;
	}

	if (state->prev_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
		return true;
	}

	return false;
}
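
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 *
 * reg_off/8 works because pt_regs on x86-64 is laid out as consecutive
 * 8-byte register slots, so a byte offset from offsetof() doubles as an
 * index into the struct viewed as unsigned long[]:
 */
#if 0
	/* with the current layout, offsetof(struct pt_regs, r10) == 56,
	 * so reg == 7 and ((unsigned long *)regs)[7] aliases regs->r10 */
	unsigned long v;
	bool ok = get_reg(state, offsetof(struct pt_regs, r10), &v);
#endif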

bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * For a call frame (as opposed to a signal frame), state->ip points to
	 * the instruction after the call.  That instruction's stack layout
	 * could be different from the call instruction's layout, for example
	 * if the call was to a noreturn function.  So get the ORC data for the
	 * call instruction itself.
	 */
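	/*
	 * Editor's note -- worked example, not part of the original file:
	 * for a 5-byte CALL occupying [0x400, 0x405), the pushed return
	 * address is 0x405.  If the callee is noreturn, 0x405 may already
	 * belong to a different function with different ORC data, so the
	 * lookup below uses 0x404 -- the last byte of the CALL itself --
	 * which is guaranteed to describe the call site's stack layout.
	 * Signal frames point at the interrupted instruction, so they are
	 * looked up unadjusted.
	 */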
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc) {
		/*
		 * As a fallback, try to assume this code uses a frame pointer.
		 * This is useful for generated code, like BPF, which ORC
		 * doesn't know about.  This is just a guess, so the rest of
		 * the unwind is no longer considered reliable.
		 */
		orc = &orc_fp_entry;
		state->error = true;
	}

	/* End-of-stack check for kernel threads: */
	if (orc->sp_reg == ORC_REG_UNDEFINED) {
		if (!orc->end)
			goto err;

		goto the_end;
	}

	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
			orc_warn_current("missing R10 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_R13:
		if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
			orc_warn_current("missing R13 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DI:
		if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
			orc_warn_current("missing RDI value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DX:
		if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
			orc_warn_current("missing DX value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	default:
		orc_warn("unknown SP base reg %d at %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}

	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto err;

		if (orc->sp_reg == ORC_REG_SP_INDIRECT)
			sp += orc->sp_offset;
	}

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case UNWIND_HINT_TYPE_CALL:
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

		state->ip = unwind_recover_ret_addr(state, state->ip,
						    (unsigned long *)ip_p);
		state->sp = sp;
		state->regs = NULL;
		state->prev_regs = NULL;
		state->signal = false;
		break;

	case UNWIND_HINT_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}
		/*
		 * There is a small chance of interrupting right at the entry
		 * of arch_rethook_trampoline(), where no ORC info exists --
		 * that point is immediately after the RET to
		 * arch_rethook_trampoline() via the modified return address.
		 * There, the @addr_p passed to unwind_recover_rethook()
		 * (which must point to the stack entry storing the modified
		 * return address) is "SP - (one stack entry)", because SP
		 * was incremented by the RET.
		 */
		state->ip = unwind_recover_rethook(state, state->ip,
				(unsigned long *)(state->sp - sizeof(long)));
		state->regs = (struct pt_regs *)sp;
		state->prev_regs = NULL;
		state->full_regs = true;
		state->signal = true;
		break;

	case UNWIND_HINT_TYPE_REGS_PARTIAL:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access iret registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}
		/* See UNWIND_HINT_TYPE_REGS case comment. */
		state->ip = unwind_recover_rethook(state, state->ip,
				(unsigned long *)(state->sp - sizeof(long)));

		if (state->full_regs)
			state->prev_regs = state->regs;
		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		state->signal = true;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d at %pB\n",
			 orc->type, (void *)orig_ip);
		goto err;
	}

	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
			state->bp = tmp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}

	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn_current("stack going in the wrong direction? at %pB\n",
				 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
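
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 *
 * A worked UNWIND_HINT_TYPE_CALL step with hypothetical values: suppose
 * the ORC entry says sp_reg == ORC_REG_SP and sp_offset == 0x18.  Then:
 *
 *	prev SP  = cur SP + 0x18	(top of the caller's frame)
 *	ret addr = *(prev SP - 8)	(the word the CALL pushed)
 *
 * so from cur SP = 0xffffc90000123e00 the unwinder computes
 * prev SP = 0xffffc90000123e18 and reads the return address from
 * 0xffffc90000123e10, exactly as the ip_p arithmetic above does.
 */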

void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	if (!orc_init)
		goto err;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU.  This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto err;

	if (regs) {
		if (user_mode(regs))
			goto the_end;

		state->ip = regs->ip;
		state->sp = regs->sp;
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp + sizeof(*frame);
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
		state->signal = (void *)state->ip == ret_from_fork;
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack.  It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		state->error = true;
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at.  Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
			state->sp < (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

err:
	state->error = true;
the_end:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(__unwind_start);
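
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 *
 * Typical consumption of this API, modeled on the kernel's own stack
 * dumper (show_trace_log_lvl() in arch/x86/kernel/dumpstack.c);
 * unwind_start() is the <asm/unwind.h> wrapper around __unwind_start():
 */
#if 0
	struct unwind_state state;
	unsigned long addr;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr)
			break;
		printk("  %pB\n", (void *)addr);
	}
#endif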