smp.c (35179B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
union cfd_seq_cnt {
	u64 val;
	struct {
		u64 src:16;
		u64 dst:16;
#define CFD_SEQ_NOCPU	0xffff
		u64 type:4;
#define CFD_SEQ_QUEUE	0
#define CFD_SEQ_IPI	1
#define CFD_SEQ_NOIPI	2
#define CFD_SEQ_PING	3
#define CFD_SEQ_PINGED	4
#define CFD_SEQ_HANDLE	5
#define CFD_SEQ_DEQUEUE	6
#define CFD_SEQ_IDLE	7
#define CFD_SEQ_GOTIPI	8
#define CFD_SEQ_HDLEND	9
		u64 cnt:28;
	} u;
};

static char *seq_type[] = {
	[CFD_SEQ_QUEUE]		= "queue",
	[CFD_SEQ_IPI]		= "ipi",
	[CFD_SEQ_NOIPI]		= "noipi",
	[CFD_SEQ_PING]		= "ping",
	[CFD_SEQ_PINGED]	= "pinged",
	[CFD_SEQ_HANDLE]	= "handle",
	[CFD_SEQ_DEQUEUE]	= "dequeue (src CPU 0 == empty)",
	[CFD_SEQ_IDLE]		= "idle",
	[CFD_SEQ_GOTIPI]	= "gotipi",
	[CFD_SEQ_HDLEND]	= "hdlend (src CPU 0 == early)",
};

struct cfd_seq_local {
	u64 ping;
	u64 pinged;
	u64 handle;
	u64 dequeue;
	u64 idle;
	u64 gotipi;
	u64 hdlend;
};
#endif

struct cfd_percpu {
	call_single_data_t csd;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	u64 seq_queue;
	u64 seq_ipi;
	u64 seq_noipi;
#endif
};

struct call_function_data {
	struct cfd_percpu __percpu *pcpu;
	cpumask_var_t cpumask;
	cpumask_var_t cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void __flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->pcpu = alloc_percpu(struct cfd_percpu);
	if (!cfd->pcpu) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->pcpu);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	__flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);

static int __init csdlock_debug(char *str)
{
	unsigned int val = 0;

	if (str && !strcmp(str, "ext")) {
		val = 1;
		static_branch_enable(&csdlock_debug_extended);
	} else
		get_option(&str, &val);

	if (val)
		static_branch_enable(&csdlock_debug_enabled);

	return 0;
}
early_param("csdlock_debug", csdlock_debug);

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);
static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);

static ulong csd_lock_timeout = 5000;	/* CSD lock timeout in milliseconds. */
module_param(csd_lock_timeout, ulong, 0444);

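/*
 * Editorial note (not part of the original file): the CSD-lock debug
 * machinery above is controlled from the kernel command line, assuming a
 * kernel built with CONFIG_CSD_LOCK_WAIT_DEBUG=y.  Something like the
 * following should work; the parameter prefix for csd_lock_timeout is
 * assumed to follow the usual "module.param" convention for built-in code:
 *
 *	csdlock_debug=1			enable CSD-lock wait debugging
 *	csdlock_debug=ext		also enable extended sequence tracing
 *	smp.csd_lock_timeout=10000	complain after 10000 ms instead of 5000 ms
 */
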
static atomic_t csd_bug_count = ATOMIC_INIT(0);
static u64 cfd_seq;

#define CFD_SEQ(s, d, t, c) \
	(union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }

static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
{
	union cfd_seq_cnt new, old;

	new = CFD_SEQ(src, dst, type, 0);

	do {
		old.val = READ_ONCE(cfd_seq);
		new.u.cnt = old.u.cnt + 1;
	} while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);

	return old.val;
}

#define cfd_seq_store(var, src, dst, type)				\
	do {								\
		if (static_branch_unlikely(&csdlock_debug_extended))	\
			var = cfd_seq_inc(src, dst, type);		\
	} while (0)

/* Record current CSD work for current CPU, NULL to erase. */
static void __csd_lock_record(struct __call_single_data *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline void csd_lock_record(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled))
		__csd_lock_record(csd);
}

static int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
			     unsigned int type, union cfd_seq_cnt *data,
			     unsigned int *n_data, unsigned int now)
{
	union cfd_seq_cnt new[2];
	unsigned int i, j, k;

	new[0].val = val;
	new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);

	for (i = 0; i < 2; i++) {
		if (new[i].u.cnt <= now)
			new[i].u.cnt |= 0x80000000U;
		for (j = 0; j < *n_data; j++) {
			if (new[i].u.cnt == data[j].u.cnt) {
				/* Direct read value trumps generated one. */
				if (i == 0)
					data[j].val = new[i].val;
				break;
			}
			if (new[i].u.cnt < data[j].u.cnt) {
				for (k = *n_data; k > j; k--)
					data[k].val = data[k - 1].val;
				data[j].val = new[i].val;
				(*n_data)++;
				break;
			}
		}
		if (j == *n_data) {
			data[j].val = new[i].val;
			(*n_data)++;
		}
	}
}

static const char *csd_lock_get_type(unsigned int type)
{
	return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
}

static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)
{
	struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
	unsigned int srccpu = csd->node.src;
	struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
	unsigned int now;
	union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
	unsigned int n_data = 0, i;

	data[0].val = READ_ONCE(cfd_seq);
	now = data[0].u.cnt;

	cfd_seq_data_add(pcpu->seq_queue, srccpu, cpu, CFD_SEQ_QUEUE, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_ipi, srccpu, cpu, CFD_SEQ_IPI, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_noipi, srccpu, cpu, CFD_SEQ_NOIPI, data, &n_data, now);

	cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);
	cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);

	cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);
	cfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);
	cfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);
	cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
	cfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);

	for (i = 0; i < n_data; i++) {
		pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
			 data[i].u.cnt & ~0x80000000U, data[i].u.src,
			 data[i].u.dst, csd_lock_get_type(data[i].u.type));
	}
	pr_alert("\tcsd: cnt now: %07x\n", now);
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->node.u_flags);
	unsigned long long csd_lock_timeout_ns = csd_lock_timeout * NSEC_PER_MSEC;

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= csd_lock_timeout_ns || csd_lock_timeout_ns == 0))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
		 cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (static_branch_unlikely(&csdlock_debug_extended))
			csd_lock_print_extended(csd, cpu);
		if (!trigger_single_cpu_backtrace(cpu))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void __csd_lock_wait(struct __call_single_data *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled)) {
		__csd_lock_wait(csd);
		return;
	}

	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}

static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
{
	unsigned int this_cpu = smp_processor_id();
	struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
	struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);

	cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
	if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
		cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
		cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
		send_call_function_single_ipi(cpu);
		cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
	} else {
		cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
	}
}
#else
#define cfd_seq_store(var, src, dst, type)

static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(struct __call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->node.u_flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->node.u_flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	if (static_branch_unlikely(&csdlock_debug_extended)) {
		unsigned int type;

		type = CSD_TYPE(container_of(node, call_single_data_t,
					     node.llist));
		if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
			__smp_call_single_queue_debug(cpu, node);
			return;
		}
	}
#endif

	/*
	 * The list addition should be visible before we send the IPI: the
	 * handler locks the list to pull the entry off it, and the normal
	 * cache coherency rules implied by spinlocks make the addition
	 * visible to it.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should be
	 * added to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU.
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->node.llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_GOTIPI);
	__flush_smp_call_function_queue(true);
}

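/*
 * Illustrative sketch (not part of the original file): architecture code
 * delivers the "call function single" IPI and then simply invokes the
 * generic handler above with interrupts disabled.  On an architecture that
 * routes IPIs through the normal irq layer this could look roughly like
 * the following; the handler name and its wiring are hypothetical.
 */
#if 0	/* example only */
static irqreturn_t ipi_call_function_single(int irq, void *dev_id)
{
	/* hardirq context, interrupts disabled, as the handler requires */
	generic_smp_call_function_single_interrupt();
	return IRQ_HANDLED;
}
#endif
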
/**
 * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void __flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HANDLE);
	entry = llist_del_all(head);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
		      /* Special meaning of source cpu: 0 == queue empty */
		      entry ? CFD_SEQ_NOCPU : 0,
		      smp_processor_id(), CFD_SEQ_DEQUEUE);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && entry != NULL)) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, node.llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->node.llist;
		}
	}

	if (!entry) {
		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
			      0, smp_processor_id(),
			      CFD_SEQ_HDLEND);
		return;
	}

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->node.llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HDLEND);
}


/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *				   from task context (idle, migration thread)
 *
 * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
 * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
 * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
 * handle queued SMP function calls before scheduling.
 *
 * The migration thread has to ensure that an eventually pending wakeup has
 * been handled before it migrates a task.
 */
void flush_smp_call_function_queue(void)
{
	unsigned int was_pending;
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_IDLE);
	local_irq_save(flags);
	/* Get the already pending soft interrupts for RT enabled kernels */
	was_pending = local_softirq_pending();
	__flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq_post_smp_call_flush(was_pending);

	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->node.src = smp_processor_id();
	csd->node.dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

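/*
 * Illustrative sketch (not part of the original file): a typical
 * synchronous cross-call.  The callback runs in IPI (hardirq) context on
 * the target CPU, so it must be fast, non-blocking and non-sleeping.
 * The per-CPU counter and helper names below are hypothetical.
 */
#if 0	/* example only */
static DEFINE_PER_CPU(unsigned long, my_counter);

static void read_remote_counter(void *info)
{
	*(unsigned long *)info = __this_cpu_read(my_counter);
}

static unsigned long get_counter_on(int cpu)
{
	unsigned long val = 0;

	/* wait == 1: returns only after the callback has run on @cpu */
	smp_call_function_single(cpu, read_remote_counter, &val, 1);
	return val;
}
#endif
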
/**
 * smp_call_function_single_async() - Run an asynchronous function on a
 *				      specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If this function is called with a csd that has not yet been processed
 * by a previous call to smp_call_function_single_async(), it will return
 * -EBUSY immediately to indicate that the csd object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 *
 * Return: %0 on success or negative errno value on error
 */
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->node.u_flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->node.u_flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

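/*
 * Illustrative sketch (not part of the original file): asynchronous use
 * with a caller-owned csd embedded in a longer-lived object.  The caller
 * must not reuse the csd until the previous call has been processed;
 * -EBUSY from smp_call_function_single_async() signals exactly that.
 * struct my_dev and the my_dev_* helpers are hypothetical.
 */
#if 0	/* example only */
struct my_dev {
	call_single_data_t csd;
	int target_cpu;
};

static void my_dev_remote_kick(void *info)
{
	/* runs in IPI context on dev->target_cpu */
}

static int my_dev_poke(struct my_dev *dev)
{
	dev->csd.func = my_dev_remote_kick;
	dev->csd.info = dev;

	/* May be called with interrupts disabled; never waits. */
	return smp_call_function_single_async(dev->target_cpu, &dev->csd);
}
#endif
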
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

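/*
 * Illustrative sketch (not part of the original file): run a callback on
 * whichever CPU of a device's affinity mask is cheapest to reach,
 * preferring the current CPU and then the local NUMA node, per the
 * selection order documented above.  The mask and callback are hypothetical.
 */
#if 0	/* example only */
static void touch_device_cache(void *info)
{
	/* fast, non-blocking work done on behalf of the device */
}

static int touch_on_affine_cpu(const struct cpumask *affinity_mask)
{
	/* Returns a negative error if no CPU in the mask is online. */
	return smp_call_function_any(affinity_mask, touch_device_cache, NULL, 1);
}
#endif
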
/*
 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
 *
 * %SCF_WAIT:		Wait until function execution is completed
 * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
 */
#define SCF_WAIT	(1U << 0)
#define SCF_RUN_LOCAL	(1U << 1)

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					unsigned int scf_flags,
					smp_cond_func_t cond_func)
{
	int cpu, last_cpu, this_cpu = smp_processor_id();
	struct call_function_data *cfd;
	bool wait = scf_flags & SCF_WAIT;
	bool run_remote = false;
	bool run_local = false;
	int nr_cpus = 0;

	lockdep_assert_preemption_disabled();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	if (cpu_online(this_cpu) && !oops_in_progress &&
	    !early_boot_irqs_disabled)
		lockdep_assert_irqs_enabled();

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Check if we need local execution. */
	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
		run_local = true;

	/* Check if we need remote execution, i.e., any CPU excluding this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (cpu < nr_cpu_ids)
		run_remote = true;

	if (run_remote) {
		cfd = this_cpu_ptr(&cfd_data);
		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
		__cpumask_clear_cpu(this_cpu, cfd->cpumask);

		cpumask_clear(cfd->cpumask_ipi);
		for_each_cpu(cpu, cfd->cpumask) {
			struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
			call_single_data_t *csd = &pcpu->csd;

			if (cond_func && !cond_func(cpu, info))
				continue;

			csd_lock(csd);
			if (wait)
				csd->node.u_flags |= CSD_TYPE_SYNC;
			csd->func = func;
			csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
			csd->node.src = smp_processor_id();
			csd->node.dst = cpu;
#endif
			cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
				nr_cpus++;
				last_cpu = cpu;

				cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
			} else {
				cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
			}
		}

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);

		/*
		 * Choose the most efficient way to send an IPI. Note that the
		 * number of CPUs might be zero due to concurrent changes to the
		 * provided mask.
		 */
		if (nr_cpus == 1)
			send_call_function_single_ipi(last_cpu);
		else if (likely(nr_cpus > 1))
			arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
	}

	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}

	if (run_remote && wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: Bitmask that controls the operation. If %SCF_WAIT is set, wait
 *	  (atomically) until function has completed on other CPUs. If
 *	  %SCF_RUN_LOCAL is set, the function will also be run locally
 *	  if the local CPU is set in the @cpumask.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *	  on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus, (num_cpus > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned int scf_flags = SCF_RUN_LOCAL;

	if (wait)
		scf_flags |= SCF_WAIT;

	preempt_disable();
	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

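/*
 * Illustrative sketch (not part of the original file): a conditional
 * cross-call using on_each_cpu_cond_mask().  The condition callback runs
 * with preemption disabled and decides, per CPU, whether an IPI is sent;
 * here only CPUs with a pending per-CPU flag are interrupted.  The per-CPU
 * flag and the flush_* helpers are hypothetical.
 */
#if 0	/* example only */
static DEFINE_PER_CPU(bool, flush_needed);

static bool flush_cond(int cpu, void *info)
{
	return per_cpu(flush_needed, cpu);
}

static void flush_func(void *info)
{
	__this_cpu_write(flush_needed, false);
	/* ... do the actual per-CPU flush here ... */
}

static void flush_where_needed(void)
{
	/* wait == true: return only after all selected CPUs ran flush_func() */
	on_each_cpu_cond_mask(flush_cond, flush_func, NULL, true, cpu_online_mask);
}
#endif
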
The execution of 1169 * the function can only happen on the remote cpus after they have 1170 * left the idle function which had been called via pm_idle function 1171 * pointer. So it's guaranteed that nothing uses the previous pointer 1172 * anymore. 1173 */ 1174void kick_all_cpus_sync(void) 1175{ 1176 /* Make sure the change is visible before we kick the cpus */ 1177 smp_mb(); 1178 smp_call_function(do_nothing, NULL, 1); 1179} 1180EXPORT_SYMBOL_GPL(kick_all_cpus_sync); 1181 1182/** 1183 * wake_up_all_idle_cpus - break all cpus out of idle 1184 * wake_up_all_idle_cpus try to break all cpus which is in idle state even 1185 * including idle polling cpus, for non-idle cpus, we will do nothing 1186 * for them. 1187 */ 1188void wake_up_all_idle_cpus(void) 1189{ 1190 int cpu; 1191 1192 for_each_possible_cpu(cpu) { 1193 preempt_disable(); 1194 if (cpu != smp_processor_id() && cpu_online(cpu)) 1195 wake_up_if_idle(cpu); 1196 preempt_enable(); 1197 } 1198} 1199EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus); 1200 1201/** 1202 * struct smp_call_on_cpu_struct - Call a function on a specific CPU 1203 * @work: &work_struct 1204 * @done: &completion to signal 1205 * @func: function to call 1206 * @data: function's data argument 1207 * @ret: return value from @func 1208 * @cpu: target CPU (%-1 for any CPU) 1209 * 1210 * Used to call a function on a specific cpu and wait for it to return. 1211 * Optionally make sure the call is done on a specified physical cpu via vcpu 1212 * pinning in order to support virtualized environments. 1213 */ 1214struct smp_call_on_cpu_struct { 1215 struct work_struct work; 1216 struct completion done; 1217 int (*func)(void *); 1218 void *data; 1219 int ret; 1220 int cpu; 1221}; 1222 1223static void smp_call_on_cpu_callback(struct work_struct *work) 1224{ 1225 struct smp_call_on_cpu_struct *sscs; 1226 1227 sscs = container_of(work, struct smp_call_on_cpu_struct, work); 1228 if (sscs->cpu >= 0) 1229 hypervisor_pin_vcpu(sscs->cpu); 1230 sscs->ret = sscs->func(sscs->data); 1231 if (sscs->cpu >= 0) 1232 hypervisor_pin_vcpu(-1); 1233 1234 complete(&sscs->done); 1235} 1236 1237int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys) 1238{ 1239 struct smp_call_on_cpu_struct sscs = { 1240 .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done), 1241 .func = func, 1242 .data = par, 1243 .cpu = phys ? cpu : -1, 1244 }; 1245 1246 INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback); 1247 1248 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) 1249 return -ENXIO; 1250 1251 queue_work_on(cpu, system_wq, &sscs.work); 1252 wait_for_completion(&sscs.done); 1253 1254 return sscs.ret; 1255} 1256EXPORT_SYMBOL_GPL(smp_call_on_cpu);