From e162b39a368f0401e41b558f430c354d12a85b37 Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Thu, 15 Jan 2009 11:08:40 -0800 Subject: softlockup: decouple hung tasks check from softlockup detection Decoupling allows: * hung tasks check to happen at very low priority * hung tasks check and softlockup to be enabled/disabled independently at compile and/or run-time * individual panic settings to be enabled disabled independently at compile and/or run-time * softlockup threshold to be reduced without increasing hung tasks poll frequency (hung task check is expensive relative to softlock watchdog) * hung task check to be zero over-head when disabled at run-time Signed-off-by: Mandeep Singh Baines Signed-off-by: Ingo Molnar --- kernel/Makefile | 1 + kernel/hung_task.c | 198 ++++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/softlockup.c | 100 -------------------------- kernel/sysctl.c | 15 +++- 4 files changed, 213 insertions(+), 101 deletions(-) create mode 100644 kernel/hung_task.c (limited to 'kernel') diff --git a/kernel/Makefile b/kernel/Makefile index 2aebc4cd7878..979745f1b4bc 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -74,6 +74,7 @@ obj-$(CONFIG_AUDIT_TREE) += audit_tree.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o +obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ obj-$(CONFIG_SECCOMP) += seccomp.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o diff --git a/kernel/hung_task.c b/kernel/hung_task.c new file mode 100644 index 000000000000..ba5a77cad3bb --- /dev/null +++ b/kernel/hung_task.c @@ -0,0 +1,198 @@ +/* + * Detect Hung Task + * + * kernel/hung_task.c - kernel thread for detecting tasks stuck in D state + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Have a reasonable limit on the number of tasks checked: + */ +unsigned long __read_mostly sysctl_hung_task_check_count = 1024; + +/* + * Zero means infinite timeout - no checking done: + */ +unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; +static unsigned long __read_mostly hung_task_poll_jiffies; + +unsigned long __read_mostly sysctl_hung_task_warnings = 10; + +static int __read_mostly did_panic; + +static struct task_struct *watchdog_task; + +/* + * Should we panic (and reboot, if panic_timeout= is set) when a + * hung task is detected: + */ +unsigned int __read_mostly sysctl_hung_task_panic = + CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE; + +static int __init hung_task_panic_setup(char *str) +{ + sysctl_hung_task_panic = simple_strtoul(str, NULL, 0); + + return 1; +} +__setup("hung_task_panic=", hung_task_panic_setup); + +static int +hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr) +{ + did_panic = 1; + + return NOTIFY_DONE; +} + +static struct notifier_block panic_block = { + .notifier_call = hung_task_panic, +}; + +/* + * Returns seconds, approximately. We don't need nanosecond + * resolution, and we don't need to waste time with a big divide when + * 2^30ns == 1.074s. 
+ */ +static unsigned long get_timestamp(void) +{ + int this_cpu = raw_smp_processor_id(); + + return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ +} + +static void check_hung_task(struct task_struct *t, unsigned long now) +{ + unsigned long switch_count = t->nvcsw + t->nivcsw; + + if (t->flags & PF_FROZEN) + return; + + if (switch_count != t->last_switch_count || !t->last_switch_timestamp) { + t->last_switch_count = switch_count; + t->last_switch_timestamp = now; + return; + } + if ((long)(now - t->last_switch_timestamp) < + sysctl_hung_task_timeout_secs) + return; + if (!sysctl_hung_task_warnings) + return; + sysctl_hung_task_warnings--; + + /* + * Ok, the task did not get scheduled for more than 2 minutes, + * complain: + */ + printk(KERN_ERR "INFO: task %s:%d blocked for more than " + "%ld seconds.\n", t->comm, t->pid, + sysctl_hung_task_timeout_secs); + printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" + " disables this message.\n"); + sched_show_task(t); + __debug_show_held_locks(t); + + t->last_switch_timestamp = now; + touch_nmi_watchdog(); + + if (sysctl_hung_task_panic) + panic("hung_task: blocked tasks"); +} + +/* + * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for + * a really long time (120 seconds). If that happens, print out + * a warning. + */ +static void check_hung_uninterruptible_tasks(void) +{ + int max_count = sysctl_hung_task_check_count; + unsigned long now = get_timestamp(); + struct task_struct *g, *t; + + /* + * If the system crashed already then all bets are off, + * do not report extra hung tasks: + */ + if (test_taint(TAINT_DIE) || did_panic) + return; + + read_lock(&tasklist_lock); + do_each_thread(g, t) { + if (!--max_count) + goto unlock; + /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */ + if (t->state == TASK_UNINTERRUPTIBLE) + check_hung_task(t, now); + } while_each_thread(g, t); + unlock: + read_unlock(&tasklist_lock); +} + +static void update_poll_jiffies(void) +{ + /* timeout of 0 will disable the watchdog */ + if (sysctl_hung_task_timeout_secs == 0) + hung_task_poll_jiffies = MAX_SCHEDULE_TIMEOUT; + else + hung_task_poll_jiffies = sysctl_hung_task_timeout_secs * HZ / 2; +} + +/* + * Process updating of timeout sysctl + */ +int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int ret; + + ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos); + + if (ret || !write) + goto out; + + update_poll_jiffies(); + + wake_up_process(watchdog_task); + + out: + return ret; +} + +/* + * kthread which checks for tasks stuck in D state + */ +static int watchdog(void *dummy) +{ + set_user_nice(current, 0); + update_poll_jiffies(); + + for ( ; ; ) { + while (schedule_timeout_interruptible(hung_task_poll_jiffies)); + check_hung_uninterruptible_tasks(); + } + + return 0; +} + +static int __init hung_task_init(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, &panic_block); + watchdog_task = kthread_run(watchdog, NULL, "khungtaskd"); + + return 0; +} + +module_init(hung_task_init); diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 85d5a2455103..88796c330838 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -165,98 +165,12 @@ void softlockup_tick(void) panic("softlockup: hung tasks"); } -/* - * Have a reasonable limit on the number of tasks checked: - */ -unsigned long __read_mostly sysctl_hung_task_check_count = 1024; - -/* - * Zero means infinite timeout - no checking done: - 
*/ -unsigned long __read_mostly sysctl_hung_task_timeout_secs = 480; - -unsigned long __read_mostly sysctl_hung_task_warnings = 10; - -/* - * Only do the hung-tasks check on one CPU: - */ -static int check_cpu __read_mostly = -1; - -static void check_hung_task(struct task_struct *t, unsigned long now) -{ - unsigned long switch_count = t->nvcsw + t->nivcsw; - - if (t->flags & PF_FROZEN) - return; - - if (switch_count != t->last_switch_count || !t->last_switch_timestamp) { - t->last_switch_count = switch_count; - t->last_switch_timestamp = now; - return; - } - if ((long)(now - t->last_switch_timestamp) < - sysctl_hung_task_timeout_secs) - return; - if (!sysctl_hung_task_warnings) - return; - sysctl_hung_task_warnings--; - - /* - * Ok, the task did not get scheduled for more than 2 minutes, - * complain: - */ - printk(KERN_ERR "INFO: task %s:%d blocked for more than " - "%ld seconds.\n", t->comm, t->pid, - sysctl_hung_task_timeout_secs); - printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" - " disables this message.\n"); - sched_show_task(t); - __debug_show_held_locks(t); - - t->last_switch_timestamp = now; - touch_nmi_watchdog(); - - if (softlockup_panic) - panic("softlockup: blocked tasks"); -} - -/* - * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for - * a really long time (120 seconds). If that happens, print out - * a warning. - */ -static void check_hung_uninterruptible_tasks(int this_cpu) -{ - int max_count = sysctl_hung_task_check_count; - unsigned long now = get_timestamp(this_cpu); - struct task_struct *g, *t; - - /* - * If the system crashed already then all bets are off, - * do not report extra hung tasks: - */ - if (test_taint(TAINT_DIE) || did_panic) - return; - - read_lock(&tasklist_lock); - do_each_thread(g, t) { - if (!--max_count) - goto unlock; - /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */ - if (t->state == TASK_UNINTERRUPTIBLE) - check_hung_task(t, now); - } while_each_thread(g, t); - unlock: - read_unlock(&tasklist_lock); -} - /* * The watchdog thread - runs every second and touches the timestamp. */ static int watchdog(void *__bind_cpu) { struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - int this_cpu = (long)__bind_cpu; sched_setscheduler(current, SCHED_FIFO, ¶m); @@ -276,11 +190,6 @@ static int watchdog(void *__bind_cpu) if (kthread_should_stop()) break; - if (this_cpu == check_cpu) { - if (sysctl_hung_task_timeout_secs) - check_hung_uninterruptible_tasks(this_cpu); - } - set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); @@ -312,18 +221,9 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: - check_cpu = cpumask_any(cpu_online_mask); wake_up_process(per_cpu(watchdog_task, hotcpu)); break; #ifdef CONFIG_HOTPLUG_CPU - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - if (hotcpu == check_cpu) { - /* Pick any other online cpu. 
*/ - check_cpu = cpumask_any_but(cpu_online_mask, hotcpu); - } - break; - case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: if (!per_cpu(watchdog_task, hotcpu)) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 596dc31a7116..2481ed30d2b5 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -805,6 +805,19 @@ static struct ctl_table kern_table[] = { .extra1 = &neg_one, .extra2 = &sixty, }, +#endif +#ifdef CONFIG_DETECT_HUNG_TASK + { + .ctl_name = CTL_UNNUMBERED, + .procname = "hung_task_panic", + .data = &sysctl_hung_task_panic, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + .extra2 = &one, + }, { .ctl_name = CTL_UNNUMBERED, .procname = "hung_task_check_count", @@ -820,7 +833,7 @@ static struct ctl_table kern_table[] = { .data = &sysctl_hung_task_timeout_secs, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = &proc_dohung_task_timeout_secs, .strategy = &sysctl_intvec, }, { -- cgit v1.2.3-71-gd317 From af432eb1cc3178ec7109aca2283aafb1c12ccac1 Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Fri, 16 Jan 2009 09:09:38 -0800 Subject: softlockup: fix to allow compiling with !DETECT_HUNG_TASK Fixes the following compile error: kernel/fork.c:1049: error: 'struct task_struct' has no member named 'last_switch_count' kernel/fork.c:1050: error: 'struct task_struct' has no member named 'last_switch_timestamp' Signed-off-by: Mandeep Singh Baines Signed-off-by: Ingo Molnar --- kernel/fork.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 1d68f1255dd8..fb9444282836 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1041,7 +1041,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->default_timer_slack_ns = current->timer_slack_ns; -#ifdef CONFIG_DETECT_SOFTLOCKUP +#ifdef CONFIG_DETECT_HUNG_TASK p->last_switch_count = 0; p->last_switch_timestamp = 0; #endif -- cgit v1.2.3-71-gd317 From 603a148f434742fff08273207ffa44176cad13a1 Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Sat, 17 Jan 2009 10:31:48 -0800 Subject: softlockup: fix potential race in hung_task when resetting timeout Impact: fix potential false panic A potential race exists if sysctl_hung_task_timeout_secs is reset to 0 while inside check_hung_uniterruptible_tasks(). If check_task() is entered, a comparison with 0 will result in a false hung_task being detected. If sysctl_hung_task_panic is set, the system will panic. 
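In check_hung_task() terms the race looks like this (simplified sketch, not a verbatim quote of the code):

        /* admin just wrote 0 to hung_task_timeout_secs, mid-scan */
        if ((long)(now - t->last_switch_timestamp) <
                        sysctl_hung_task_timeout_secs)
                return;
        /*
         * With the sysctl already 0 the delta is never < 0, so the
         * early return is skipped, the task is reported as hung, and
         * the box panics if hung_task_panic is set, even though a
         * timeout of 0 is meant to disable checking entirely.
         */

Caching the timeout once per scan closes that window.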
Signed-off-by: Mandeep Singh Baines Signed-off-by: Ingo Molnar --- kernel/hung_task.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/hung_task.c b/kernel/hung_task.c index ba5a77cad3bb..ba8ccd432963 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -72,7 +72,8 @@ static unsigned long get_timestamp(void) return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ } -static void check_hung_task(struct task_struct *t, unsigned long now) +static void check_hung_task(struct task_struct *t, unsigned long now, + unsigned long timeout) { unsigned long switch_count = t->nvcsw + t->nivcsw; @@ -84,8 +85,7 @@ static void check_hung_task(struct task_struct *t, unsigned long now) t->last_switch_timestamp = now; return; } - if ((long)(now - t->last_switch_timestamp) < - sysctl_hung_task_timeout_secs) + if ((long)(now - t->last_switch_timestamp) < timeout) return; if (!sysctl_hung_task_warnings) return; @@ -96,8 +96,7 @@ static void check_hung_task(struct task_struct *t, unsigned long now) * complain: */ printk(KERN_ERR "INFO: task %s:%d blocked for more than " - "%ld seconds.\n", t->comm, t->pid, - sysctl_hung_task_timeout_secs); + "%ld seconds.\n", t->comm, t->pid, timeout); printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" " disables this message.\n"); sched_show_task(t); @@ -115,7 +114,7 @@ static void check_hung_task(struct task_struct *t, unsigned long now) * a really long time (120 seconds). If that happens, print out * a warning. */ -static void check_hung_uninterruptible_tasks(void) +static void check_hung_uninterruptible_tasks(unsigned long timeout) { int max_count = sysctl_hung_task_check_count; unsigned long now = get_timestamp(); @@ -134,7 +133,7 @@ static void check_hung_uninterruptible_tasks(void) goto unlock; /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */ if (t->state == TASK_UNINTERRUPTIBLE) - check_hung_task(t, now); + check_hung_task(t, now, timeout); } while_each_thread(g, t); unlock: read_unlock(&tasklist_lock); @@ -180,8 +179,17 @@ static int watchdog(void *dummy) update_poll_jiffies(); for ( ; ; ) { + unsigned long timeout; + while (schedule_timeout_interruptible(hung_task_poll_jiffies)); - check_hung_uninterruptible_tasks(); + + /* + * Need to cache timeout here to avoid timeout being set + * to 0 via sysctl while inside check_hung_*_tasks(). + */ + timeout = sysctl_hung_task_timeout_secs; + if (timeout) + check_hung_uninterruptible_tasks(timeout); } return 0; -- cgit v1.2.3-71-gd317 From ce9dbe244bf2063c41792e40dae7745957b118e0 Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Wed, 4 Feb 2009 20:35:48 -0800 Subject: softlockup: check all tasks in hung_task MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Impact: extend the scope of hung-task checks Changed the default value of hung_task_check_count to PID_MAX_LIMIT. hung_task_batch_count added to put an upper bound on the critical section. Every hung_task_batch_count checks, the rcu lock is never held for a too long time. Keeping the critical section small minimizes time preemption is disabled and keeps rcu grace periods small. To prevent following a stale pointer, get_task_struct is called on g and t. To verify that g and t have not been unhashed while outside the critical section, the task states are checked. The design was proposed by Frédéric Weisbecker. 
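Condensed, the new scan loop looks like this (shortened from the hunks below):

        do_each_thread(g, t) {
                if (!--max_count)
                        goto unlock;
                if (!--batch_count) {
                        batch_count = HUNG_TASK_BATCHING;
                        /* pin g and t, drop the lock, give RCU and preemption a break */
                        rcu_lock_break(g, t);
                        /* stop if t or g was unhashed while the lock was dropped */
                        if (t->state == TASK_DEAD || g->state == TASK_DEAD)
                                goto unlock;
                }
                if (t->state == TASK_UNINTERRUPTIBLE)
                        check_hung_task(t, now, timeout);
        } while_each_thread(g, t);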
Signed-off-by: Mandeep Singh Baines Suggested-by: Frédéric Weisbecker Acked-by: Andrew Morton Signed-off-by: Ingo Molnar --- kernel/hung_task.c | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/hung_task.c b/kernel/hung_task.c index ba8ccd432963..481ca8b5c2bc 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -17,9 +17,18 @@ #include /* - * Have a reasonable limit on the number of tasks checked: + * The number of tasks checked: */ -unsigned long __read_mostly sysctl_hung_task_check_count = 1024; +unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT; + +/* + * Limit number of tasks checked in a batch. + * + * This value controls the preemptibility of khungtaskd since preemption + * is disabled during the critical section. It also controls the size of + * the RCU grace period. So it needs to be upper-bound. + */ +#define HUNG_TASK_BATCHING 1024 /* * Zero means infinite timeout - no checking done: @@ -109,6 +118,24 @@ static void check_hung_task(struct task_struct *t, unsigned long now, panic("hung_task: blocked tasks"); } +/* + * To avoid extending the RCU grace period for an unbounded amount of time, + * periodically exit the critical section and enter a new one. + * + * For preemptible RCU it is sufficient to call rcu_read_unlock in order + * exit the grace period. For classic RCU, a reschedule is required. + */ +static void rcu_lock_break(struct task_struct *g, struct task_struct *t) +{ + get_task_struct(g); + get_task_struct(t); + rcu_read_unlock(); + cond_resched(); + rcu_read_lock(); + put_task_struct(t); + put_task_struct(g); +} + /* * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for * a really long time (120 seconds). If that happens, print out @@ -117,6 +144,7 @@ static void check_hung_task(struct task_struct *t, unsigned long now, static void check_hung_uninterruptible_tasks(unsigned long timeout) { int max_count = sysctl_hung_task_check_count; + int batch_count = HUNG_TASK_BATCHING; unsigned long now = get_timestamp(); struct task_struct *g, *t; @@ -131,6 +159,13 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) do_each_thread(g, t) { if (!--max_count) goto unlock; + if (!--batch_count) { + batch_count = HUNG_TASK_BATCHING; + rcu_lock_break(g, t); + /* Exit if t or g was unhashed during refresh. */ + if (t->state == TASK_DEAD || g->state == TASK_DEAD) + goto unlock; + } /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */ if (t->state == TASK_UNINTERRUPTIBLE) check_hung_task(t, now, timeout); -- cgit v1.2.3-71-gd317 From 94be52dc075a32af4aa73d7e10f68734d62d6af2 Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Thu, 5 Feb 2009 09:56:08 -0800 Subject: softlockup: convert read_lock in hung_task to rcu_read_lock Since the tasklist is protected by rcu list operations, it is safe to convert the read_lock()s to rcu_read_lock(). 
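After the conversion the walk is, in condensed form:

        rcu_read_lock();
        do_each_thread(g, t) {
                /* batching from the previous patch elided for brevity */
                if (t->state == TASK_UNINTERRUPTIBLE)
                        check_hung_task(t, now, timeout);
        } while_each_thread(g, t);
        rcu_read_unlock();

Task structs are freed only after an RCU grace period, so holding rcu_read_lock() is enough to keep g and t valid for the duration of the walk.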
Suggested-by: Peter Zijlstra Signed-off-by: Mandeep Singh Baines Signed-off-by: Ingo Molnar --- kernel/hung_task.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 481ca8b5c2bc..3951a80e7cbe 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -155,7 +155,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) if (test_taint(TAINT_DIE) || did_panic) return; - read_lock(&tasklist_lock); + rcu_read_lock(); do_each_thread(g, t) { if (!--max_count) goto unlock; @@ -171,7 +171,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) check_hung_task(t, now, timeout); } while_each_thread(g, t); unlock: - read_unlock(&tasklist_lock); + rcu_read_unlock(); } static void update_poll_jiffies(void) -- cgit v1.2.3-71-gd317 From 17406b82d621930cca8ccc1272cdac9a7dae8e40 Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Fri, 6 Feb 2009 15:37:47 -0800 Subject: softlockup: remove timestamp checking from hung_task Impact: saves sizeof(long) bytes per task_struct By guaranteeing that sysctl_hung_task_timeout_secs have elapsed between tasklist scans we can avoid using timestamps. Signed-off-by: Mandeep Singh Baines Signed-off-by: Ingo Molnar --- include/linux/sched.h | 1 - kernel/fork.c | 8 +++----- kernel/hung_task.c | 48 +++++++++--------------------------------------- 3 files changed, 12 insertions(+), 45 deletions(-) (limited to 'kernel') diff --git a/include/linux/sched.h b/include/linux/sched.h index 2a2811c6239d..e0d723fea9f5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1241,7 +1241,6 @@ struct task_struct { #endif #ifdef CONFIG_DETECT_HUNG_TASK /* hung task detection */ - unsigned long last_switch_timestamp; unsigned long last_switch_count; #endif /* CPU-specific state of this task */ diff --git a/kernel/fork.c b/kernel/fork.c index fb9444282836..bf582f75014b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -639,6 +639,9 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk) tsk->min_flt = tsk->maj_flt = 0; tsk->nvcsw = tsk->nivcsw = 0; +#ifdef CONFIG_DETECT_HUNG_TASK + tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; +#endif tsk->mm = NULL; tsk->active_mm = NULL; @@ -1041,11 +1044,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->default_timer_slack_ns = current->timer_slack_ns; -#ifdef CONFIG_DETECT_HUNG_TASK - p->last_switch_count = 0; - p->last_switch_timestamp = 0; -#endif - task_io_accounting_init(&p->ioac); acct_clear_integrals(p); diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 3951a80e7cbe..0c924de58cb2 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -34,7 +34,6 @@ unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT; * Zero means infinite timeout - no checking done: */ unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; -static unsigned long __read_mostly hung_task_poll_jiffies; unsigned long __read_mostly sysctl_hung_task_warnings = 10; @@ -69,33 +68,17 @@ static struct notifier_block panic_block = { .notifier_call = hung_task_panic, }; -/* - * Returns seconds, approximately. We don't need nanosecond - * resolution, and we don't need to waste time with a big divide when - * 2^30ns == 1.074s. 
- */ -static unsigned long get_timestamp(void) -{ - int this_cpu = raw_smp_processor_id(); - - return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ -} - -static void check_hung_task(struct task_struct *t, unsigned long now, - unsigned long timeout) +static void check_hung_task(struct task_struct *t, unsigned long timeout) { unsigned long switch_count = t->nvcsw + t->nivcsw; if (t->flags & PF_FROZEN) return; - if (switch_count != t->last_switch_count || !t->last_switch_timestamp) { + if (switch_count != t->last_switch_count) { t->last_switch_count = switch_count; - t->last_switch_timestamp = now; return; } - if ((long)(now - t->last_switch_timestamp) < timeout) - return; if (!sysctl_hung_task_warnings) return; sysctl_hung_task_warnings--; @@ -111,7 +94,6 @@ static void check_hung_task(struct task_struct *t, unsigned long now, sched_show_task(t); __debug_show_held_locks(t); - t->last_switch_timestamp = now; touch_nmi_watchdog(); if (sysctl_hung_task_panic) @@ -145,7 +127,6 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) { int max_count = sysctl_hung_task_check_count; int batch_count = HUNG_TASK_BATCHING; - unsigned long now = get_timestamp(); struct task_struct *g, *t; /* @@ -168,19 +149,16 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) } /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */ if (t->state == TASK_UNINTERRUPTIBLE) - check_hung_task(t, now, timeout); + check_hung_task(t, timeout); } while_each_thread(g, t); unlock: rcu_read_unlock(); } -static void update_poll_jiffies(void) +static unsigned long timeout_jiffies(unsigned long timeout) { /* timeout of 0 will disable the watchdog */ - if (sysctl_hung_task_timeout_secs == 0) - hung_task_poll_jiffies = MAX_SCHEDULE_TIMEOUT; - else - hung_task_poll_jiffies = sysctl_hung_task_timeout_secs * HZ / 2; + return timeout ? timeout * HZ : MAX_SCHEDULE_TIMEOUT; } /* @@ -197,8 +175,6 @@ int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, if (ret || !write) goto out; - update_poll_jiffies(); - wake_up_process(watchdog_task); out: @@ -211,20 +187,14 @@ int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, static int watchdog(void *dummy) { set_user_nice(current, 0); - update_poll_jiffies(); for ( ; ; ) { - unsigned long timeout; + unsigned long timeout = sysctl_hung_task_timeout_secs; - while (schedule_timeout_interruptible(hung_task_poll_jiffies)); + while (schedule_timeout_interruptible(timeout_jiffies(timeout))) + timeout = sysctl_hung_task_timeout_secs; - /* - * Need to cache timeout here to avoid timeout being set - * to 0 via sysctl while inside check_hung_*_tasks(). - */ - timeout = sysctl_hung_task_timeout_secs; - if (timeout) - check_hung_uninterruptible_tasks(timeout); + check_hung_uninterruptible_tasks(timeout); } return 0; -- cgit v1.2.3-71-gd317 From cf2592f59c0e8ed4308adbdb2e0a88655379d579 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 10 Feb 2009 16:52:37 +0100 Subject: softlockup: ensure the task has been switched out once When we check if a task has been switched out since the last scan, we might have a race condition on the following scenario: - the task is freshly created and scheduled - it puts its state to TASK_UNINTERRUPTIBLE and is not yet switched out - check_hung_task() scans this task and will report a false positive because t->nvcsw + t->nivcsw == t->last_switch_count == 0 Add a check for such cases. 
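The added guard makes that scenario harmless (shortened from the hunk below):

        unsigned long switch_count = t->nvcsw + t->nivcsw;

        /*
         * A freshly scheduled task that has never been switched out has
         * switch_count == 0 == t->last_switch_count, so it would wrongly
         * look "unchanged"; skip it (and frozen tasks) here.
         */
        if (unlikely(t->flags & PF_FROZEN || !switch_count))
                return;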
Signed-off-by: Frederic Weisbecker Acked-by: Mandeep Singh Baines Signed-off-by: Ingo Molnar --- kernel/hung_task.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 0c924de58cb2..022a4927b785 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -72,7 +72,13 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) { unsigned long switch_count = t->nvcsw + t->nivcsw; - if (t->flags & PF_FROZEN) + /* + * Ensure the task is not frozen. + * Also, when a freshly created task is scheduled once, changes + * its state to TASK_UNINTERRUPTIBLE without having ever been + * switched out once, it musn't be checked. + */ + if (unlikely(t->flags & PF_FROZEN || !switch_count)) return; if (switch_count != t->last_switch_count) { -- cgit v1.2.3-71-gd317 From 9f8d979f082a3ee1b27f32b7e0811b51c3ad1d15 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 12 Feb 2009 13:10:17 +0100 Subject: softlockup: move 'one' to the softlockup section in sysctl.c CONFIG_SOFTLOCKUP=y || CONFIG_DETECT_HUNG_TASKS=y is now the only user of the 'one' constant in kernel/sysctl.c. Move it to the softlockup block of constants. This fixes a GCC warning. Signed-off-by: Ingo Molnar --- kernel/sysctl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 3b6b54c8ac0d..6d2aeff92b3d 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -90,6 +90,9 @@ extern int rcutorture_runnable; #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ /* Constants used for minimum and maximum */ +#if defined(CONFIG_DETECT_HUNG_TASK) || defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM) +static int one = 1; +#endif #ifdef CONFIG_DETECT_SOFTLOCKUP static int sixty = 60; static int neg_one = -1; @@ -100,7 +103,6 @@ static int two = 2; #endif static int zero; -static int one = 1; static unsigned long one_ul = 1; static int one_hundred = 100; -- cgit v1.2.3-71-gd317 From 3aa551c9b4c40018f0e261a178e3d25478dc04a9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 23 Mar 2009 18:28:15 +0100 Subject: genirq: add threaded interrupt handler support Add support for threaded interrupt handlers: A device driver can request that its main interrupt handler runs in a thread. To achive this the device driver requests the interrupt with request_threaded_irq() and provides additionally to the handler a thread function. The handler function is called in hard interrupt context and needs to check whether the interrupt originated from the device. If the interrupt originated from the device then the handler can either return IRQ_HANDLED or IRQ_WAKE_THREAD. IRQ_HANDLED is returned when no further action is required. IRQ_WAKE_THREAD causes the genirq code to invoke the threaded (main) handler. When IRQ_WAKE_THREAD is returned handler must have disabled the interrupt on the device level. This is mandatory for shared interrupt handlers, but we need to do it as well for obscure x86 hardware where disabling an interrupt on the IO_APIC level redirects the interrupt to the legacy PIC interrupt lines. 
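For illustration, a driver would use the new interface roughly as follows. The foo_* device, its helpers and its struct are made up; only request_threaded_irq(), the IRQ_* return values and the IRQF_SHARED flag come from the kernel:

static irqreturn_t foo_hard_handler(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        if (!foo_irq_pending(foo))      /* hypothetical: did our device fire? */
                return IRQ_NONE;

        foo_mask_irq(foo);              /* hypothetical: silence it at device level */
        return IRQ_WAKE_THREAD;         /* genirq wakes the handler thread */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        foo_handle_work(foo);           /* may sleep; runs in the "irq/NN-foo" kthread */
        foo_unmask_irq(foo);
        return IRQ_HANDLED;
}

        err = request_threaded_irq(foo->irq, foo_hard_handler, foo_thread_fn,
                                   IRQF_SHARED, "foo", foo);

Because the hard handler disables the interrupt at the device before returning IRQ_WAKE_THREAD, a shared line cannot scream while the thread does its work.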
Signed-off-by: Thomas Gleixner Reviewed-by: Ingo Molnar --- include/linux/hardirq.h | 2 +- include/linux/interrupt.h | 37 ++++++++- include/linux/irq.h | 5 ++ include/linux/irqreturn.h | 2 + include/linux/sched.h | 5 ++ kernel/exit.c | 2 + kernel/irq/handle.c | 31 +++++++- kernel/irq/manage.c | 192 ++++++++++++++++++++++++++++++++++++++++++---- 8 files changed, 259 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index f83288347dda..2dfaadbdb2ac 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -105,7 +105,7 @@ # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET #endif -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS) extern void synchronize_irq(unsigned int irq); #else # define synchronize_irq(irq) barrier() diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 0c9cb63e6895..6fc2b720c231 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -59,6 +59,16 @@ #define IRQF_NOBALANCING 0x00000800 #define IRQF_IRQPOLL 0x00001000 +/* + * Bits used by threaded handlers: + * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run + * IRQTF_DIED - handler thread died + */ +enum { + IRQTF_RUNTHREAD, + IRQTF_DIED, +}; + typedef irqreturn_t (*irq_handler_t)(int, void *); /** @@ -71,6 +81,9 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); * @next: pointer to the next irqaction for shared interrupts * @irq: interrupt number * @dir: pointer to the proc/irq/NN/name entry + * @thread_fn: interupt handler function for threaded interrupts + * @thread: thread pointer for threaded interrupts + * @thread_flags: flags related to @thread */ struct irqaction { irq_handler_t handler; @@ -81,11 +94,31 @@ struct irqaction { struct irqaction *next; int irq; struct proc_dir_entry *dir; + irq_handler_t thread_fn; + struct task_struct *thread; + unsigned long thread_flags; }; extern irqreturn_t no_action(int cpl, void *dev_id); -extern int __must_check request_irq(unsigned int, irq_handler_t handler, - unsigned long, const char *, void *); + +extern int __must_check +request_threaded_irq(unsigned int irq, irq_handler_t handler, + irq_handler_t thread_fn, + unsigned long flags, const char *name, void *dev); + +static inline int __must_check +request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev) +{ + return request_threaded_irq(irq, handler, NULL, flags, name, dev); +} + +#ifdef CONFIG_GENERIC_HARDIRQS +extern void exit_irq_thread(void); +#else +static inline void exit_irq_thread(void) { } +#endif + extern void free_irq(unsigned int, void *); struct device; diff --git a/include/linux/irq.h b/include/linux/irq.h index 873e4ac11b81..8b1cf0630210 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -155,6 +156,8 @@ struct irq_2_iommu; * @affinity: IRQ affinity on SMP * @cpu: cpu index useful for balancing * @pending_mask: pending rebalanced interrupts + * @threads_active: number of irqaction threads currently running + * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers * @dir: /proc/irq/ procfs entry * @name: flow handler name for /proc/interrupts output */ @@ -186,6 +189,8 @@ struct irq_desc { cpumask_var_t pending_mask; #endif #endif + atomic_t threads_active; + wait_queue_head_t wait_for_threads; #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; #endif diff --git a/include/linux/irqreturn.h 
b/include/linux/irqreturn.h index c5584ca5b8c9..819acaaac3f5 100644 --- a/include/linux/irqreturn.h +++ b/include/linux/irqreturn.h @@ -5,10 +5,12 @@ * enum irqreturn * @IRQ_NONE interrupt was not from this device * @IRQ_HANDLED interrupt was handled by this device + * @IRQ_WAKE_THREAD handler requests to wake the handler thread */ enum irqreturn { IRQ_NONE, IRQ_HANDLED, + IRQ_WAKE_THREAD, }; typedef enum irqreturn irqreturn_t; diff --git a/include/linux/sched.h b/include/linux/sched.h index 46d680643f89..38b77b0f56e5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1292,6 +1292,11 @@ struct task_struct { /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ spinlock_t alloc_lock; +#ifdef CONFIG_GENERIC_HARDIRQS + /* IRQ handler threads */ + struct irqaction *irqaction; +#endif + /* Protection of the PI data structures: */ spinlock_t pi_lock; diff --git a/kernel/exit.c b/kernel/exit.c index 167e1e3ad7c6..ca0b3488c4a9 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1037,6 +1037,8 @@ NORET_TYPE void do_exit(long code) schedule(); } + exit_irq_thread(); + exit_signals(tsk); /* sets PF_EXITING */ /* * tsk->flags are checked in the futex code to protect against diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 9ebf77968871..fe8f45374e86 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -357,8 +357,37 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) do { ret = action->handler(irq, action->dev_id); - if (ret == IRQ_HANDLED) + + switch (ret) { + case IRQ_WAKE_THREAD: + /* + * Wake up the handler thread for this + * action. In case the thread crashed and was + * killed we just pretend that we handled the + * interrupt. The hardirq handler above has + * disabled the device interrupt, so no irq + * storm is lurking. + */ + if (likely(!test_bit(IRQTF_DIED, + &action->thread_flags))) { + set_bit(IRQTF_RUNTHREAD, &action->thread_flags); + wake_up_process(action->thread); + } + + /* + * Set it to handled so the spurious check + * does not trigger. + */ + ret = IRQ_HANDLED; + /* Fall through to add to randomness */ + case IRQ_HANDLED: status |= action->flags; + break; + + default: + break; + } + retval |= ret; action = action->next; } while (action); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 6458e99984c0..a4c1ab86cd25 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -8,16 +8,15 @@ */ #include +#include #include #include #include #include +#include #include "internals.h" -#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) -cpumask_var_t irq_default_affinity; - /** * synchronize_irq - wait for pending IRQ handlers (on other CPUs) * @irq: interrupt number to wait for @@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq) /* Oops, that failed? */ } while (status & IRQ_INPROGRESS); + + /* + * We made sure that no hardirq handler is running. Now verify + * that no threaded handlers are active. 
+ */ + wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); } EXPORT_SYMBOL(synchronize_irq); +#ifdef CONFIG_SMP +cpumask_var_t irq_default_affinity; + /** * irq_can_set_affinity - Check if the affinity of a given irq can be set * @irq: Interrupt to check @@ -72,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq) return 1; } +static void +irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask) +{ + struct irqaction *action = desc->action; + + while (action) { + if (action->thread) + set_cpus_allowed_ptr(action->thread, cpumask); + action = action->next; + } +} + /** * irq_set_affinity - Set the irq affinity of a given irq * @irq: Interrupt to set affinity @@ -100,6 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) cpumask_copy(desc->affinity, cpumask); desc->chip->set_affinity(irq, cpumask); #endif + irq_set_thread_affinity(desc, cpumask); desc->status |= IRQ_AFFINITY_SET; spin_unlock_irqrestore(&desc->lock, flags); return 0; @@ -150,6 +171,8 @@ int irq_select_affinity_usr(unsigned int irq) spin_lock_irqsave(&desc->lock, flags); ret = setup_affinity(irq, desc); + if (!ret) + irq_set_thread_affinity(desc, desc->affinity); spin_unlock_irqrestore(&desc->lock, flags); return ret; @@ -384,6 +407,93 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, return ret; } +static inline int irq_thread_should_run(struct irqaction *action) +{ + return test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags); +} + +static int irq_wait_for_interrupt(struct irqaction *action) +{ + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + if (irq_thread_should_run(action)) { + __set_current_state(TASK_RUNNING); + return 0; + } else + schedule(); + } + return -1; +} + +/* + * Interrupt handler thread + */ +static int irq_thread(void *data) +{ + struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; + struct irqaction *action = data; + struct irq_desc *desc = irq_to_desc(action->irq); + int wake; + + sched_setscheduler(current, SCHED_FIFO, ¶m); + current->irqaction = action; + + while (!irq_wait_for_interrupt(action)) { + + atomic_inc(&desc->threads_active); + + spin_lock_irq(&desc->lock); + if (unlikely(desc->status & IRQ_DISABLED)) { + /* + * CHECKME: We might need a dedicated + * IRQ_THREAD_PENDING flag here, which + * retriggers the thread in check_irq_resend() + * but AFAICT IRQ_PENDING should be fine as it + * retriggers the interrupt itself --- tglx + */ + desc->status |= IRQ_PENDING; + spin_unlock_irq(&desc->lock); + } else { + spin_unlock_irq(&desc->lock); + + action->thread_fn(action->irq, action->dev_id); + } + + wake = atomic_dec_and_test(&desc->threads_active); + + if (wake && waitqueue_active(&desc->wait_for_threads)) + wake_up(&desc->wait_for_threads); + } + + /* + * Clear irqaction. Otherwise exit_irq_thread() would make + * fuzz about an active irq thread going into nirvana. + */ + current->irqaction = NULL; + return 0; +} + +/* + * Called from do_exit() + */ +void exit_irq_thread(void) +{ + struct task_struct *tsk = current; + + if (!tsk->irqaction) + return; + + printk(KERN_ERR + "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", + tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); + + /* + * Set the THREAD DIED flag to prevent further wakeups of the + * soon to be gone threaded handler. 
+ */ + set_bit(IRQTF_DIED, &tsk->irqaction->flags); +} + /* * Internal function to register an irqaction - typically used to * allocate special interrupts that are part of the architecture. @@ -419,6 +529,26 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) rand_initialize_irq(irq); } + /* + * Threaded handler ? + */ + if (new->thread_fn) { + struct task_struct *t; + + t = kthread_create(irq_thread, new, "irq/%d-%s", irq, + new->name); + if (IS_ERR(t)) + return PTR_ERR(t); + /* + * We keep the reference to the task struct even if + * the thread dies to avoid that the interrupt code + * references an already freed task_struct. + */ + get_task_struct(t); + new->thread = t; + wake_up_process(t); + } + /* * The following block of code has to be executed atomically */ @@ -456,15 +586,15 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) if (!shared) { irq_chip_set_defaults(desc->chip); + init_waitqueue_head(&desc->wait_for_threads); + /* Setup the type (level, edge polarity) if configured: */ if (new->flags & IRQF_TRIGGER_MASK) { ret = __irq_set_trigger(desc, irq, new->flags & IRQF_TRIGGER_MASK); - if (ret) { - spin_unlock_irqrestore(&desc->lock, flags); - return ret; - } + if (ret) + goto out_thread; } else compat_irq_chip_set_default_handler(desc); #if defined(CONFIG_IRQ_PER_CPU) @@ -532,8 +662,19 @@ mismatch: dump_stack(); } #endif + ret = -EBUSY; + +out_thread: spin_unlock_irqrestore(&desc->lock, flags); - return -EBUSY; + if (new->thread) { + struct task_struct *t = new->thread; + + new->thread = NULL; + if (likely(!test_bit(IRQTF_DIED, &new->thread_flags))) + kthread_stop(t); + put_task_struct(t); + } + return ret; } /** @@ -559,6 +700,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action, **action_ptr; + struct task_struct *irqthread; unsigned long flags; WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); @@ -605,6 +747,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) else desc->chip->disable(irq); } + + irqthread = action->thread; + action->thread = NULL; + spin_unlock_irqrestore(&desc->lock, flags); unregister_handler_proc(irq, action); @@ -612,6 +758,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) /* Make sure it's not being used on another CPU: */ synchronize_irq(irq); + if (irqthread) { + if (!test_bit(IRQTF_DIED, &action->thread_flags)) + kthread_stop(irqthread); + put_task_struct(irqthread); + } + #ifdef CONFIG_DEBUG_SHIRQ /* * It's a shared IRQ -- the driver ought to be prepared for an IRQ @@ -664,9 +816,12 @@ void free_irq(unsigned int irq, void *dev_id) EXPORT_SYMBOL(free_irq); /** - * request_irq - allocate an interrupt line + * request_threaded_irq - allocate an interrupt line * @irq: Interrupt line to allocate - * @handler: Function to be called when the IRQ occurs + * @handler: Function to be called when the IRQ occurs. + * Primary handler for threaded interrupts + * @thread_fn: Function called from the irq handler thread + * If NULL, no irq thread is created * @irqflags: Interrupt type flags * @devname: An ascii name for the claiming device * @dev_id: A cookie passed back to the handler function @@ -678,6 +833,15 @@ EXPORT_SYMBOL(free_irq); * raises, you must take care both to initialise your hardware * and to set up the interrupt handler in the right order. 
* + * If you want to set up a threaded irq handler for your device + * then you need to supply @handler and @thread_fn. @handler ist + * still called in hard interrupt context and has to check + * whether the interrupt originates from the device. If yes it + * needs to disable the interrupt on the device and return + * IRQ_THREAD_WAKE which will wake up the handler thread and run + * @thread_fn. This split handler design is necessary to support + * shared interrupts. + * * Dev_id must be globally unique. Normally the address of the * device data structure is used as the cookie. Since the handler * receives this value it makes sense to use it. @@ -693,8 +857,9 @@ EXPORT_SYMBOL(free_irq); * IRQF_TRIGGER_* Specify active edge(s) or level * */ -int request_irq(unsigned int irq, irq_handler_t handler, - unsigned long irqflags, const char *devname, void *dev_id) +int request_threaded_irq(unsigned int irq, irq_handler_t handler, + irq_handler_t thread_fn, unsigned long irqflags, + const char *devname, void *dev_id) { struct irqaction *action; struct irq_desc *desc; @@ -742,6 +907,7 @@ int request_irq(unsigned int irq, irq_handler_t handler, return -ENOMEM; action->handler = handler; + action->thread_fn = thread_fn; action->flags = irqflags; action->name = devname; action->dev_id = dev_id; @@ -771,4 +937,4 @@ int request_irq(unsigned int irq, irq_handler_t handler, #endif return retval; } -EXPORT_SYMBOL(request_irq); +EXPORT_SYMBOL(request_threaded_irq); -- cgit v1.2.3-71-gd317 From 935bd5b971f0df7c06d214d022cf8392e2f37952 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 23 Mar 2009 18:28:16 +0100 Subject: genirq: add support for threaded interrupts to devres Some devices use devres_request_irq() for to install their interrupt handler. Add support for threaded interrupts to devres as well. 
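A sketch of a probe routine using the managed variant (the foo_* names are illustrative and reuse the handlers sketched for request_threaded_irq() above; the call signature matches the one added below):

static int foo_probe(struct platform_device *pdev)
{
        struct foo_dev *foo;
        int irq, err;

        foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        err = devm_request_threaded_irq(&pdev->dev, irq,
                                        foo_hard_handler, foo_thread_fn,
                                        IRQF_SHARED, "foo", foo);
        if (err)
                return err;

        /* no explicit free_irq() needed in remove() or on probe failure */
        return 0;
}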
[tglx - simplified and adapted to latest threadirq version] Signed-off-by: Arjan van de Ven Signed-off-by: Thomas Gleixner --- include/linux/interrupt.h | 17 ++++++++++++++--- kernel/irq/devres.c | 16 ++++++++++------ 2 files changed, 24 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 6fc2b720c231..dbf6a6fd116c 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -123,9 +123,20 @@ extern void free_irq(unsigned int, void *); struct device; -extern int __must_check devm_request_irq(struct device *dev, unsigned int irq, - irq_handler_t handler, unsigned long irqflags, - const char *devname, void *dev_id); +extern int __must_check +devm_request_threaded_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, irq_handler_t thread_fn, + unsigned long irqflags, const char *devname, + void *dev_id); + +static inline int __must_check +devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, + unsigned long irqflags, const char *devname, void *dev_id) +{ + return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, + devname, dev_id); +} + extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); /* diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c index 38a25b8d8bff..d06df9c41cba 100644 --- a/kernel/irq/devres.c +++ b/kernel/irq/devres.c @@ -26,10 +26,12 @@ static int devm_irq_match(struct device *dev, void *res, void *data) } /** - * devm_request_irq - allocate an interrupt line for a managed device + * devm_request_threaded_irq - allocate an interrupt line for a managed device * @dev: device to request interrupt for * @irq: Interrupt line to allocate * @handler: Function to be called when the IRQ occurs + * @thread_fn: function to be called in a threaded interrupt context. NULL + * for devices which handle everything in @handler * @irqflags: Interrupt type flags * @devname: An ascii name for the claiming device * @dev_id: A cookie passed back to the handler function @@ -42,9 +44,10 @@ static int devm_irq_match(struct device *dev, void *res, void *data) * If an IRQ allocated with this function needs to be freed * separately, dev_free_irq() must be used. */ -int devm_request_irq(struct device *dev, unsigned int irq, - irq_handler_t handler, unsigned long irqflags, - const char *devname, void *dev_id) +int devm_request_threaded_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, irq_handler_t thread_fn, + unsigned long irqflags, const char *devname, + void *dev_id) { struct irq_devres *dr; int rc; @@ -54,7 +57,8 @@ int devm_request_irq(struct device *dev, unsigned int irq, if (!dr) return -ENOMEM; - rc = request_irq(irq, handler, irqflags, devname, dev_id); + rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname, + dev_id); if (rc) { devres_free(dr); return rc; @@ -66,7 +70,7 @@ int devm_request_irq(struct device *dev, unsigned int irq, return 0; } -EXPORT_SYMBOL(devm_request_irq); +EXPORT_SYMBOL(devm_request_threaded_irq); /** * devm_free_irq - free an interrupt -- cgit v1.2.3-71-gd317 From f48fe81e5b032914183e9a17052313720c2cac56 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 24 Mar 2009 11:46:22 +0100 Subject: genirq: threaded irq handlers review fixups Delta patch to address the review comments. 
- Implement warning when IRQ_WAKE_THREAD is requested and no thread handler installed - coding style fixes Pointed-out-by: Christoph Hellwig Signed-off-by: Thomas Gleixner --- include/linux/interrupt.h | 2 ++ kernel/irq/handle.c | 29 ++++++++++++++++++++++++----- kernel/irq/manage.c | 17 +++++++---------- 3 files changed, 33 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index dbf6a6fd116c..266a5f5f57cc 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -63,10 +63,12 @@ * Bits used by threaded handlers: * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run * IRQTF_DIED - handler thread died + * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed */ enum { IRQTF_RUNTHREAD, IRQTF_DIED, + IRQTF_WARNED, }; typedef irqreturn_t (*irq_handler_t)(int, void *); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index fe8f45374e86..38b49a9e508a 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -338,6 +338,15 @@ irqreturn_t no_action(int cpl, void *dev_id) return IRQ_NONE; } +static void warn_no_thread(unsigned int irq, struct irqaction *action) +{ + if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags)) + return; + + printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD " + "but no thread function available.", irq, action->name); +} + /** * handle_IRQ_event - irq action chain handler * @irq: the interrupt number @@ -360,6 +369,21 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) switch (ret) { case IRQ_WAKE_THREAD: + /* + * Set result to handled so the spurious check + * does not trigger. + */ + ret = IRQ_HANDLED; + + /* + * Catch drivers which return WAKE_THREAD but + * did not set up a thread function + */ + if (unlikely(!action->thread_fn)) { + warn_no_thread(irq, action); + break; + } + /* * Wake up the handler thread for this * action. In case the thread crashed and was @@ -374,11 +398,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) wake_up_process(action->thread); } - /* - * Set it to handled so the spurious check - * does not trigger. - */ - ret = IRQ_HANDLED; /* Fall through to add to randomness */ case IRQ_HANDLED: status |= action->flags; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index a4c1ab86cd25..a3eb7baf1e46 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -407,20 +407,17 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, return ret; } -static inline int irq_thread_should_run(struct irqaction *action) -{ - return test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags); -} - static int irq_wait_for_interrupt(struct irqaction *action) { while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); - if (irq_thread_should_run(action)) { + + if (test_and_clear_bit(IRQTF_RUNTHREAD, + &action->thread_flags)) { __set_current_state(TASK_RUNNING); return 0; - } else - schedule(); + } + schedule(); } return -1; } @@ -820,8 +817,8 @@ EXPORT_SYMBOL(free_irq); * @irq: Interrupt line to allocate * @handler: Function to be called when the IRQ occurs. 
* Primary handler for threaded interrupts - * @thread_fn: Function called from the irq handler thread - * If NULL, no irq thread is created + * @thread_fn: Function called from the irq handler thread + * If NULL, no irq thread is created * @irqflags: Interrupt type flags * @devname: An ascii name for the claiming device * @dev_id: A cookie passed back to the handler function -- cgit v1.2.3-71-gd317 From a18b83b7ef3c98cd8b4bb885e4a649a8f30fb7b0 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Mon, 23 Mar 2009 10:02:53 +0530 Subject: cpuacct: make cpuacct hierarchy walk in cpuacct_charge() safe when rcupreempt is used -v2 Impact: fix cgroups race under rcu-preempt cpuacct_charge() obtains task's ca and does a hierarchy walk upwards. This can race with the task's movement between cgroups. This race can cause an access to freed ca pointer in cpuacct_charge() or access to invalid cgroups pointer of the task. This will not happen with rcu or tree rcu as cpuacct_charge() is called with preemption disabled. However if rcupreempt is used, the race is seen. Thanks to Li Zefan for explaining this. Fix this race by explicitly protecting ca and the hierarchy walk with rcu_read_lock(). Changes for v2: - Update patch descrition (as per Li Zefan's review comments). - Remove comments in cpuacct_charge() which explained why rcu_read_lock() was needed (as per Peter Zijlstra's review comments). Signed-off-by: Bharata B Rao Cc: Dhaval Giani Cc: Li Zefan Cc: Paul Menage Cc: KAMEZAWA Hiroyuki Acked-by: Peter Zijlstra Acked-by: Balbir Singh Tested-by: Balbir Singh Signed-off-by: Ingo Molnar --- kernel/sched.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 186c6fd08acf..cc397aae5eae 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -9654,12 +9654,17 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime) return; cpu = task_cpu(tsk); + + rcu_read_lock(); + ca = task_ca(tsk); for (; ca; ca = ca->parent) { u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); *cpuusage += cputime; } + + rcu_read_unlock(); } struct cgroup_subsys cpuacct_subsys = { -- cgit v1.2.3-71-gd317 From 13b8bd0a5713bdf05659019badd7c0407984ece1 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 25 Mar 2009 15:01:22 +1030 Subject: sched_rt: don't allocate cpumask in fastpath Impact: cleanup As pointed out by Steven Rostedt. Since the arg in question is unused, we simply change cpupri_find() to accept NULL. Reported-by: Steven Rostedt Signed-off-by: Rusty Russell LKML-Reference: <200903251501.22664.rusty@rustcorp.com.au> Signed-off-by: Ingo Molnar --- kernel/sched_cpupri.c | 5 +++-- kernel/sched_rt.c | 15 ++++----------- 2 files changed, 7 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index 1e00bfacf9b8..cdd3c89574cd 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c @@ -55,7 +55,7 @@ static int convert_prio(int prio) * cpupri_find - find the best (lowest-pri) CPU in the system * @cp: The cpupri context * @p: The task - * @lowest_mask: A mask to fill in with selected CPUs + * @lowest_mask: A mask to fill in with selected CPUs (or NULL) * * Note: This function returns the recommended CPUs as calculated during the * current invokation. 
By the time the call returns, the CPUs may have in @@ -81,7 +81,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) continue; - cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); + if (lowest_mask) + cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); return 1; } diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index bac1061cea2f..fbec5a58ff10 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -805,20 +805,15 @@ static int select_task_rq_rt(struct task_struct *p, int sync) static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) { - cpumask_var_t mask; - if (rq->curr->rt.nr_cpus_allowed == 1) return; - if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) - return; - if (p->rt.nr_cpus_allowed != 1 - && cpupri_find(&rq->rd->cpupri, p, mask)) - goto free; + && cpupri_find(&rq->rd->cpupri, p, NULL)) + return; - if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask)) - goto free; + if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) + return; /* * There appears to be other cpus that can accept @@ -827,8 +822,6 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) */ requeue_task_rt(rq, p, 1); resched_task(rq->curr); -free: - free_cpumask_var(mask); } #endif /* CONFIG_SMP */ -- cgit v1.2.3-71-gd317 From c5f8d99585d7b5b7e857fabf8aefd0174903a98c Mon Sep 17 00:00:00 2001 From: Hidetoshi Seto Date: Tue, 31 Mar 2009 16:56:03 +0900 Subject: posixtimers, sched: Fix posix clock monotonicity Impact: Regression fix (against clock_gettime() backwarding bug) This patch re-introduces a couple of functions, task_sched_runtime and thread_group_sched_runtime, which was once removed at the time of 2.6.28-rc1. These functions protect the sampling of thread/process clock with rq lock. This rq lock is required not to update rq->clock during the sampling. i.e. The clock_gettime() may return ((accounted runtime before update) + (delta after update)) that is less than what it should be. v2 -> v3: - Rename static helper function __task_delta_exec() to do_task_delta_exec() since -tip tree already has a __task_delta_exec() of different version. v1 -> v2: - Revises comments of function and patch description. - Add note about accuracy of thread group's runtime. 
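The restored guarantee can be spot-checked from userspace with a small test like this (illustrative only; link with -lrt):

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec prev = { 0, 0 }, now;
        int i;

        for (i = 0; i < 10000000; i++) {
                clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now);
                if (now.tv_sec < prev.tv_sec ||
                    (now.tv_sec == prev.tv_sec && now.tv_nsec < prev.tv_nsec))
                        printf("process CPU clock went backwards\n");
                prev = now;
        }
        return 0;
}

Without the rq lock around the sampling, a loop like this could occasionally observe a smaller value than an earlier reading on a busy multi-threaded process.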
Signed-off-by: Hidetoshi Seto Acked-by: Peter Zijlstra Cc: stable@kernel.org [2.6.28.x][2.6.29.x] LKML-Reference: <49D1CC93.4080401@jp.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/posix-cpu-timers.c | 7 ++--- kernel/sched.c | 65 +++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 61 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index fa07da94d7be..4318c3085788 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -224,7 +224,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, cpu->cpu = virt_ticks(p); break; case CPUCLOCK_SCHED: - cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p); + cpu->sched = task_sched_runtime(p); break; } return 0; @@ -240,18 +240,19 @@ static int cpu_clock_sample_group(const clockid_t which_clock, { struct task_cputime cputime; - thread_group_cputime(p, &cputime); switch (CPUCLOCK_WHICH(which_clock)) { default: return -EINVAL; case CPUCLOCK_PROF: + thread_group_cputime(p, &cputime); cpu->cpu = cputime_add(cputime.utime, cputime.stime); break; case CPUCLOCK_VIRT: + thread_group_cputime(p, &cputime); cpu->cpu = cputime.utime; break; case CPUCLOCK_SCHED: - cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p); + cpu->sched = thread_group_sched_runtime(p); break; } return 0; diff --git a/kernel/sched.c b/kernel/sched.c index cc397aae5eae..c8d7f17bd036 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4139,9 +4139,25 @@ DEFINE_PER_CPU(struct kernel_stat, kstat); EXPORT_PER_CPU_SYMBOL(kstat); /* - * Return any ns on the sched_clock that have not yet been banked in + * Return any ns on the sched_clock that have not yet been accounted in * @p in case that task is currently running. + * + * Called with task_rq_lock() held on @rq. */ +static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) +{ + u64 ns = 0; + + if (task_current(rq, p)) { + update_rq_clock(rq); + ns = rq->clock - p->se.exec_start; + if ((s64)ns < 0) + ns = 0; + } + + return ns; +} + unsigned long long task_delta_exec(struct task_struct *p) { unsigned long flags; @@ -4149,16 +4165,49 @@ unsigned long long task_delta_exec(struct task_struct *p) u64 ns = 0; rq = task_rq_lock(p, &flags); + ns = do_task_delta_exec(p, rq); + task_rq_unlock(rq, &flags); - if (task_current(rq, p)) { - u64 delta_exec; + return ns; +} - update_rq_clock(rq); - delta_exec = rq->clock - p->se.exec_start; - if ((s64)delta_exec > 0) - ns = delta_exec; - } +/* + * Return accounted runtime for the task. + * In case the task is currently running, return the runtime plus current's + * pending runtime that have not been accounted yet. + */ +unsigned long long task_sched_runtime(struct task_struct *p) +{ + unsigned long flags; + struct rq *rq; + u64 ns = 0; + + rq = task_rq_lock(p, &flags); + ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); + task_rq_unlock(rq, &flags); + + return ns; +} +/* + * Return sum_exec_runtime for the thread group. + * In case the task is currently running, return the sum plus current's + * pending runtime that have not been accounted yet. + * + * Note that the thread group might have other running tasks as well, + * so the return value not includes other pending runtime that other + * running tasks might have. 
+ */ +unsigned long long thread_group_sched_runtime(struct task_struct *p) +{ + struct task_cputime totals; + unsigned long flags; + struct rq *rq; + u64 ns; + + rq = task_rq_lock(p, &flags); + thread_group_cputime(p, &totals); + ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); task_rq_unlock(rq, &flags); return ns; -- cgit v1.2.3-71-gd317 From ef12fefabf94b6a902ad3abd3eb124b00560c445 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Tue, 31 Mar 2009 10:02:22 +0530 Subject: cpuacct: add per-cgroup utime/stime statistics Add per-cgroup cpuacct controller statistics like the system and user time consumed by the group of tasks. Changelog: v7 - Changed the name of the statistic from utime to user and from stime to system so that in future we could easily add other statistics like irq, softirq, steal times etc easily. v6 - Fixed a bug in the error path of cpuacct_create() (pointed by Li Zefan). v5 - In cpuacct_stats_show(), use cputime64_to_clock_t() since we are operating on a 64bit variable here. v4 - Remove comments in cpuacct_update_stats() which explained why rcu_read_lock() was needed (as per Peter Zijlstra's review comments). - Don't say that percpu_counter_read() is broken in Documentation/cpuacct.txt as per KAMEZAWA Hiroyuki's review comments. v3 - Fix a small race in the cpuacct hierarchy walk. v2 - stime and utime now exported in clock_t units instead of msecs. - Addressed the code review comments from Balbir and Li Zefan. - Moved to -tip tree. v1 - Moved the stime/utime accounting to cpuacct controller. Earlier versions - http://lkml.org/lkml/2009/2/25/129 Signed-off-by: Bharata B Rao Signed-off-by: Balaji Rao Cc: Dhaval Giani Cc: Paul Menage Cc: Andrew Morton Cc: KAMEZAWA Hiroyuki Reviewed-by: Li Zefan Acked-by: Peter Zijlstra Acked-by: Balbir Singh Tested-by: Balbir Singh LKML-Reference: <20090331043222.GA4093@in.ibm.com> Signed-off-by: Ingo Molnar --- Documentation/cgroups/cpuacct.txt | 18 ++++++++ kernel/sched.c | 87 ++++++++++++++++++++++++++++++++++++--- 2 files changed, 99 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/Documentation/cgroups/cpuacct.txt b/Documentation/cgroups/cpuacct.txt index bb775fbe43d7..8b930946c52a 100644 --- a/Documentation/cgroups/cpuacct.txt +++ b/Documentation/cgroups/cpuacct.txt @@ -30,3 +30,21 @@ The above steps create a new group g1 and move the current shell process (bash) into it. CPU time consumed by this bash and its children can be obtained from g1/cpuacct.usage and the same is accumulated in /cgroups/cpuacct.usage also. + +cpuacct.stat file lists a few statistics which further divide the +CPU time obtained by the cgroup into user and system times. Currently +the following statistics are supported: + +user: Time spent by tasks of the cgroup in user mode. +system: Time spent by tasks of the cgroup in kernel mode. + +user and system are in USER_HZ unit. + +cpuacct controller uses percpu_counter interface to collect user and +system times. This has two side effects: + +- It is theoretically possible to see wrong values for user and system times. + This is because percpu_counter_read() on 32bit systems isn't safe + against concurrent writes. +- It is possible to see slightly outdated values for user and system times + due to the batch processing nature of percpu_counter. 
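
For illustration only (the /cgroups mount point and group g1 are assumed from the example earlier in this file, and this reader is not part of the patch), the user and system values in cpuacct.stat are USER_HZ ticks and can be converted to seconds with sysconf(_SC_CLK_TCK):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	FILE *f = fopen("/cgroups/g1/cpuacct.stat", "r");	/* assumed path */
	char name[16];
	long long ticks;
	long hz = sysconf(_SC_CLK_TCK);

	if (!f)
		return 1;
	while (fscanf(f, "%15s %lld", name, &ticks) == 2)
		printf("%s: %.2f seconds\n", name, (double)ticks / hz);
	fclose(f);
	return 0;
}
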
diff --git a/kernel/sched.c b/kernel/sched.c index c8d7f17bd036..8d1bdbe8aafc 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1393,10 +1393,22 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, struct rq_iterator *iterator); #endif +/* Time spent by the tasks of the cpu accounting group executing in ... */ +enum cpuacct_stat_index { + CPUACCT_STAT_USER, /* ... user mode */ + CPUACCT_STAT_SYSTEM, /* ... kernel mode */ + + CPUACCT_STAT_NSTATS, +}; + #ifdef CONFIG_CGROUP_CPUACCT static void cpuacct_charge(struct task_struct *tsk, u64 cputime); +static void cpuacct_update_stats(struct task_struct *tsk, + enum cpuacct_stat_index idx, cputime_t val); #else static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} +static inline void cpuacct_update_stats(struct task_struct *tsk, + enum cpuacct_stat_index idx, cputime_t val) {} #endif static inline void inc_cpu_load(struct rq *rq, unsigned long load) @@ -4236,6 +4248,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime, cpustat->nice = cputime64_add(cpustat->nice, tmp); else cpustat->user = cputime64_add(cpustat->user, tmp); + + cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); /* Account for user time used */ acct_update_integrals(p); } @@ -4297,6 +4311,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset, else cpustat->system = cputime64_add(cpustat->system, tmp); + cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); + /* Account for system time used */ acct_update_integrals(p); } @@ -9539,6 +9555,7 @@ struct cpuacct { struct cgroup_subsys_state css; /* cpuusage holds pointer to a u64-type object on every cpu */ u64 *cpuusage; + struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; struct cpuacct *parent; }; @@ -9563,20 +9580,32 @@ static struct cgroup_subsys_state *cpuacct_create( struct cgroup_subsys *ss, struct cgroup *cgrp) { struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); + int i; if (!ca) - return ERR_PTR(-ENOMEM); + goto out; ca->cpuusage = alloc_percpu(u64); - if (!ca->cpuusage) { - kfree(ca); - return ERR_PTR(-ENOMEM); - } + if (!ca->cpuusage) + goto out_free_ca; + + for (i = 0; i < CPUACCT_STAT_NSTATS; i++) + if (percpu_counter_init(&ca->cpustat[i], 0)) + goto out_free_counters; if (cgrp->parent) ca->parent = cgroup_ca(cgrp->parent); return &ca->css; + +out_free_counters: + while (--i >= 0) + percpu_counter_destroy(&ca->cpustat[i]); + free_percpu(ca->cpuusage); +out_free_ca: + kfree(ca); +out: + return ERR_PTR(-ENOMEM); } /* destroy an existing cpu accounting group */ @@ -9584,7 +9613,10 @@ static void cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) { struct cpuacct *ca = cgroup_ca(cgrp); + int i; + for (i = 0; i < CPUACCT_STAT_NSTATS; i++) + percpu_counter_destroy(&ca->cpustat[i]); free_percpu(ca->cpuusage); kfree(ca); } @@ -9671,6 +9703,25 @@ static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft, return 0; } +static const char *cpuacct_stat_desc[] = { + [CPUACCT_STAT_USER] = "user", + [CPUACCT_STAT_SYSTEM] = "system", +}; + +static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft, + struct cgroup_map_cb *cb) +{ + struct cpuacct *ca = cgroup_ca(cgrp); + int i; + + for (i = 0; i < CPUACCT_STAT_NSTATS; i++) { + s64 val = percpu_counter_read(&ca->cpustat[i]); + val = cputime64_to_clock_t(val); + cb->fill(cb, cpuacct_stat_desc[i], val); + } + return 0; +} + static struct cftype files[] = { { .name = "usage", @@ -9681,7 +9732,10 @@ static struct cftype files[] = { .name = "usage_percpu", 
.read_seq_string = cpuacct_percpu_seq_read, }, - + { + .name = "stat", + .read_map = cpuacct_stats_show, + }, }; static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) @@ -9716,6 +9770,27 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime) rcu_read_unlock(); } +/* + * Charge the system/user time to the task's accounting group. + */ +static void cpuacct_update_stats(struct task_struct *tsk, + enum cpuacct_stat_index idx, cputime_t val) +{ + struct cpuacct *ca; + + if (unlikely(!cpuacct_subsys.active)) + return; + + rcu_read_lock(); + ca = task_ca(tsk); + + do { + percpu_counter_add(&ca->cpustat[idx], val); + ca = ca->parent; + } while (ca); + rcu_read_unlock(); +} + struct cgroup_subsys cpuacct_subsys = { .name = "cpuacct", .create = cpuacct_create, -- cgit v1.2.3-71-gd317 From 46e0bb9c12f4bab539736f1714cbf16600f681ec Mon Sep 17 00:00:00 2001 From: Gautham R Shenoy Date: Mon, 30 Mar 2009 10:25:20 +0530 Subject: sched: Print sched_group::__cpu_power in sched_domain_debug Impact: extend debug info /proc/sched_debug If the user changes the value of the sched_mc/smt_power_savings sysfs tunable, it'll trigger a rebuilding of the whole sched_domain tree, with the SD_POWERSAVINGS_BALANCE flag set at certain levels. As a result, there would be a change in the __cpu_power of sched_groups in the sched_domain hierarchy. Print the __cpu_power values for each sched_group in sched_domain_debug to help verify this change and correlate it with the change in the load-balancing behavior. Signed-off-by: Gautham R Shenoy Cc: Peter Zijlstra LKML-Reference: <20090330045520.2869.24777.stgit@sofia.in.ibm.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 8d1bdbe8aafc..6234d10c6a79 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6963,7 +6963,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, cpumask_or(groupmask, groupmask, sched_group_cpus(group)); cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); - printk(KERN_CONT " %s", str); + printk(KERN_CONT " %s (__cpu_power = %d)", str, + group->__cpu_power); group = group->next; } while (group != sd->groups); -- cgit v1.2.3-71-gd317 From 633fe795b80693a8198e7d82f66538a72d2bbba2 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 1 Apr 2009 17:47:23 -0700 Subject: timers: add missing kernel-doc Add missing kernel-doc parameter notation and change function name to its new name: Warning(kernel/timer.c:543): No description found for parameter 'name' Warning(kernel/timer.c:543): No description found for parameter 'key' Signed-off-by: Randy Dunlap Cc: akpm Cc: Johannes Berg LKML-Reference: <20090401174723.f0bea0eb.randy.dunlap@oracle.com> Signed-off-by: Ingo Molnar --- kernel/timer.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index 9b77fc9a9ac8..3af9a0bc4292 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -524,10 +524,13 @@ static void __init_timer(struct timer_list *timer) } /** - * init_timer - initialize a timer. + * init_timer_key - initialize a timer * @timer: the timer to be initialized + * @name: name of the timer + * @key: lockdep class key of the fake lock used for tracking timer + * sync lock dependencies * - * init_timer() must be done to a timer prior calling *any* of the + * init_timer_key() must be done to a timer prior calling *any* of the * other timer functions. 
*/ void init_timer(struct timer_list *timer) -- cgit v1.2.3-71-gd317 From cd84a42f315e50edd454c27a3da3951ccd3d735a Mon Sep 17 00:00:00 2001 From: Darren Hart Date: Thu, 2 Apr 2009 14:19:38 -0700 Subject: futex: comment requeue key reference semantics We've tripped over the futex_requeue drop_count refering to key2 instead of key1. The code is actually correct, but is non-intuitive. This patch adds an explicit comment explaining the requeue. Signed-off-by: Darren Hart Cc: Peter Zijlstra Cc: Nick Piggin Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- kernel/futex.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 6b50a024bca2..eef8cd26b5e5 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -883,7 +883,12 @@ retry_private: out_unlock: double_unlock_hb(hb1, hb2); - /* drop_futex_key_refs() must be called outside the spinlocks. */ + /* + * drop_futex_key_refs() must be called outside the spinlocks. During + * the requeue we moved futex_q's from the hash bucket at key1 to the + * one at key2 and updated their key pointer. We no longer need to + * hold the references to key1. + */ while (--drop_count >= 0) drop_futex_key_refs(&key1); -- cgit v1.2.3-71-gd317 From a4b3ada83d06554d307dd54abdc62b2e5648264a Mon Sep 17 00:00:00 2001 From: Carl Henrik Lunde Date: Fri, 3 Apr 2009 14:27:15 +0200 Subject: blktrace: NUL-terminate user space messages Impact: fix corrupted blkparse output Make sure messages from user space are NUL-terminated strings, otherwise we could dump random memory to the block trace file. Additionally, I've limited the message to BLK_TN_MAX_MSG-1 characters, because the last character would be stripped by vscnprintf anyway. Signed-off-by: Carl Henrik Lunde Cc: Li Zefan Cc: Arnaldo Carvalho de Melo Cc: "Alan D. Brunelle" Cc: Steven Rostedt LKML-Reference: <20090403122714.GT5178@kernel.dk> Signed-off-by: Ingo Molnar --- kernel/trace/blktrace.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 947c5b3f90c4..a400b861fad3 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -327,10 +327,10 @@ static ssize_t blk_msg_write(struct file *filp, const char __user *buffer, char *msg; struct blk_trace *bt; - if (count > BLK_TN_MAX_MSG) + if (count > BLK_TN_MAX_MSG - 1) return -EINVAL; - msg = kmalloc(count, GFP_KERNEL); + msg = kmalloc(count + 1, GFP_KERNEL); if (msg == NULL) return -ENOMEM; @@ -339,6 +339,7 @@ static ssize_t blk_msg_write(struct file *filp, const char __user *buffer, return -EFAULT; } + msg[count] = '\0'; bt = filp->private_data; __trace_note_message(bt, "%s", msg); kfree(msg); -- cgit v1.2.3-71-gd317 From 7635b03adf3d7b84da7649b81efa91e6ebf11b85 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 3 Apr 2009 15:31:34 +0800 Subject: blktrace: small cleanup in blk_msg_write() Signed-off-by: Li Zefan Cc: Arnaldo Carvalho de Melo Cc: "Alan D. 
Brunelle" Cc: Jens Axboe LKML-Reference: <49D5BB56.7000807@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/blktrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index a400b861fad3..73d7860b72e2 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -327,7 +327,7 @@ static ssize_t blk_msg_write(struct file *filp, const char __user *buffer, char *msg; struct blk_trace *bt; - if (count > BLK_TN_MAX_MSG - 1) + if (count >= BLK_TN_MAX_MSG) return -EINVAL; msg = kmalloc(count + 1, GFP_KERNEL); -- cgit v1.2.3-71-gd317 From e2494e1b42ebac402324105d57646489d19e2b01 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 2 Apr 2009 13:43:26 +0800 Subject: blktrace: fix pdu_len when tracing packet command requests Impact: output all of packet commands - not just the first 4 / 8 bytes Since commit d7e3c3249ef23b4617393c69fe464765b4ff1645 ("block: add large command support"), struct request->cmd has been changed from unsinged char cmd[BLK_MAX_CDB] to unsigned char *cmd. v1 -> v2: by: FUJITA Tomonori - make sure rq->cmd_len is always intialized, and then we can use rq->cmd_len instead of BLK_MAX_CDB. Signed-off-by: Li Zefan Acked-by: FUJITA Tomonori Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Jens Axboe LKML-Reference: <49D4507E.2060602@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- block/blk-core.c | 1 + kernel/trace/blktrace.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/block/blk-core.c b/block/blk-core.c index 29bcfac6c688..859879d0a0bf 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -132,6 +132,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq) INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); rq->cmd = rq->__cmd; + rq->cmd_len = BLK_MAX_CDB; rq->tag = -1; rq->ref_count = 1; } diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 73d7860b72e2..b32ff446c3fb 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -643,7 +643,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, if (blk_pc_request(rq)) { what |= BLK_TC_ACT(BLK_TC_PC); __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, - sizeof(rq->cmd), rq->cmd); + rq->cmd_len, rq->cmd); } else { what |= BLK_TC_ACT(BLK_TC_FS); __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, -- cgit v1.2.3-71-gd317 From 9756b15e1b58453a6fd54b85c1ad8515209e10bb Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 30 Mar 2009 20:37:20 -0700 Subject: irq: fix cpumask memory leak on offstack cpumask kernels Need to free the old cpumask for affinity and pending_mask. 
Signed-off-by: Yinghai Lu Acked-by: Rusty Russell LKML-Reference: <49D18FF0.50707@kernel.org> Signed-off-by: Ingo Molnar --- include/linux/irq.h | 14 ++++++++++++++ kernel/irq/numa_migrate.c | 1 + 2 files changed, 15 insertions(+) (limited to 'kernel') diff --git a/include/linux/irq.h b/include/linux/irq.h index 974890b3c52f..99d147efe399 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -482,6 +482,16 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc, #endif } +static inline void free_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ + free_cpumask_var(old_desc->affinity); + +#ifdef CONFIG_GENERIC_PENDING_IRQ + free_cpumask_var(old_desc->pending_mask); +#endif +} + #else /* !CONFIG_SMP */ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, @@ -495,6 +505,10 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc, { } +static inline void free_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ +} #endif /* CONFIG_SMP */ #endif /* _LINUX_IRQ_H */ diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 243d6121e50e..44bbdcbaf8d2 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c @@ -54,6 +54,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) { free_kstat_irqs(old_desc, desc); + free_desc_masks(old_desc, desc); arch_free_chip_data(old_desc, desc); } -- cgit v1.2.3-71-gd317 From 2e45e77787c9d0720b046eb69856edf43b17e33e Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 7 Apr 2009 17:12:43 +0930 Subject: Revert "module: remove the SHF_ALLOC flag on the __versions section." This reverts commit 9cb610d8e35fe3ec95a2fe2030b02f85aeea83c1. This was an impressively stupid patch. Firstly, we reset the SHF_ALLOC flag lower down in the same function, so the patch was useless. Even better, find_sec() ignores sections with SHF_ALLOC not set, so it breaks CONFIG_MODVERSIONS=y with CONFIG_MODULE_FORCE_LOAD=n, which refuses to load the module since it can't find the __versions section. Signed-off-by: Rusty Russell --- kernel/module.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index c268a771595c..05f014efa32c 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1952,9 +1952,6 @@ static noinline struct module *load_module(void __user *umod, if (strstarts(secstrings+sechdrs[i].sh_name, ".exit")) sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC; #endif - /* Don't keep __versions around; it's just for loading. */ - if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0) - sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC; } modindex = find_sec(hdr, sechdrs, secstrings, -- cgit v1.2.3-71-gd317 From 301fd748e2c81e78e74edbc694a64caa7b95dda2 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 3 Apr 2009 11:12:23 -0400 Subject: tracing: remove CALLER_ADDR2 from wakeup tracer Maneesh Soni was getting a crash when running the wakeup tracer. We debugged it down to the recording of the function with the CALLER_ADDR2 macro. This is used to get the location of the caller to schedule. But the problem comes when schedule is called by assmebly. In the case that Maneesh had, retint_careful would call schedule. But retint_careful does not set up a proper frame pointer. CALLER_ADDR2 is defined as __builtin_return_address(2). This produces the following assembly in the wakeup tracer code. 
mov 0x0(%rbp),%rcx <--- get the frame pointer of the caller mov %r14d,%r8d mov 0xf2de8e(%rip),%rdi mov 0x8(%rcx),%rsi <-- this is __builtin_return_address(1) mov 0x28(%rdi,%rax,8),%rbx mov (%rcx),%rax <-- get the frame pointer of the caller's caller mov %r12,%rcx mov 0x8(%rax),%rdx <-- this is __builtin_return_address(2) At the reading of 0x8(%rax) Maneesh's machine would take a fault. The reason is that retint_careful did not set up the return address and the content of %rax here was zero. To verify this, I sent Maneesh a patch to create a frame pointer in retint_careful. He ran the test again but this time he would take the same type of fault from sysret_careful. The retint_careful was no longer an issue, but there are other callers that still have issues. Instead of adding frame pointers for all callers to schedule (in possibly all archs), it is much safer to simply not use CALLER_ADDR2. This loses out on knowing what called schedule, but the function tracer will help there if needed. Reported-by: Maneesh Soni Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace_sched_wakeup.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 3c5ad6b2ec84..5bc00e8f153e 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -154,7 +154,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, if (unlikely(!tracer_enabled || next != wakeup_task)) goto out_unlock; - trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); + trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); /* @@ -257,6 +257,12 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) data = wakeup_trace->data[wakeup_cpu]; data->preempt_timestamp = ftrace_now(cpu); tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); + + /* + * We must be careful in using CALLER_ADDR2. But since wake_up + * is not called by an assembly function (where as schedule is) + * it should be safe to use it here. + */ trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); out_locked: -- cgit v1.2.3-71-gd317 From cf8e3474654f20433aab9aa35826d43b5f245008 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 30 Mar 2009 13:48:00 +0800 Subject: tracing: fix incorrect return type of ns2usecs() Impact: fix time output bug in 32bits system ns2usecs() returns 'long', it's incorrect. (In i386) ... -0 [000] 521.442100: _spin_lock <-tick_do_update_jiffies64 -0 [000] 521.442101: do_timer <-tick_do_update_jiffies64 -0 [000] 521.442102: update_wall_time <-do_timer -0 [000] 521.442102: update_xtime_cache <-update_wall_time .... (It always print the time less than 2200 seconds besides ...) Because 'long' is 32bits in i386. ( (1<<31) useconds is about 2200 seconds) ... -0 [001] 4154502640.134759: rcu_bh_qsctr_inc <-__do_softirq -0 [001] 4154502640.134760: _local_bh_enable <-__do_softirq -0 [001] 4154502640.134761: idle_cpu <-irq_exit ... (very large value) Because 'long' is a signed type and it is 32bits in i386. 
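
A small userspace illustration of the truncation (int32_t stands in for the 32-bit 'long' on i386; not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t nsec = 4000ULL * 1000000000ULL;	/* 4000 seconds of uptime, in ns */
	uint64_t usec64 = nsec / 1000;			/* 4,000,000,000 us - needs 64 bits */
	int32_t usec32 = (int32_t)usec64;		/* models the old 'long' return on i386 */

	printf("unsigned long long: %llu us\n", (unsigned long long)usec64);
	printf("32-bit signed long: %ld us\n", (long)usec32);	/* wraps to a bogus value */
	return 0;
}
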
Changes in v2: return 'unsigned long long' instead of 'cycle_t' Signed-off-by: Lai Jiangshan LKML-Reference: <49D05D10.4030009@cn.fujitsu.com> Reported-by: Li Zefan Acked-by: Frederic Weisbecker Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 3 +-- kernel/trace/trace.h | 2 +- kernel/trace/trace_output.c | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index a0174a40c563..457dd8c97e0d 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -147,8 +147,7 @@ static int __init set_ftrace_dump_on_oops(char *str) } __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); -long -ns2usecs(cycle_t nsec) +unsigned long long ns2usecs(cycle_t nsec) { nsec += 500; do_div(nsec, 1000); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index cb0ce3fc36d3..0d81a4a2a4a5 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -596,7 +596,7 @@ extern int trace_selftest_startup_branch(struct tracer *trace, #endif /* CONFIG_FTRACE_STARTUP_TEST */ extern void *head_page(struct trace_array_cpu *data); -extern long ns2usecs(cycle_t nsec); +extern unsigned long long ns2usecs(cycle_t nsec); extern int trace_vbprintk(unsigned long ip, const char *fmt, va_list args); extern int diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index d72b9a63b247..64b54a59c55b 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -423,7 +423,7 @@ int trace_print_lat_context(struct trace_iterator *iter) trace_find_cmdline(entry->pid, comm); - ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]" + ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]" " %ld.%03ldms (+%ld.%03ldms): ", comm, entry->pid, iter->cpu, entry->flags, entry->preempt_count, iter->idx, -- cgit v1.2.3-71-gd317 From 5f0c6c03c5fee91c02c696bc9bf4c0d41392abe7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 27 Mar 2009 14:22:10 +0100 Subject: tracing/ftrace: fix missing include string.h Building a kernel with tracing can raise the following warning on tip/master: kernel/trace/trace.c:1249: error: implicit declaration of function 'vbin_printf' We are missing an include to string.h Reported-by: Ingo Molnar Signed-off-by: Frederic Weisbecker LKML-Reference: <1238160130-7437-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 457dd8c97e0d..2230b46f9e1c 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3-71-gd317 From 8bcae09b93e7f96f700b6bb372c2b3f2b36636dc Mon Sep 17 00:00:00 2001 From: Zhaolei Date: Tue, 31 Mar 2009 15:24:51 +0800 Subject: ftrace: Add check of sched_stopped for probe_sched_wakeup The wakeup tracing in sched_switch does not stop when a user disables tracing. This is because the probe_sched_wakeup() is missing the check to prevent the wakeup from being traced. 
Signed-off-by: Zhao Lei LKML-Reference: <49D1C543.3010307@cn.fujitsu.com> Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace_sched_switch.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index de35f200abd3..9117cea6f1ae 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c @@ -62,6 +62,9 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success) pc = preempt_count(); tracing_record_cmdline(current); + if (sched_stopped) + return; + local_irq_save(flags); cpu = raw_smp_processor_id(); data = ctx_trace->data[cpu]; -- cgit v1.2.3-71-gd317 From b0dfa978c7a1699fb3506fbfcba0b6a5c4bd17ae Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 1 Apr 2009 22:53:08 +0200 Subject: tracing/ftrace: alloc the started cpumask for the trace file Impact: fix a crash while cat trace file Currently we are using a cpumask to remind each cpu where a trace occured. It lets us notice the user that a cpu just had its first trace. But on latest -tip we have the following crash once we cat the trace file: IP: [] print_trace_fmt+0x45/0xe7 *pde = 00000000 Oops: 0000 [#1] PREEMPT SMP last sysfs file: /sys/class/net/eth0/carrier Pid: 3897, comm: cat Not tainted (2.6.29-tip-02825-g0f22972-dirty #81) EIP: 0060:[] EFLAGS: 00010297 CPU: 0 EIP is at print_trace_fmt+0x45/0xe7 EAX: 00000000 EBX: 00000000 ECX: c12d9e98 EDX: ccdb7010 ESI: d31f4000 EDI: 00322401 EBP: d31f3f10 ESP: d31f3efc DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 Process cat (pid: 3897, ti=d31f2000 task=d3b3cf20 task.ti=d31f2000) Stack: d31f4080 ccdb7010 d31f4000 d691fe70 ccdb7010 d31f3f24 c0270e5c d31f4000 d691fe70 d31f4000 d31f3f34 c02718e8 c12d9e98 d691fe70 d31f3f70 c02bfc33 00001000 09130000 d3b46e00 d691fe98 00000000 00000079 00000001 00000000 Call Trace: [] ? print_trace_line+0x170/0x17c [] ? s_show+0xa7/0xbd [] ? seq_read+0x24a/0x327 [] ? seq_read+0x0/0x327 [] ? vfs_read+0x86/0xe1 [] ? sys_read+0x40/0x65 [] ? sysenter_do_call+0x12/0x3c Code: 00 00 00 89 45 ec f7 c7 00 20 00 00 89 55 f0 74 4e f6 86 98 10 00 00 02 74 45 8b 86 8c 10 00 00 8b 9e a8 10 00 00 e8 52 f3 ff ff <0f> a3 03 19 c0 85 c0 75 2b 8b 86 8c 10 00 00 8b 9e a8 10 00 00 EIP: [] print_trace_fmt+0x45/0xe7 SS:ESP 0068:d31f3efc CR2: 0000000000000000 ---[ end trace aa9cf38e5ebed9dd ]--- This is because we alloc the iter->started cpumask on tracing_pipe_open but not on tracing_open. It hadn't been noticed until now because we need to have ring buffer overruns to activate the starting of cpu buffer detection. Also, we need a check to not print the messagge for the first trace on the file. 
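
The per-cpu announcement logic boils down to the sketch below (a plain-C stand-in for the cpumask operations, not the kernel code): mark a CPU the first time it contributes an entry, and print the notice only when it is not the very first entry of the file.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static bool started[NR_CPUS];		/* stands in for iter->started */

static void note_entry(int cpu, int idx)
{
	if (started[cpu])
		return;
	started[cpu] = true;

	/* skip the notice for the very first entry of the trace file */
	if (idx > 1)
		printf("##### CPU %d buffer started ####\n", cpu);
}

int main(void)
{
	note_entry(0, 1);	/* first entry of the file: no notice */
	note_entry(1, 5);	/* prints the notice for CPU 1 */
	note_entry(1, 9);	/* already started: silent */
	return 0;
}
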
Signed-off-by: Frederic Weisbecker LKML-Reference: <1238619188-6109-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2230b46f9e1c..fc8c7d66832b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1632,7 +1632,11 @@ static void test_cpu_buff_start(struct trace_iterator *iter) return; cpumask_set_cpu(iter->cpu, iter->started); - trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); + + /* Don't print started cpu buffer for the first entry of the trace */ + if (iter->idx > 1) + trace_seq_printf(s, "##### CPU %u buffer started ####\n", + iter->cpu); } static enum print_line_t print_trace_fmt(struct trace_iterator *iter) @@ -1867,6 +1871,11 @@ __tracing_open(struct inode *inode, struct file *file) if (current_trace) *iter->trace = *current_trace; + if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) + goto fail; + + cpumask_clear(iter->started); + if (current_trace && current_trace->print_max) iter->tr = &max_tr; else @@ -1917,6 +1926,7 @@ __tracing_open(struct inode *inode, struct file *file) if (iter->buffer_iter[cpu]) ring_buffer_read_finish(iter->buffer_iter[cpu]); } + free_cpumask_var(iter->started); fail: mutex_unlock(&trace_types_lock); kfree(iter->trace); @@ -1960,6 +1970,7 @@ static int tracing_release(struct inode *inode, struct file *file) seq_release(inode, file); mutex_destroy(&iter->mutex); + free_cpumask_var(iter->started); kfree(iter->trace); kfree(iter); return 0; -- cgit v1.2.3-71-gd317 From bc2b6871c17b3aff79fb14e1a1c06c5f5a187f76 Mon Sep 17 00:00:00 2001 From: Nikanth Karthikesan Date: Mon, 23 Mar 2009 11:58:31 +0530 Subject: Update /debug/tracing/README Some of the tracers have been renamed, which was not updated in the in-kernel run-time README file. Update it. 
Signed-off-by: Nikanth Karthikesan LKML-Reference: <200903231158.32151.knikanth@suse.de> Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index fc8c7d66832b..9d28476a9851 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2369,9 +2369,9 @@ static const char readme_msg[] = "# mkdir /debug\n" "# mount -t debugfs nodev /debug\n\n" "# cat /debug/tracing/available_tracers\n" - "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n" + "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n" "# cat /debug/tracing/current_tracer\n" - "none\n" + "nop\n" "# echo sched_switch > /debug/tracing/current_tracer\n" "# cat /debug/tracing/current_tracer\n" "sched_switch\n" -- cgit v1.2.3-71-gd317 From 1bbe2a83ab68e5cf8c66c372c7cb3b51910c2cfe Mon Sep 17 00:00:00 2001 From: Zhaolei Date: Fri, 3 Apr 2009 18:24:46 +0800 Subject: ftrace: Correct a text align for event format output If we cat debugfs/tracing/events/ftrace/bprint/format, we'll see: name: bprint ID: 6 format: field:unsigned char common_type; offset:0; size:1; field:unsigned char common_flags; offset:1; size:1; field:unsigned char common_preempt_count; offset:2; size:1; field:int common_pid; offset:4; size:4; field:int common_tgid; offset:8; size:4; field:unsigned long ip; offset:12; size:4; field:char * fmt; offset:16; size:4; field: char buf; offset:20; size:0; print fmt: "%08lx (%d) fmt:%p %s" There is an inconsistent blank before char buf. Signed-off-by: Zhao Lei LKML-Reference: <49D5E3EE.70201@cn.fujitsu.com> Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace_export.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 4d9952d3df50..07a22c33ebf3 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -40,7 +40,7 @@ #undef TRACE_FIELD_ZERO_CHAR #define TRACE_FIELD_ZERO_CHAR(item) \ - ret = trace_seq_printf(s, "\tfield: char " #item ";\t" \ + ret = trace_seq_printf(s, "\tfield:char " #item ";\t" \ "offset:%u;\tsize:0;\n", \ (unsigned int)offsetof(typeof(field), item)); \ if (!ret) \ -- cgit v1.2.3-71-gd317 From fafd688e4c0c34da0f3de909881117d374e4c7af Mon Sep 17 00:00:00 2001 From: Peter W Morreale Date: Mon, 6 Apr 2009 19:00:29 -0700 Subject: mm: add /proc controls for pdflush threads Add /proc entries to give the admin the ability to control the minimum and maximum number of pdflush threads. This allows finer control of pdflush on both large and small machines. The rationale is simply one size does not fit all. Admins on large and/or small systems may want to tune the min/max pdflush thread count to best suit their needs. Right now the min/max is hardcoded to 2/8. While probably a fair estimate for smaller machines, large machines with large numbers of CPUs and large numbers of filesystems/block devices may benefit from larger numbers of threads working on different block devices. Even if the background flushing algorithm is radically changed, it is still likely that multiple threads will be involved and admins would still desire finer control on the min/max other than to have to recompile the kernel. The patch adds '/proc/sys/vm/nr_pdflush_threads_min' and '/proc/sys/vm/nr_pdflush_threads_max' with r/w permissions. 
The minimum value for nr_pdflush_threads_min is 1 and the maximum value is the current value of nr_pdflush_threads_max. This minimum is required since additional thread creation is performed in a pdflush thread itself. The minimum value for nr_pdflush_threads_max is the current value of nr_pdflush_threads_min and the maximum value can be 1000. Documentation/sysctl/vm.txt is also updated. [akpm@linux-foundation.org: fix comment, fix whitespace, use __read_mostly] Signed-off-by: Peter W Morreale Reviewed-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/sysctl/vm.txt | 28 ++++++++++++++++++++++++++++ include/linux/writeback.h | 2 ++ kernel/sysctl.c | 23 +++++++++++++++++++++++ mm/pdflush.c | 31 +++++++++++++++++++------------ 4 files changed, 72 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 3197fc83bc51..97c4b3284329 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -39,6 +39,8 @@ Currently, these files are in /proc/sys/vm: - nr_hugepages - nr_overcommit_hugepages - nr_pdflush_threads +- nr_pdflush_threads_min +- nr_pdflush_threads_max - nr_trim_pages (only if CONFIG_MMU=n) - numa_zonelist_order - oom_dump_tasks @@ -463,6 +465,32 @@ The default value is 0. ============================================================== +nr_pdflush_threads_min + +This value controls the minimum number of pdflush threads. + +At boot time, the kernel will create and maintain 'nr_pdflush_threads_min' +threads for the kernel's lifetime. + +The default value is 2. The minimum value you can specify is 1, and +the maximum value is the current setting of 'nr_pdflush_threads_max'. + +See 'nr_pdflush_threads_max' below for more information. + +============================================================== + +nr_pdflush_threads_max + +This value controls the maximum number of pdflush threads that can be +created. The pdflush algorithm will create a new pdflush thread (up to +this maximum) if no pdflush threads have been available for >= 1 second. + +The default value is 8. The minimum value you can specify is the +current value of 'nr_pdflush_threads_min' and the +maximum is 1000. + +============================================================== + overcommit_memory: This value contains a flag that enables memory overcommitment. diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 93445477f86a..9c1ed1fb6ddb 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -168,6 +168,8 @@ void writeback_set_ratelimit(void); /* pdflush.c */ extern int nr_pdflush_threads; /* Global so it can be exported to sysctl read-only. 
*/ +extern int nr_pdflush_threads_max; /* Global so it can be exported to sysctl */ +extern int nr_pdflush_threads_min; /* Global so it can be exported to sysctl */ #endif /* WRITEBACK_H */ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index b125e3387568..72eb1a41dcab 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -101,6 +101,7 @@ static int __maybe_unused one = 1; static int __maybe_unused two = 2; static unsigned long one_ul = 1; static int one_hundred = 100; +static int one_thousand = 1000; /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ static int maxolduid = 65535; @@ -1026,6 +1027,28 @@ static struct ctl_table vm_table[] = { .mode = 0444 /* read-only*/, .proc_handler = &proc_dointvec, }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "nr_pdflush_threads_min", + .data = &nr_pdflush_threads_min, + .maxlen = sizeof nr_pdflush_threads_min, + .mode = 0644 /* read-write */, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &one, + .extra2 = &nr_pdflush_threads_max, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "nr_pdflush_threads_max", + .data = &nr_pdflush_threads_max, + .maxlen = sizeof nr_pdflush_threads_max, + .mode = 0644 /* read-write */, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &nr_pdflush_threads_min, + .extra2 = &one_thousand, + }, { .ctl_name = VM_SWAPPINESS, .procname = "swappiness", diff --git a/mm/pdflush.c b/mm/pdflush.c index 235ac440c44e..f2caf96993f8 100644 --- a/mm/pdflush.c +++ b/mm/pdflush.c @@ -57,6 +57,14 @@ static DEFINE_SPINLOCK(pdflush_lock); */ int nr_pdflush_threads = 0; +/* + * The max/min number of pdflush threads. R/W by sysctl at + * /proc/sys/vm/nr_pdflush_threads_max/min + */ +int nr_pdflush_threads_max __read_mostly = MAX_PDFLUSH_THREADS; +int nr_pdflush_threads_min __read_mostly = MIN_PDFLUSH_THREADS; + + /* * The time at which the pdflush thread pool last went empty */ @@ -68,7 +76,7 @@ static unsigned long last_empty_jifs; * Thread pool management algorithm: * * - The minimum and maximum number of pdflush instances are bound - * by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS. + * by nr_pdflush_threads_min and nr_pdflush_threads_max. * * - If there have been no idle pdflush instances for 1 second, create * a new one. @@ -134,14 +142,13 @@ static int __pdflush(struct pdflush_work *my_work) * To throttle creation, we reset last_empty_jifs. */ if (time_after(jiffies, last_empty_jifs + 1 * HZ)) { - if (list_empty(&pdflush_list)) { - if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) { - last_empty_jifs = jiffies; - nr_pdflush_threads++; - spin_unlock_irq(&pdflush_lock); - start_one_pdflush_thread(); - spin_lock_irq(&pdflush_lock); - } + if (list_empty(&pdflush_list) && + nr_pdflush_threads < nr_pdflush_threads_max) { + last_empty_jifs = jiffies; + nr_pdflush_threads++; + spin_unlock_irq(&pdflush_lock); + start_one_pdflush_thread(); + spin_lock_irq(&pdflush_lock); } } @@ -153,7 +160,7 @@ static int __pdflush(struct pdflush_work *my_work) */ if (list_empty(&pdflush_list)) continue; - if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS) + if (nr_pdflush_threads <= nr_pdflush_threads_min) continue; pdf = list_entry(pdflush_list.prev, struct pdflush_work, list); if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) { @@ -259,9 +266,9 @@ static int __init pdflush_init(void) * Pre-set nr_pdflush_threads... If we fail to create, * the count will be decremented. 
*/ - nr_pdflush_threads = MIN_PDFLUSH_THREADS; + nr_pdflush_threads = nr_pdflush_threads_min; - for (i = 0; i < MIN_PDFLUSH_THREADS; i++) + for (i = 0; i < nr_pdflush_threads_min; i++) start_one_pdflush_thread(); return 0; } -- cgit v1.2.3-71-gd317 From b918e5e60d775549478e4268155142156a95aa17 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 6 Apr 2009 19:00:58 -0700 Subject: kprobes: cleanup aggr_kprobe related code Currently, kprobes can disable all probes at once, but can't disable it individually (not unregister, just disable an kprobe, because unregistering needs to wait for scheduler synchronization). These patches introduce APIs for on-the-fly per-probe disabling and re-enabling by dis-arming/re-arming its breakpoint instruction. This patch: Change old_p to ap in add_new_kprobe() for readability, copy flags member in add_aggr_kprobe(), and simplify the code flow of register_aggr_kprobe(). Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kprobes.c | 60 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 30 insertions(+), 30 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 5016bfb682b9..a55bfadfd766 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -518,20 +518,20 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p) } /* -* Add the new probe to old_p->list. Fail if this is the +* Add the new probe to ap->list. Fail if this is the * second jprobe at the address - two jprobes can't coexist */ -static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p) +static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) { if (p->break_handler) { - if (old_p->break_handler) + if (ap->break_handler) return -EEXIST; - list_add_tail_rcu(&p->list, &old_p->list); - old_p->break_handler = aggr_break_handler; + list_add_tail_rcu(&p->list, &ap->list); + ap->break_handler = aggr_break_handler; } else - list_add_rcu(&p->list, &old_p->list); - if (p->post_handler && !old_p->post_handler) - old_p->post_handler = aggr_post_handler; + list_add_rcu(&p->list, &ap->list); + if (p->post_handler && !ap->post_handler) + ap->post_handler = aggr_post_handler; return 0; } @@ -544,6 +544,7 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p) copy_kprobe(p, ap); flush_insn_slot(ap); ap->addr = p->addr; + ap->flags = p->flags; ap->pre_handler = aggr_pre_handler; ap->fault_handler = aggr_fault_handler; /* We don't care the kprobe which has gone. */ @@ -566,44 +567,43 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p) { int ret = 0; - struct kprobe *ap; + struct kprobe *ap = old_p; - if (kprobe_gone(old_p)) { + if (old_p->pre_handler != aggr_pre_handler) { + /* If old_p is not an aggr_probe, create new aggr_kprobe. */ + ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL); + if (!ap) + return -ENOMEM; + add_aggr_kprobe(ap, old_p); + } + + if (kprobe_gone(ap)) { /* * Attempting to insert new probe at the same location that * had a probe in the module vaddr area which already * freed. So, the instruction slot has already been * released. We need a new slot for the new probe. */ - ret = arch_prepare_kprobe(old_p); + ret = arch_prepare_kprobe(ap); if (ret) + /* + * Even if fail to allocate new slot, don't need to + * free aggr_probe. It will be used next time, or + * freed by unregister_kprobe. 
+ */ return ret; - } - if (old_p->pre_handler == aggr_pre_handler) { - copy_kprobe(old_p, p); - ret = add_new_kprobe(old_p, p); - ap = old_p; - } else { - ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL); - if (!ap) { - if (kprobe_gone(old_p)) - arch_remove_kprobe(old_p); - return -ENOMEM; - } - add_aggr_kprobe(ap, old_p); - copy_kprobe(ap, p); - ret = add_new_kprobe(ap, p); - } - if (kprobe_gone(old_p)) { + /* Clear gone flag to prevent allocating new slot again. */ + ap->flags &= ~KPROBE_FLAG_GONE; /* * If the old_p has gone, its breakpoint has been disarmed. * We have to arm it again after preparing real kprobes. */ - ap->flags &= ~KPROBE_FLAG_GONE; if (kprobe_enabled) arch_arm_kprobe(ap); } - return ret; + + copy_kprobe(ap, p); + return add_new_kprobe(ap, p); } static int __kprobes in_kprobes_functions(unsigned long addr) -- cgit v1.2.3-71-gd317 From 99081ab553d6a88dd6b3774af5eef94dbb8faad7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 6 Apr 2009 19:00:59 -0700 Subject: kprobes: move EXPORT_SYMBOL_GPL just after function definitions Clean up positions of EXPORT_SYMBOL_GPL in kernel/kprobes.c according to checkpatch.pl. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kprobes.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index a55bfadfd766..ca4c22d78cfd 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -722,6 +722,7 @@ out: return ret; } +EXPORT_SYMBOL_GPL(register_kprobe); /* * Unregister a kprobe without a scheduler synchronization. @@ -803,11 +804,13 @@ int __kprobes register_kprobes(struct kprobe **kps, int num) } return ret; } +EXPORT_SYMBOL_GPL(register_kprobes); void __kprobes unregister_kprobe(struct kprobe *p) { unregister_kprobes(&p, 1); } +EXPORT_SYMBOL_GPL(unregister_kprobe); void __kprobes unregister_kprobes(struct kprobe **kps, int num) { @@ -826,6 +829,7 @@ void __kprobes unregister_kprobes(struct kprobe **kps, int num) if (kps[i]->addr) __unregister_kprobe_bottom(kps[i]); } +EXPORT_SYMBOL_GPL(unregister_kprobes); static struct notifier_block kprobe_exceptions_nb = { .notifier_call = kprobe_exceptions_notify, @@ -865,16 +869,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num) } return ret; } +EXPORT_SYMBOL_GPL(register_jprobes); int __kprobes register_jprobe(struct jprobe *jp) { return register_jprobes(&jp, 1); } +EXPORT_SYMBOL_GPL(register_jprobe); void __kprobes unregister_jprobe(struct jprobe *jp) { unregister_jprobes(&jp, 1); } +EXPORT_SYMBOL_GPL(unregister_jprobe); void __kprobes unregister_jprobes(struct jprobe **jps, int num) { @@ -894,6 +901,7 @@ void __kprobes unregister_jprobes(struct jprobe **jps, int num) __unregister_kprobe_bottom(&jps[i]->kp); } } +EXPORT_SYMBOL_GPL(unregister_jprobes); #ifdef CONFIG_KRETPROBES /* @@ -987,6 +995,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp) free_rp_inst(rp); return ret; } +EXPORT_SYMBOL_GPL(register_kretprobe); int __kprobes register_kretprobes(struct kretprobe **rps, int num) { @@ -1004,11 +1013,13 @@ int __kprobes register_kretprobes(struct kretprobe **rps, int num) } return ret; } +EXPORT_SYMBOL_GPL(register_kretprobes); void __kprobes unregister_kretprobe(struct kretprobe *rp) { unregister_kretprobes(&rp, 1); } +EXPORT_SYMBOL_GPL(unregister_kretprobe); void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) { @@ 
-1030,24 +1041,30 @@ void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) } } } +EXPORT_SYMBOL_GPL(unregister_kretprobes); #else /* CONFIG_KRETPROBES */ int __kprobes register_kretprobe(struct kretprobe *rp) { return -ENOSYS; } +EXPORT_SYMBOL_GPL(register_kretprobe); int __kprobes register_kretprobes(struct kretprobe **rps, int num) { return -ENOSYS; } +EXPORT_SYMBOL_GPL(register_kretprobes); + void __kprobes unregister_kretprobe(struct kretprobe *rp) { } +EXPORT_SYMBOL_GPL(unregister_kretprobe); void __kprobes unregister_kretprobes(struct kretprobe **rps, int num) { } +EXPORT_SYMBOL_GPL(unregister_kretprobes); static int __kprobes pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) @@ -1418,16 +1435,5 @@ late_initcall(debugfs_kprobe_init); module_init(init_kprobes); -EXPORT_SYMBOL_GPL(register_kprobe); -EXPORT_SYMBOL_GPL(unregister_kprobe); -EXPORT_SYMBOL_GPL(register_kprobes); -EXPORT_SYMBOL_GPL(unregister_kprobes); -EXPORT_SYMBOL_GPL(register_jprobe); -EXPORT_SYMBOL_GPL(unregister_jprobe); -EXPORT_SYMBOL_GPL(register_jprobes); -EXPORT_SYMBOL_GPL(unregister_jprobes); +/* defined in arch/.../kernel/kprobes.c */ EXPORT_SYMBOL_GPL(jprobe_return); -EXPORT_SYMBOL_GPL(register_kretprobe); -EXPORT_SYMBOL_GPL(unregister_kretprobe); -EXPORT_SYMBOL_GPL(register_kretprobes); -EXPORT_SYMBOL_GPL(unregister_kretprobes); -- cgit v1.2.3-71-gd317 From e579abeb58eb4b8d7321c6eb44dd9e2d0cbaebaa Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 6 Apr 2009 19:01:01 -0700 Subject: kprobes: rename kprobe_enabled to kprobes_all_disarmed Rename kprobe_enabled to kprobes_all_disarmed and invert logic due to avoiding naming confusion from per-probe disabling. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kprobes.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index ca4c22d78cfd..dae198b68e97 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -68,7 +68,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; /* NOTE: change this value only with kprobe_mutex held */ -static bool kprobe_enabled; +static bool kprobes_all_disarmed; static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; @@ -598,7 +598,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p, * If the old_p has gone, its breakpoint has been disarmed. * We have to arm it again after preparing real kprobes. */ - if (kprobe_enabled) + if (!kprobes_all_disarmed) arch_arm_kprobe(ap); } @@ -709,7 +709,7 @@ int __kprobes register_kprobe(struct kprobe *p) hlist_add_head_rcu(&p->hlist, &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); - if (kprobe_enabled) + if (!kprobes_all_disarmed) arch_arm_kprobe(p); out_unlock_text: @@ -751,7 +751,7 @@ valid_p: * enabled and not gone - otherwise, the breakpoint would * already have been removed. We save on flushing icache. 
*/ - if (kprobe_enabled && !kprobe_gone(old_p)) { + if (!kprobes_all_disarmed && !kprobe_gone(old_p)) { mutex_lock(&text_mutex); arch_disarm_kprobe(p); mutex_unlock(&text_mutex); @@ -1190,8 +1190,8 @@ static int __init init_kprobes(void) } } - /* By default, kprobes are enabled */ - kprobe_enabled = true; + /* By default, kprobes are armed */ + kprobes_all_disarmed = false; err = arch_init_kprobes(); if (!err) @@ -1289,7 +1289,7 @@ static struct file_operations debugfs_kprobes_operations = { .release = seq_release, }; -static void __kprobes enable_all_kprobes(void) +static void __kprobes arm_all_kprobes(void) { struct hlist_head *head; struct hlist_node *node; @@ -1298,8 +1298,8 @@ static void __kprobes enable_all_kprobes(void) mutex_lock(&kprobe_mutex); - /* If kprobes are already enabled, just return */ - if (kprobe_enabled) + /* If kprobes are armed, just return */ + if (!kprobes_all_disarmed) goto already_enabled; mutex_lock(&text_mutex); @@ -1311,7 +1311,7 @@ static void __kprobes enable_all_kprobes(void) } mutex_unlock(&text_mutex); - kprobe_enabled = true; + kprobes_all_disarmed = false; printk(KERN_INFO "Kprobes globally enabled\n"); already_enabled: @@ -1319,7 +1319,7 @@ already_enabled: return; } -static void __kprobes disable_all_kprobes(void) +static void __kprobes disarm_all_kprobes(void) { struct hlist_head *head; struct hlist_node *node; @@ -1328,11 +1328,11 @@ static void __kprobes disable_all_kprobes(void) mutex_lock(&kprobe_mutex); - /* If kprobes are already disabled, just return */ - if (!kprobe_enabled) + /* If kprobes are already disarmed, just return */ + if (kprobes_all_disarmed) goto already_disabled; - kprobe_enabled = false; + kprobes_all_disarmed = true; printk(KERN_INFO "Kprobes globally disabled\n"); mutex_lock(&text_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { @@ -1364,7 +1364,7 @@ static ssize_t read_enabled_file_bool(struct file *file, { char buf[3]; - if (kprobe_enabled) + if (!kprobes_all_disarmed) buf[0] = '1'; else buf[0] = '0'; @@ -1387,12 +1387,12 @@ static ssize_t write_enabled_file_bool(struct file *file, case 'y': case 'Y': case '1': - enable_all_kprobes(); + arm_all_kprobes(); break; case 'n': case 'N': case '0': - disable_all_kprobes(); + disarm_all_kprobes(); break; } -- cgit v1.2.3-71-gd317 From de5bd88d5a5cce3cacea904d3503e5ebdb3852a2 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 6 Apr 2009 19:01:02 -0700 Subject: kprobes: support per-kprobe disabling Add disable_kprobe() and enable_kprobe() to disable/enable kprobes temporarily. disable_kprobe() asynchronously disables probe handlers of specified kprobe. So, after calling it, some handlers can be called at a while. enable_kprobe() enables specified kprobe. aggr_pre_handler and aggr_post_handler check disabled probes. On the other hand aggr_break_handler and aggr_fault_handler don't check it because these handlers will be called while executing pre or post handlers and usually those help error handling. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. 
Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/kprobes.txt | 34 ++++++++-- include/linux/kprobes.h | 23 ++++++- kernel/kprobes.c | 167 ++++++++++++++++++++++++++++++++++++++-------- 3 files changed, 191 insertions(+), 33 deletions(-) (limited to 'kernel') diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 48b3de90eb1e..f609af242d6c 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -212,7 +212,9 @@ hit, Kprobes calls kp->pre_handler. After the probed instruction is single-stepped, Kprobe calls kp->post_handler. If a fault occurs during execution of kp->pre_handler or kp->post_handler, or during single-stepping of the probed instruction, Kprobes calls -kp->fault_handler. Any or all handlers can be NULL. +kp->fault_handler. Any or all handlers can be NULL. If kp->flags +is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled, +so, it's handlers aren't hit until calling enable_kprobe(kp). NOTE: 1. With the introduction of the "symbol_name" field to struct kprobe, @@ -363,6 +365,22 @@ probes) in the specified array, they clear the addr field of those incorrect probes. However, other probes in the array are unregistered correctly. +4.7 disable_kprobe + +#include +int disable_kprobe(struct kprobe *kp); + +Temporarily disables the specified kprobe. You can enable it again by using +enable_kprobe(). You must specify the kprobe which has been registered. + +4.8 enable_kprobe + +#include +int enable_kprobe(struct kprobe *kp); + +Enables kprobe which has been disabled by disable_kprobe(). You must specify +the kprobe which has been registered. + 5. Kprobes Features and Limitations Kprobes allows multiple probes at the same address. Currently, @@ -500,10 +518,14 @@ the probe. If the probed function belongs to a module, the module name is also specified. Following columns show probe status. If the probe is on a virtual address that is no longer valid (module init sections, module virtual addresses that correspond to modules that've been unloaded), -such probes are marked with [GONE]. +such probes are marked with [GONE]. If the probe is temporarily disabled, +such probes are marked with [DISABLED]. -/debug/kprobes/enabled: Turn kprobes ON/OFF +/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly. -Provides a knob to globally turn registered kprobes ON or OFF. By default, -all kprobes are enabled. By echoing "0" to this file, all registered probes -will be disarmed, till such time a "1" is echoed to this file. +Provides a knob to globally and forcibly turn registered kprobes ON or OFF. +By default, all kprobes are enabled. By echoing "0" to this file, all +registered probes will be disarmed, till such time a "1" is echoed to this +file. Note that this knob just disarms and arms all kprobes and doesn't +change each probe's disabling state. This means that disabled kprobes (marked +[DISABLED]) will be not enabled if you turn ON all kprobes by this knob. diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 39826a678364..1071cfddddc9 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -112,18 +112,28 @@ struct kprobe { /* copy of the original instruction */ struct arch_specific_insn ainsn; - /* Indicates various status flags. Protected by kprobe_mutex. */ + /* + * Indicates various status flags. + * Protected by kprobe_mutex after this kprobe is registered. 
+ */ u32 flags; }; /* Kprobe status flags */ #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ +#define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */ +/* Has this kprobe gone ? */ static inline int kprobe_gone(struct kprobe *p) { return p->flags & KPROBE_FLAG_GONE; } +/* Is this kprobe disabled ? */ +static inline int kprobe_disabled(struct kprobe *p) +{ + return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE); +} /* * Special probe type that uses setjmp-longjmp type tricks to resume * execution at a specified entry with a matching prototype corresponding @@ -283,6 +293,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num); void kprobe_flush_task(struct task_struct *tk); void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); +int disable_kprobe(struct kprobe *kp); +int enable_kprobe(struct kprobe *kp); + #else /* !CONFIG_KPROBES: */ static inline int kprobes_built_in(void) @@ -349,5 +362,13 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num) static inline void kprobe_flush_task(struct task_struct *tk) { } +static inline int disable_kprobe(struct kprobe *kp) +{ + return -ENOSYS; +} +static inline int enable_kprobe(struct kprobe *kp) +{ + return -ENOSYS; +} #endif /* CONFIG_KPROBES */ #endif /* _LINUX_KPROBES_H */ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index dae198b68e97..a5e74ddee0e2 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -328,7 +328,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) struct kprobe *kp; list_for_each_entry_rcu(kp, &p->list, list) { - if (kp->pre_handler && !kprobe_gone(kp)) { + if (kp->pre_handler && likely(!kprobe_disabled(kp))) { set_kprobe_instance(kp); if (kp->pre_handler(kp, regs)) return 1; @@ -344,7 +344,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs, struct kprobe *kp; list_for_each_entry_rcu(kp, &p->list, list) { - if (kp->post_handler && !kprobe_gone(kp)) { + if (kp->post_handler && likely(!kprobe_disabled(kp))) { set_kprobe_instance(kp); kp->post_handler(kp, regs, flags); reset_kprobe_instance(); @@ -523,6 +523,7 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p) */ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) { + BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); if (p->break_handler) { if (ap->break_handler) return -EEXIST; @@ -532,6 +533,13 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) list_add_rcu(&p->list, &ap->list); if (p->post_handler && !ap->post_handler) ap->post_handler = aggr_post_handler; + + if (kprobe_disabled(ap) && !kprobe_disabled(p)) { + ap->flags &= ~KPROBE_FLAG_DISABLED; + if (!kprobes_all_disarmed) + /* Arm the breakpoint again. */ + arch_arm_kprobe(ap); + } return 0; } @@ -592,20 +600,36 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p, * freed by unregister_kprobe. */ return ret; - /* Clear gone flag to prevent allocating new slot again. */ - ap->flags &= ~KPROBE_FLAG_GONE; + /* - * If the old_p has gone, its breakpoint has been disarmed. - * We have to arm it again after preparing real kprobes. + * Clear gone flag to prevent allocating new slot again, and + * set disabled flag because it is not armed yet. 
*/ - if (!kprobes_all_disarmed) - arch_arm_kprobe(ap); + ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) + | KPROBE_FLAG_DISABLED; } copy_kprobe(ap, p); return add_new_kprobe(ap, p); } +/* Try to disable aggr_kprobe, and return 1 if succeeded.*/ +static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p) +{ + struct kprobe *kp; + + list_for_each_entry_rcu(kp, &p->list, list) { + if (!kprobe_disabled(kp)) + /* + * There is an active probe on the list. + * We can't disable aggr_kprobe. + */ + return 0; + } + p->flags |= KPROBE_FLAG_DISABLED; + return 1; +} + static int __kprobes in_kprobes_functions(unsigned long addr) { struct kprobe_blackpoint *kb; @@ -664,7 +688,9 @@ int __kprobes register_kprobe(struct kprobe *p) return -EINVAL; } - p->flags = 0; + /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ + p->flags &= KPROBE_FLAG_DISABLED; + /* * Check if are we probing a module. */ @@ -709,7 +735,7 @@ int __kprobes register_kprobe(struct kprobe *p) hlist_add_head_rcu(&p->hlist, &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); - if (!kprobes_all_disarmed) + if (!kprobes_all_disarmed && !kprobe_disabled(p)) arch_arm_kprobe(p); out_unlock_text: @@ -724,25 +750,37 @@ out: } EXPORT_SYMBOL_GPL(register_kprobe); -/* - * Unregister a kprobe without a scheduler synchronization. - */ -static int __kprobes __unregister_kprobe_top(struct kprobe *p) +/* Check passed kprobe is valid and return kprobe in kprobe_table. */ +static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) { struct kprobe *old_p, *list_p; old_p = get_kprobe(p->addr); if (unlikely(!old_p)) - return -EINVAL; + return NULL; if (p != old_p) { list_for_each_entry_rcu(list_p, &old_p->list, list) if (list_p == p) /* kprobe p is a valid probe */ - goto valid_p; - return -EINVAL; + goto valid; + return NULL; } -valid_p: +valid: + return old_p; +} + +/* + * Unregister a kprobe without a scheduler synchronization. + */ +static int __kprobes __unregister_kprobe_top(struct kprobe *p) +{ + struct kprobe *old_p, *list_p; + + old_p = __get_valid_kprobe(p); + if (old_p == NULL) + return -EINVAL; + if (old_p == p || (old_p->pre_handler == aggr_pre_handler && list_is_singular(&old_p->list))) { @@ -751,7 +789,7 @@ valid_p: * enabled and not gone - otherwise, the breakpoint would * already have been removed. We save on flushing icache. */ - if (!kprobes_all_disarmed && !kprobe_gone(old_p)) { + if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) { mutex_lock(&text_mutex); arch_disarm_kprobe(p); mutex_unlock(&text_mutex); @@ -769,6 +807,11 @@ valid_p: } noclean: list_del_rcu(&p->list); + if (!kprobe_disabled(old_p)) { + try_to_disable_aggr_kprobe(old_p); + if (!kprobes_all_disarmed && kprobe_disabled(old_p)) + arch_disarm_kprobe(old_p); + } } return 0; } @@ -1078,6 +1121,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, static void __kprobes kill_kprobe(struct kprobe *p) { struct kprobe *kp; + p->flags |= KPROBE_FLAG_GONE; if (p->pre_handler == aggr_pre_handler) { /* @@ -1219,12 +1263,18 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, else kprobe_type = "k"; if (sym) - seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type, - sym, offset, (modname ? modname : " "), - (kprobe_gone(p) ? "[GONE]" : "")); + seq_printf(pi, "%p %s %s+0x%x %s %s%s\n", + p->addr, kprobe_type, sym, offset, + (modname ? modname : " "), + (kprobe_gone(p) ? "[GONE]" : ""), + ((kprobe_disabled(p) && !kprobe_gone(p)) ? 
+ "[DISABLED]" : "")); else - seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr, - (kprobe_gone(p) ? "[GONE]" : "")); + seq_printf(pi, "%p %s %p %s%s\n", + p->addr, kprobe_type, p->addr, + (kprobe_gone(p) ? "[GONE]" : ""), + ((kprobe_disabled(p) && !kprobe_gone(p)) ? + "[DISABLED]" : "")); } static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) @@ -1289,6 +1339,71 @@ static struct file_operations debugfs_kprobes_operations = { .release = seq_release, }; +/* Disable one kprobe */ +int __kprobes disable_kprobe(struct kprobe *kp) +{ + int ret = 0; + struct kprobe *p; + + mutex_lock(&kprobe_mutex); + + /* Check whether specified probe is valid. */ + p = __get_valid_kprobe(kp); + if (unlikely(p == NULL)) { + ret = -EINVAL; + goto out; + } + + /* If the probe is already disabled (or gone), just return */ + if (kprobe_disabled(kp)) + goto out; + + kp->flags |= KPROBE_FLAG_DISABLED; + if (p != kp) + /* When kp != p, p is always enabled. */ + try_to_disable_aggr_kprobe(p); + + if (!kprobes_all_disarmed && kprobe_disabled(p)) + arch_disarm_kprobe(p); +out: + mutex_unlock(&kprobe_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(disable_kprobe); + +/* Enable one kprobe */ +int __kprobes enable_kprobe(struct kprobe *kp) +{ + int ret = 0; + struct kprobe *p; + + mutex_lock(&kprobe_mutex); + + /* Check whether specified probe is valid. */ + p = __get_valid_kprobe(kp); + if (unlikely(p == NULL)) { + ret = -EINVAL; + goto out; + } + + if (kprobe_gone(kp)) { + /* This kprobe has gone, we couldn't enable it. */ + ret = -EINVAL; + goto out; + } + + if (!kprobes_all_disarmed && kprobe_disabled(p)) + arch_arm_kprobe(p); + + p->flags &= ~KPROBE_FLAG_DISABLED; + if (p != kp) + kp->flags &= ~KPROBE_FLAG_DISABLED; +out: + mutex_unlock(&kprobe_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(enable_kprobe); + static void __kprobes arm_all_kprobes(void) { struct hlist_head *head; @@ -1306,7 +1421,7 @@ static void __kprobes arm_all_kprobes(void) for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; hlist_for_each_entry_rcu(p, node, head, hlist) - if (!kprobe_gone(p)) + if (!kprobe_disabled(p)) arch_arm_kprobe(p); } mutex_unlock(&text_mutex); @@ -1338,7 +1453,7 @@ static void __kprobes disarm_all_kprobes(void) for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; hlist_for_each_entry_rcu(p, node, head, hlist) { - if (!arch_trampoline_kprobe(p) && !kprobe_gone(p)) + if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) arch_disarm_kprobe(p); } } -- cgit v1.2.3-71-gd317 From 6279a751fe096a21dc7704e918d570d3ff06e769 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 27 Mar 2009 01:06:07 +0100 Subject: posix-timers: fix RLIMIT_CPU && fork() See http://bugzilla.kernel.org/show_bug.cgi?id=12911 copy_signal() copies signal->rlim, but RLIMIT_CPU is "lost". Because posix_cpu_timers_init_group() sets cputime_expires.prof_exp = 0 and thus fastpath_timer_check() returns false unless we have other expired cpu timers. Change copy_signal() to set cputime_expires.prof_exp if we have RLIMIT_CPU. Also, set cputimer.running = 1 in that case. This is not strictly necessary, but imho makes sense. 
Reported-by: Peter Lojkin Signed-off-by: Oleg Nesterov Acked-by: Peter Zijlstra Cc: Peter Lojkin Cc: Roland McGrath Cc: stable@kernel.org LKML-Reference: <20090327000607.GA10104@redhat.com> Signed-off-by: Ingo Molnar --- kernel/fork.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 4854c2c4a82e..9b51a1b190d4 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -808,6 +808,12 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) sig->cputime_expires.virt_exp = cputime_zero; sig->cputime_expires.sched_exp = 0; + if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { + sig->cputime_expires.prof_exp = + secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur); + sig->cputimer.running = 1; + } + /* The timer lists. */ INIT_LIST_HEAD(&sig->cpu_timers[0]); INIT_LIST_HEAD(&sig->cpu_timers[1]); @@ -823,11 +829,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) atomic_inc(¤t->signal->live); return 0; } - sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); - - if (sig) - posix_cpu_timers_init_group(sig); + sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); tsk->signal = sig; if (!sig) return -ENOMEM; @@ -865,6 +868,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); task_unlock(current->group_leader); + posix_cpu_timers_init_group(sig); + acct_init_pacct(&sig->pacct); tty_audit_fork(sig); -- cgit v1.2.3-71-gd317 From 8f2e586567b1bad72dac7c3810fe9a2ef7117506 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 27 Mar 2009 01:06:10 +0100 Subject: posix-timers: fix RLIMIT_CPU && setitimer(CPUCLOCK_PROF) update_rlimit_cpu() tries to optimize out set_process_cpu_timer() in case when we already have CPUCLOCK_PROF timer which should expire first. But it uses cputime_lt() instead of cputime_gt(). Test case: int main(void) { struct itimerval it = { .it_value = { .tv_sec = 1000 }, }; assert(!setitimer(ITIMER_PROF, &it, NULL)); struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1, }; assert(!setrlimit(RLIMIT_CPU, &rl)); for (;;) ; return 0; } Without this patch, the task is not killed as RLIMIT_CPU demands. Signed-off-by: Oleg Nesterov Acked-by: Peter Zijlstra Cc: Peter Lojkin Cc: Roland McGrath Cc: stable@kernel.org LKML-Reference: <20090327000610.GA10108@redhat.com> Signed-off-by: Ingo Molnar --- kernel/posix-cpu-timers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 8e5d9a68b022..bb53185d8c78 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -18,7 +18,7 @@ void update_rlimit_cpu(unsigned long rlim_new) cputime = secs_to_cputime(rlim_new); if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || - cputime_lt(current->signal->it_prof_expires, cputime)) { + cputime_gt(current->signal->it_prof_expires, cputime)) { spin_lock_irq(¤t->sighand->siglock); set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); spin_unlock_irq(¤t->sighand->siglock); -- cgit v1.2.3-71-gd317 From 3a709703538c471530405556dda136fd0d82b0dc Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Tue, 7 Apr 2009 23:21:06 -0700 Subject: ptrace: some checkpatch fixes This fixes all the checkpatch --file complaints about kernel/ptrace.c and also removes an unused #include. I've verified that there are no changes to the compiled code on x86_64. 
Signed-off-by: Roland McGrath [ Removed the parts that just split a line - Linus ] Signed-off-by: Linus Torvalds --- kernel/ptrace.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/ptrace.c b/kernel/ptrace.c index aaad0ec34194..64191fa09b7e 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -21,9 +21,7 @@ #include #include #include - -#include -#include +#include /* @@ -48,7 +46,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent) list_add(&child->ptrace_entry, &new_parent->ptraced); child->parent = new_parent; } - + /* * Turn a tracing stop into a normal stop now, since with no tracer there * would be no way to wake it up with SIGCONT or SIGKILL. If there was a @@ -173,7 +171,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) task_lock(task); err = __ptrace_may_access(task, mode); task_unlock(task); - return (!err ? true : false); + return !err; } int ptrace_attach(struct task_struct *task) @@ -358,7 +356,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst copied += retval; src += retval; dst += retval; - len -= retval; + len -= retval; } return copied; } @@ -383,7 +381,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds copied += retval; src += retval; dst += retval; - len -= retval; + len -= retval; } return copied; } @@ -496,9 +494,9 @@ static int ptrace_resume(struct task_struct *child, long request, long data) if (unlikely(!arch_has_single_step())) return -EIO; user_enable_single_step(child); - } - else + } else { user_disable_single_step(child); + } child->exit_code = data; wake_up_process(child); -- cgit v1.2.3-71-gd317 From 3217ab97f14c5c8f9f975ed8c40c351164b0b10e Mon Sep 17 00:00:00 2001 From: Vitaliy Gusev Date: Thu, 9 Apr 2009 09:50:35 -0600 Subject: kthread: Don't looking for a task in create_kthread() #2 Remove the unnecessary find_task_by_pid_ns(). kthread() can just use "current" to get the same result. Signed-off-by: Vitaliy Gusev Acked-by: Oleg Nesterov Signed-off-by: Rusty Russell --- kernel/kthread.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/kthread.c b/kernel/kthread.c index 84bbadd4d021..c013bf000ce6 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -76,6 +76,7 @@ static int kthread(void *_create) /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); + create->result = current; complete(&create->started); schedule(); @@ -101,9 +102,6 @@ static void create_kthread(struct kthread_create_info *create) } else { struct sched_param param = { .sched_priority = 0 }; wait_for_completion(&create->started); - read_lock(&tasklist_lock); - create->result = find_task_by_pid_ns(pid, &init_pid_ns); - read_unlock(&tasklist_lock); /* * root may have changed our (kthreadd's) priority or CPU mask. * The kernel thread should not inherit these properties. -- cgit v1.2.3-71-gd317 From 1c99315bb36b5d776210546d438ca928dc9b1f22 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Apr 2009 09:50:36 -0600 Subject: kthread: move sched-realeted initialization from kthreadd context kthreadd is the single thread which implements ths "create" request, move sched_setscheduler/etc from create_kthread() to kthread_create() to improve the scalability. We should be careful with sched_setscheduler(), use _nochek helper. Signed-off-by: Oleg Nesterov Cc: Christoph Hellwig Cc: "Eric W. 
Biederman" Cc: Ingo Molnar Cc: Pavel Emelyanov Cc: Vitaliy Gusev Signed-off-by: Rusty Russell --- kernel/kthread.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/kthread.c b/kernel/kthread.c index c013bf000ce6..4ebaf8519abf 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -97,19 +97,10 @@ static void create_kthread(struct kthread_create_info *create) /* We want our own signal handler (we take no signals by default). */ pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); - if (pid < 0) { + if (pid < 0) create->result = ERR_PTR(pid); - } else { - struct sched_param param = { .sched_priority = 0 }; + else wait_for_completion(&create->started); - /* - * root may have changed our (kthreadd's) priority or CPU mask. - * The kernel thread should not inherit these properties. - */ - sched_setscheduler(create->result, SCHED_NORMAL, ¶m); - set_user_nice(create->result, KTHREAD_NICE_LEVEL); - set_cpus_allowed_ptr(create->result, cpu_all_mask); - } complete(&create->done); } @@ -152,11 +143,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), wait_for_completion(&create.done); if (!IS_ERR(create.result)) { + struct sched_param param = { .sched_priority = 0 }; va_list args; + va_start(args, namefmt); vsnprintf(create.result->comm, sizeof(create.result->comm), namefmt, args); va_end(args); + /* + * root may have changed our (kthreadd's) priority or CPU mask. + * The kernel thread should not inherit these properties. + */ + sched_setscheduler_nocheck(create.result, SCHED_NORMAL, ¶m); + set_user_nice(create.result, KTHREAD_NICE_LEVEL); + set_cpus_allowed_ptr(create.result, cpu_all_mask); } return create.result; } -- cgit v1.2.3-71-gd317 From 6b44003e5ca66a3fffeb5bc90f40ada2c4340896 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 9 Apr 2009 09:50:37 -0600 Subject: work_on_cpu(): rewrite it to create a kernel thread on demand Impact: circular locking bugfix The various implemetnations and proposed implemetnations of work_on_cpu() are vulnerable to various deadlocks because they all used queues of some form. Unrelated pieces of kernel code thus gained dependencies wherein if one work_on_cpu() caller holds a lock which some other work_on_cpu() callback also takes, the kernel could rarely deadlock. Fix this by creating a short-lived kernel thread for each work_on_cpu() invokation. This is not terribly fast, but the only current caller of work_on_cpu() is pci_call_probe(). It would be nice to find some other way of doing the node-local allocations in the PCI probe code so that we can zap work_on_cpu() altogether. The code there is rather nasty. I can't think of anything simple at this time... 
Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Rusty Russell --- kernel/workqueue.c | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b6b966ce1451..f71fb2a08950 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -966,20 +966,20 @@ undo: } #ifdef CONFIG_SMP -static struct workqueue_struct *work_on_cpu_wq __read_mostly; struct work_for_cpu { - struct work_struct work; + struct completion completion; long (*fn)(void *); void *arg; long ret; }; -static void do_work_for_cpu(struct work_struct *w) +static int do_work_for_cpu(void *_wfc) { - struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work); - + struct work_for_cpu *wfc = _wfc; wfc->ret = wfc->fn(wfc->arg); + complete(&wfc->completion); + return 0; } /** @@ -990,17 +990,23 @@ static void do_work_for_cpu(struct work_struct *w) * * This will return the value @fn returns. * It is up to the caller to ensure that the cpu doesn't go offline. + * The caller must not hold any locks which would prevent @fn from completing. */ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) { - struct work_for_cpu wfc; - - INIT_WORK(&wfc.work, do_work_for_cpu); - wfc.fn = fn; - wfc.arg = arg; - queue_work_on(cpu, work_on_cpu_wq, &wfc.work); - flush_work(&wfc.work); - + struct task_struct *sub_thread; + struct work_for_cpu wfc = { + .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), + .fn = fn, + .arg = arg, + }; + + sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); + if (IS_ERR(sub_thread)) + return PTR_ERR(sub_thread); + kthread_bind(sub_thread, cpu); + wake_up_process(sub_thread); + wait_for_completion(&wfc.completion); return wfc.ret; } EXPORT_SYMBOL_GPL(work_on_cpu); @@ -1016,8 +1022,4 @@ void __init init_workqueues(void) hotcpu_notifier(workqueue_cpu_callback, 0); keventd_wq = create_workqueue("events"); BUG_ON(!keventd_wq); -#ifdef CONFIG_SMP - work_on_cpu_wq = create_workqueue("work_on_cpu"); - BUG_ON(!work_on_cpu_wq); -#endif } -- cgit v1.2.3-71-gd317 From 47788c58e66c050982241d9a05eb690daceb05a9 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 8 Apr 2009 20:40:59 +0200 Subject: tracing/syscalls: use a dedicated file header Impact: fix build warnings and possibe compat misbehavior on IA64 Building a kernel on ia64 might trigger these ugly build warnings: CC arch/ia64/ia32/sys_ia32.o In file included from arch/ia64/ia32/sys_ia32.c:55: arch/ia64/ia32/ia32priv.h:290:1: warning: "elf_check_arch" redefined In file included from include/linux/elf.h:7, from include/linux/module.h:14, from include/linux/ftrace.h:8, from include/linux/syscalls.h:68, from arch/ia64/ia32/sys_ia32.c:18: arch/ia64/include/asm/elf.h:19:1: warning: this is the location of the previous definition [...] sys_ia32.c includes linux/syscalls.h which in turn includes linux/ftrace.h to import the syscalls tracing prototypes. But including ftrace.h can pull too much things for a low level file, especially on ia64 where the ia32 private headers conflict with higher level headers. Now we isolate the syscall tracing headers in their own lightweight file. Reported-by: Tony Luck Tested-by: Tony Luck Signed-off-by: Frederic Weisbecker Acked-by: Tony Luck Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Jason Baron Cc: "Frank Ch. 
Eigler" Cc: Mathieu Desnoyers Cc: KOSAKI Motohiro Cc: Lai Jiangshan Cc: Jiaying Zhang Cc: Michael Rubin Cc: Martin Bligh Cc: Michael Davidson LKML-Reference: <20090408184058.GB6017@nowhere> Signed-off-by: Ingo Molnar --- arch/x86/kernel/ftrace.c | 2 ++ arch/x86/kernel/ptrace.c | 3 ++- include/linux/ftrace.h | 29 ----------------------------- include/linux/syscalls.h | 2 +- include/trace/syscall.h | 35 +++++++++++++++++++++++++++++++++++ kernel/trace/trace_syscalls.c | 2 +- 6 files changed, 41 insertions(+), 32 deletions(-) create mode 100644 include/trace/syscall.h (limited to 'kernel') diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 70a10ca100f6..18dfa30795c9 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -18,6 +18,8 @@ #include #include +#include + #include #include #include diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index fe9345c967de..23b7c8f017e2 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include @@ -35,6 +34,8 @@ #include #include +#include + #include "tls.h" enum x86_regset { diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ff112a872d75..8a0c2f221e6b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -511,33 +511,4 @@ static inline void trace_hw_branch_oops(void) {} #endif /* CONFIG_HW_BRANCH_TRACER */ -/* - * A syscall entry in the ftrace syscalls array. - * - * @name: name of the syscall - * @nb_args: number of parameters it takes - * @types: list of types as strings - * @args: list of args as strings (args[i] matches types[i]) - */ -struct syscall_metadata { - const char *name; - int nb_args; - const char **types; - const char **args; -}; - -#ifdef CONFIG_FTRACE_SYSCALLS -extern void arch_init_ftrace_syscalls(void); -extern struct syscall_metadata *syscall_nr_to_meta(int nr); -extern void start_ftrace_syscalls(void); -extern void stop_ftrace_syscalls(void); -extern void ftrace_syscall_enter(struct pt_regs *regs); -extern void ftrace_syscall_exit(struct pt_regs *regs); -#else -static inline void start_ftrace_syscalls(void) { } -static inline void stop_ftrace_syscalls(void) { } -static inline void ftrace_syscall_enter(struct pt_regs *regs) { } -static inline void ftrace_syscall_exit(struct pt_regs *regs) { } -#endif - #endif /* _LINUX_FTRACE_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 6470f74074af..dabe4ad89141 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -65,7 +65,7 @@ struct old_linux_dirent; #include #include #include -#include +#include #define __SC_DECL1(t1, a1) t1 a1 #define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) diff --git a/include/trace/syscall.h b/include/trace/syscall.h new file mode 100644 index 000000000000..8cfe515cbc47 --- /dev/null +++ b/include/trace/syscall.h @@ -0,0 +1,35 @@ +#ifndef _TRACE_SYSCALL_H +#define _TRACE_SYSCALL_H + +#include + +/* + * A syscall entry in the ftrace syscalls array. 
+ * + * @name: name of the syscall + * @nb_args: number of parameters it takes + * @types: list of types as strings + * @args: list of args as strings (args[i] matches types[i]) + */ +struct syscall_metadata { + const char *name; + int nb_args; + const char **types; + const char **args; +}; + +#ifdef CONFIG_FTRACE_SYSCALLS +extern void arch_init_ftrace_syscalls(void); +extern struct syscall_metadata *syscall_nr_to_meta(int nr); +extern void start_ftrace_syscalls(void); +extern void stop_ftrace_syscalls(void); +extern void ftrace_syscall_enter(struct pt_regs *regs); +extern void ftrace_syscall_exit(struct pt_regs *regs); +#else +static inline void start_ftrace_syscalls(void) { } +static inline void stop_ftrace_syscalls(void) { } +static inline void ftrace_syscall_enter(struct pt_regs *regs) { } +static inline void ftrace_syscall_exit(struct pt_regs *regs) { } +#endif + +#endif /* _TRACE_SYSCALL_H */ diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index a2a3af29c943..5e579645ac86 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -1,5 +1,5 @@ +#include #include -#include #include #include "trace_output.h" -- cgit v1.2.3-71-gd317 From 9eb85125ce218a8b8d9a7c982510388e227adbec Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 9 Apr 2009 11:19:40 +0800 Subject: blktrace: pass the right pointer to kfree() Impact: fix kfree crash with non-standard act_mask string If passing a string with leading white spaces to strstrip(), the returned ptr != the original ptr. This bug was introduced by me. Signed-off-by: Li Zefan Cc: Jens Axboe Cc: Arnaldo Carvalho de Melo LKML-Reference: <49DD694C.8020902@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/blktrace.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index b32ff446c3fb..921ef5d1f0ba 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1377,12 +1377,12 @@ static int blk_trace_str2mask(const char *str) { int i; int mask = 0; - char *s, *token; + char *buf, *s, *token; - s = kstrdup(str, GFP_KERNEL); - if (s == NULL) + buf = kstrdup(str, GFP_KERNEL); + if (buf == NULL) return -ENOMEM; - s = strstrip(s); + s = strstrip(buf); while (1) { token = strsep(&s, ","); @@ -1403,7 +1403,7 @@ static int blk_trace_str2mask(const char *str) break; } } - kfree(s); + kfree(buf); return mask; } -- cgit v1.2.3-71-gd317 From 36cd3c9f925b9307236505ae7ad1ad7ac4d4357c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 9 Apr 2009 18:48:34 +0200 Subject: mutex: have non-spinning mutexes on s390 by default Impact: performance regression fix for s390 The adaptive spinning mutexes will not always do what one would expect on virtualized architectures like s390. Especially the cpu_relax() loop in mutex_spin_on_owner might hurt if the mutex holding cpu has been scheduled away by the hypervisor. We would end up in a cpu_relax() loop when there is no chance that the state of the mutex changes until the target cpu has been scheduled again by the hypervisor. For that reason we should change the default behaviour to no-spin on s390. We do have an instruction which allows to yield the current cpu in favour of a different target cpu. Also we have an instruction which allows us to figure out if the target cpu is physically backed. However we need to do some performance tests until we can come up with a solution that will do the right thing on s390. 
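Any other architecture that wants non-spinning mutexes by default can opt out the same way; the whole mechanism is a single Kconfig select (hypothetical example, mirroring the s390 hunk below):

	config MY_VIRTUALIZED_ARCH
		def_bool y
		select HAVE_DEFAULT_NO_SPIN_MUTEXES
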
Signed-off-by: Heiko Carstens Acked-by: Peter Zijlstra Cc: Martin Schwidefsky Cc: Christian Borntraeger LKML-Reference: <20090409184834.7a0df7b2@osiris.boeblingen.de.ibm.com> Signed-off-by: Ingo Molnar --- arch/Kconfig | 3 +++ arch/s390/Kconfig | 1 + kernel/mutex.c | 3 ++- 3 files changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/arch/Kconfig b/arch/Kconfig index dc81b34c5d82..78a35e9dc104 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -109,3 +109,6 @@ config HAVE_CLK config HAVE_DMA_API_DEBUG bool + +config HAVE_DEFAULT_NO_SPIN_MUTEXES + bool diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index dcb667c4375a..2eca5fe0e75b 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -82,6 +82,7 @@ config S390 select USE_GENERIC_SMP_HELPERS if SMP select HAVE_SYSCALL_WRAPPERS select HAVE_FUNCTION_TRACER + select HAVE_DEFAULT_NO_SPIN_MUTEXES select HAVE_OPROFILE select HAVE_KPROBES select HAVE_KRETPROBES diff --git a/kernel/mutex.c b/kernel/mutex.c index 5d79781394a3..507cf2b5e9f1 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -148,7 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, preempt_disable(); mutex_acquire(&lock->dep_map, subclass, 0, ip); -#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) +#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \ + !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES) /* * Optimistic spinning. * -- cgit v1.2.3-71-gd317 From d1e7e02f30be672c6f6ee40908be83877a0d49d1 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Apr 2009 15:16:56 +0800 Subject: tracing: disable seeking for trace_pipe_raw Impact: disable pread() We set tracing_buffers_fops.llseek to no_llseek, but we can still perform pread() to read this file. That is not expected. This fix uses nonseekable_open() to disable it. tracing_buffers_fops.llseek is still set to no_llseek, it mark this file is a "non-seekable device" and is used by sys_splice(). See also do_splice() or manual of splice(2): ERRORS EINVAL Target file system doesn't support splicing; neither of the descriptors refers to a pipe; or offset given for non-seekable device. Signed-off-by: Lai Jiangshan Cc: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49D46668.8030806@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9d28476a9851..24b0168f1a29 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3285,7 +3285,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) filp->private_data = info; - return 0; + return nonseekable_open(inode, filp); out: kfree(info); -- cgit v1.2.3-71-gd317 From ddd538f3e6a1a4bec2f6942f83a753263e6577b4 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Apr 2009 15:16:59 +0800 Subject: tracing: allocate page when needed Impact: Cleanup Sometimes, we open trace_pipe_raw, but we don't read(2) it, we just splice(2) it, thus, the page is not used. 
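The splice-only usage in question looks roughly like this from userspace (a sketch; the debugfs path, length and output file are assumptions, and error handling is omitted):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int pfd[2];
		int raw = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
			       O_RDONLY);
		int out = open("trace.out", O_WRONLY | O_CREAT | O_TRUNC, 0644);

		pipe(pfd);
		/* the file is never read(2), so with this patch no spare page
		 * is allocated at open() time */
		splice(raw, NULL, pfd[1], NULL, 4096, SPLICE_F_MOVE);
		splice(pfd[0], NULL, out, NULL, 4096, SPLICE_F_MOVE);
		return 0;
	}
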
Signed-off-by: Lai Jiangshan Cc: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49D4666B.4010608@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 24b0168f1a29..8e189ffb899a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3277,19 +3277,13 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) info->tr = &global_trace; info->cpu = cpu; - info->spare = ring_buffer_alloc_read_page(info->tr->buffer); + info->spare = NULL; /* Force reading ring buffer for first read */ info->read = (unsigned int)-1; - if (!info->spare) - goto out; filp->private_data = info; return nonseekable_open(inode, filp); - - out: - kfree(info); - return -ENOMEM; } static ssize_t @@ -3304,6 +3298,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, if (!count) return 0; + if (!info->spare) + info->spare = ring_buffer_alloc_read_page(info->tr->buffer); + if (!info->spare) + return -ENOMEM; + /* Do we have previous read data to read? */ if (info->read < PAGE_SIZE) goto read; @@ -3342,7 +3341,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) { struct ftrace_buffer_info *info = file->private_data; - ring_buffer_free_read_page(info->tr->buffer, info->spare); + if (info->spare) + ring_buffer_free_read_page(info->tr->buffer, info->spare); kfree(info); return 0; -- cgit v1.2.3-71-gd317 From c7625a555f55d7ae49236cde551786c88f5a5ce1 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Apr 2009 15:17:04 +0800 Subject: tracing: update file->f_pos when splice(2) it Impact: Cleanup These two lines: if (unlikely(*ppos)) return -ESPIPE; in tracing_buffers_splice_read() are not needed, VFS layer has disabled seek(2). We remove these two lines, and then we can update file->f_pos. And tracing_buffers_read() updates file->f_pos, this fix make tracing_buffers_splice_read() updates file->f_pos too. Signed-off-by: Lai Jiangshan Cc: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49D46670.4010503@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8e189ffb899a..94629760dabf 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3428,13 +3428,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, int size, i; size_t ret; - /* - * We can't seek on a buffer input - */ - if (unlikely(*ppos)) - return -ESPIPE; - - for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) { struct page *page; int r; @@ -3474,6 +3467,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, spd.partial[i].offset = 0; spd.partial[i].private = (unsigned long)ref; spd.nr_pages++; + *ppos += size; } spd.nr_pages = i; -- cgit v1.2.3-71-gd317 From 93cfb3c9fd83d877a8f1ffad9ff862b617b32828 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Apr 2009 15:17:08 +0800 Subject: tracing: fix splice return too large I got these from strace: splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 12288 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 12288 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 12288 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 16384 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 8192 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 8192 splice(0x3, 0, 0x5, 0, 0x1000, 0x1) = 8192 I wanted to splice_read 4096 bytes, but it returns 8192 or larger. 
It is because the return value of tracing_buffers_splice_read() does not include "zero out any left over data" bytes. But tracing_buffers_read() includes these bytes, we make them consistent. Signed-off-by: Lai Jiangshan Cc: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49D46674.9030804@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 94629760dabf..1ce5dc6372b8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3428,7 +3428,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, int size, i; size_t ret; - for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) { + if (*ppos & (PAGE_SIZE - 1)) { + WARN_ONCE(1, "Ftrace: previous read must page-align\n"); + return -EINVAL; + } + + if (len & (PAGE_SIZE - 1)) { + WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); + if (len < PAGE_SIZE) + return -EINVAL; + len &= PAGE_MASK; + } + + for (i = 0; i < PIPE_BUFFERS && len; i++, len -= PAGE_SIZE) { struct page *page; int r; @@ -3467,7 +3479,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, spd.partial[i].offset = 0; spd.partial[i].private = (unsigned long)ref; spd.nr_pages++; - *ppos += size; + *ppos += PAGE_SIZE; } spd.nr_pages = i; -- cgit v1.2.3-71-gd317 From 4d1f4372dbea068ba4ee3d98231133a4a4ee15bd Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 10 Apr 2009 08:48:36 +0800 Subject: tracing: fix document references When moving documents to Documentation/trace/, I forgot to grep Kconfig to find out those references. Signed-off-by: Li Zefan Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Pekka Enberg Cc: Pekka Paalanen Cc: eduard.munteanu@linux360.ro LKML-Reference: <49DE97EF.7080208@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 2246141bda4d..417d1985e299 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -312,7 +312,7 @@ config KMEMTRACE and profile kernel code. This requires an userspace application to use. See - Documentation/vm/kmemtrace.txt for more information. + Documentation/trace/kmemtrace.txt for more information. Saying Y will make the kernel somewhat larger and slower. However, if you disable kmemtrace at run-time or boot-time, the performance @@ -403,7 +403,7 @@ config MMIOTRACE implementation and works via page faults. Tracing is disabled by default and can be enabled at run-time. - See Documentation/tracers/mmiotrace.txt. + See Documentation/trace/mmiotrace.txt. If you are not helping to develop drivers, say N. config MMIOTRACE_TEST -- cgit v1.2.3-71-gd317 From 0462b5664b2bda5a18fef7efb5bb32ce36590c1a Mon Sep 17 00:00:00 2001 From: Zhaolei Date: Wed, 8 Apr 2009 17:00:13 +0800 Subject: ftrace: Output REC->var instead of __entry->var for trace format print fmt: "irq=%d return=%s", __entry->irq, __entry->ret ? \"handled\" : \"unhandled\" "__entry" should be convert to "REC" by __stringify() macro. 
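The difference is that "#args" stringifies the tokens before any macro expansion, whereas __stringify() expands its argument first, so the "#define __entry REC" in the hunk below takes effect. include/linux/stringify.h implements it as the usual two-level macro, roughly:

	#define __stringify_1(x...)	#x
	#define __stringify(x...)	__stringify_1(x)

	/* hence, with __entry redefined to REC:
	 *   __stringify(__entry->irq, __entry->ret ? "handled" : "unhandled")
	 * produces "REC->irq, REC->ret ? \"handled\" : \"unhandled\"",
	 * matching the print fmt quoted above. */
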
Signed-off-by: Zhao Lei Acked-by: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49DC679D.2090901@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events_stage_2.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h index 30743f7d4110..d363c6672c6c 100644 --- a/kernel/trace/trace_events_stage_2.h +++ b/kernel/trace/trace_events_stage_2.h @@ -105,10 +105,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ return 0; #undef __entry -#define __entry "REC" +#define __entry REC #undef TP_printk -#define TP_printk(fmt, args...) "%s, %s\n", #fmt, #args +#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) #undef TP_fast_assign #define TP_fast_assign(args...) args -- cgit v1.2.3-71-gd317 From d6de2c80e9d758d2e36c21699117db6178c0f517 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 10 Apr 2009 12:17:41 -0700 Subject: async: Fix module loading async-work regression Several drivers use asynchronous work to do device discovery, and we synchronize with them in the compiled-in case before we actually try to mount root filesystems etc. However, when compiled as modules, that synchronization is missing - the module loading completes, but the driver hasn't actually finished probing for devices, and that means that any user mode that expects to use the devices after the 'insmod' is now potentially broken. We already saw one case of a similar issue in the ACPI battery code, where the kernel itself expected the module to be all done, and unmapped the init memory - but the async device discovery was still running. That got hacked around by just removing the "__init" (see commit 5d38258ec026921a7b266f4047ebeaa75db358e5 "ACPI battery: fix async boot oops"), but the real fix is to just make the module loading wait for all async work to be completed. It will slow down module loading, but since common devices should be built in anyway, and since the bug is really annoying and hard to handle from user space (and caused several S3 resume regressions), the simple fix to wait is the right one. This fixes at least http://bugzilla.kernel.org/show_bug.cgi?id=13063 but probably a few other bugzilla entries too (12936, for example), and is confirmed to fix Rafael's storage driver breakage after resume bug report (no bugzilla entry). We should also be able to now revert that ACPI battery fix. Reported-and-tested-by: Rafael J. Wysocki Tested-by: Heinz Diehl Acked-by: Arjan van de Ven Signed-off-by: Linus Torvalds --- kernel/module.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 05f014efa32c..e797812a4d95 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2388,6 +2388,9 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_LIVE, mod); + /* We need to finish all async code before the module init sequence is done */ + async_synchronize_full(); + mutex_lock(&module_mutex); /* Drop initial reference. 
*/ module_put(mod); -- cgit v1.2.3-71-gd317 From 8433a40eb7f2c4883ad57f9900f63e4d59240eb7 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sat, 11 Apr 2009 15:52:18 +0800 Subject: tracing/filters: NIL-terminate user input filter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure messages from user space are NIL-terminated strings, otherwise we could dump random memory while reading filter file. Try this: # echo 'parent_comm ==' > events/sched/sched_process_fork/filter # cat events/sched/sched_process_fork/filter parent_comm == � Signed-off-by: Li Zefan Acked-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49E04C32.6060508@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 64ec4d278ffb..054bc1802bcd 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -503,6 +503,7 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; + buf[cnt] = '\0'; pred = kzalloc(sizeof(*pred), GFP_KERNEL); if (!pred) @@ -569,6 +570,7 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; + buf[cnt] = '\0'; pred = kzalloc(sizeof(*pred), GFP_KERNEL); if (!pred) -- cgit v1.2.3-71-gd317 From bcabd91c271e50eebc0cb9220ac92700332b452e Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sat, 11 Apr 2009 15:52:35 +0800 Subject: tracing/filters: fix NULL pointer dereference Try this, and you'll see NULL pointer dereference bug: # echo -n 'parent_comm ==' > sched/sched_process_fork/filter Because we passed NULL ptr to simple_strtoull(). Signed-off-by: Li Zefan Acked-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49E04C43.1050504@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events_filter.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 026be412f356..9d2162fd2305 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -410,6 +410,11 @@ int filter_parse(char **pbuf, struct filter_pred *pred) } } + if (!val_str) { + pred->field_name = NULL; + return -EINVAL; + } + pred->field_name = kstrdup(pred->field_name, GFP_KERNEL); if (!pred->field_name) return -ENOMEM; -- cgit v1.2.3-71-gd317 From a3e0ab050774117d4a6173087c8bf3888662a83f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sat, 11 Apr 2009 15:52:51 +0800 Subject: tracing/filters: allow user input integer to be oct or hex Before patch: # echo 'parent_pid == 0x10' > events/sched/sched_process_fork/filter # cat sched/sched_process_fork/filter parent_pid == 0 After patch: # cat sched/sched_process_fork/filter parent_pid == 16 Also check the input more strictly. 
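Passing base 0 gives simple_strtoull() the same radix auto-detection as userspace strtoull(3); an illustrative userspace equivalent (<stdlib.h>) of the new parsing step:

	char *tmp;
	unsigned long long val = strtoull("0x10", &tmp, 0);	/* val == 16 */

	/* base 0: "0x"-prefixed input is hex, a leading "0" means octal,
	 * anything else is decimal.  The added *tmp != '\0' check makes
	 * trailing garbage ("16abc", "0x10foo", ...) fail with -EINVAL
	 * instead of being silently ignored. */
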
Signed-off-by: Li Zefan Acked-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49E04C53.4010600@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events_filter.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 9d2162fd2305..49b3ef54ec46 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -419,12 +419,13 @@ int filter_parse(char **pbuf, struct filter_pred *pred) if (!pred->field_name) return -ENOMEM; - pred->val = simple_strtoull(val_str, &tmp, 10); + pred->val = simple_strtoull(val_str, &tmp, 0); if (tmp == val_str) { pred->str_val = kstrdup(val_str, GFP_KERNEL); if (!pred->str_val) return -ENOMEM; - } + } else if (*tmp != '\0') + return -EINVAL; return 0; } -- cgit v1.2.3-71-gd317 From 44e9c8b7adc52079f0535f9de0c2c2477831389b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Sat, 11 Apr 2009 15:55:28 +0800 Subject: tracing/filters: return proper error code when writing filter file - propagate return value of filter_add_pred() to the user - return -ENOSPC but not -ENOMEM or -EINVAL when the filter array is full Signed-off-by: Li Zefan Acked-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <49E04CF0.3010105@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events.c | 10 ++++++---- kernel/trace/trace_events_filter.c | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 054bc1802bcd..576f4fa2af0d 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -521,9 +521,10 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, return cnt; } - if (filter_add_pred(call, pred)) { + err = filter_add_pred(call, pred); + if (err < 0) { filter_free_pred(pred); - return -EINVAL; + return err; } *ppos += cnt; @@ -588,10 +589,11 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, return cnt; } - if (filter_add_subsystem_pred(system, pred)) { + err = filter_add_subsystem_pred(system, pred); + if (err < 0) { filter_free_subsystem_preds(system); filter_free_pred(pred); - return -EINVAL; + return err; } *ppos += cnt; diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 49b3ef54ec46..e03cbf1e38f3 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -215,7 +215,7 @@ static int __filter_add_pred(struct ftrace_event_call *call, } } - return -ENOMEM; + return -ENOSPC; } static int is_string_field(const char *type) @@ -319,7 +319,7 @@ int filter_add_subsystem_pred(struct event_subsystem *system, } if (i == MAX_FILTER_PRED) - return -EINVAL; + return -ENOSPC; events_for_each(call) { int err; -- cgit v1.2.3-71-gd317 From 9eeba6138cefc0435695463ddadb0d95e0a6bcd2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 11 Apr 2009 03:17:17 +0200 Subject: lockdep: warn about lockdep disabling after kernel taint Impact: provide useful missing info for developers Kernel taint can occur in several situations such as warnings, load of prorietary or staging modules, bad page, etc... But when such taint happens, a developer might still be working on the kernel, expecting that lockdep is still enabled. But a taint disables lockdep without ever warning about it. 
Such a kernel behaviour doesn't really help for kernel development. This patch adds this missing warning. Since the taint is done most of the time after the main message that explain the real source issue, it seems safe to warn about it inside add_taint() so that it appears at last, without hurting the main information. v2: Use a generic helper to disable lockdep instead of an open coded xchg(). Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra LKML-Reference: <1239412638-6739-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- include/linux/debug_locks.h | 7 +++++++ kernel/panic.c | 10 ++++++++-- lib/debug_locks.c | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 096476f1fb35..493dedb7a67b 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -2,12 +2,19 @@ #define __LINUX_DEBUG_LOCKING_H #include +#include struct task_struct; extern int debug_locks; extern int debug_locks_silent; + +static inline int __debug_locks_off(void) +{ + return xchg(&debug_locks, 0); +} + /* * Generic 'turn off all lock debugging' function: */ diff --git a/kernel/panic.c b/kernel/panic.c index 3fd8c5bf8b39..940ca14f6dbf 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -213,8 +213,14 @@ unsigned long get_taint(void) void add_taint(unsigned flag) { - /* can't trust the integrity of the kernel anymore: */ - debug_locks = 0; + /* + * Can't trust the integrity of the kernel anymore. + * We don't call directly debug_locks_off() because the issue + * is not necessarily serious enough to set oops_in_progress to 1 + */ + if (__debug_locks_off()) + printk(KERN_WARNING "Disabling lockdep due to kernel taint\n"); + set_bit(flag, &tainted_mask); } EXPORT_SYMBOL(add_taint); diff --git a/lib/debug_locks.c b/lib/debug_locks.c index 0218b4693dd8..bc3b11731b9c 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c @@ -36,7 +36,7 @@ int debug_locks_silent; */ int debug_locks_off(void) { - if (xchg(&debug_locks, 0)) { + if (__debug_locks_off()) { if (!debug_locks_silent) { oops_in_progress = 1; console_verbose(); -- cgit v1.2.3-71-gd317 From 574bbe782057fdf0490dc7dec906a2dc26363e20 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 11 Apr 2009 03:17:18 +0200 Subject: lockdep: continue lock debugging despite some taints Impact: broaden lockdep checks Lockdep is disabled after any kernel taints. This might be convenient to ignore bad locking issues which sources come from outside the kernel tree. Nevertheless, it might be a frustrating experience for the staging developers or those who experience a warning but are focused on another things that require lockdep. The v2 of this patch simply don't disable anymore lockdep in case of TAINT_CRAP and TAINT_WARN events. Signed-off-by: Frederic Weisbecker Cc: LTP Cc: Peter Zijlstra Cc: Greg KH LKML-Reference: <1239412638-6739-2-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/panic.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index 940ca14f6dbf..934fb377f4b3 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -217,8 +217,10 @@ void add_taint(unsigned flag) * Can't trust the integrity of the kernel anymore. * We don't call directly debug_locks_off() because the issue * is not necessarily serious enough to set oops_in_progress to 1 + * Also we want to keep up lockdep for staging development and + * post-warning case. 
*/ - if (__debug_locks_off()) + if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off()) printk(KERN_WARNING "Disabling lockdep due to kernel taint\n"); set_bit(flag, &tainted_mask); -- cgit v1.2.3-71-gd317 From c751085943362143f84346d274e0011419c84202 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sun, 12 Apr 2009 20:06:56 +0200 Subject: PM/Hibernate: Wait for SCSI devices scan to complete during resume There is a race between resume from hibernation and the asynchronous scanning of SCSI devices and to prevent it from happening we need to call scsi_complete_async_scans() during resume from hibernation. In addition, if the resume from hibernation is userland-driven, it's better to wait for all device probes in the kernel to complete before attempting to open the resume device. Signed-off-by: Rafael J. Wysocki Acked-by: Arjan van de Ven Signed-off-by: Linus Torvalds --- drivers/scsi/scsi_priv.h | 3 --- drivers/scsi/scsi_wait_scan.c | 2 +- include/scsi/scsi_scan.h | 11 +++++++++++ kernel/power/disk.c | 8 ++++++++ kernel/power/user.c | 9 +++++++++ 5 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 include/scsi/scsi_scan.h (limited to 'kernel') diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index e1850904ff73..fbc83bebdd8e 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -38,9 +38,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) { }; #endif -/* scsi_scan.c */ -int scsi_complete_async_scans(void); - /* scsi_devinfo.c */ extern int scsi_get_device_flags(struct scsi_device *sdev, const unsigned char *vendor, diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c index 8a636103083d..2f21af21269a 100644 --- a/drivers/scsi/scsi_wait_scan.c +++ b/drivers/scsi/scsi_wait_scan.c @@ -11,7 +11,7 @@ */ #include -#include "scsi_priv.h" +#include static int __init wait_scan_init(void) { diff --git a/include/scsi/scsi_scan.h b/include/scsi/scsi_scan.h new file mode 100644 index 000000000000..78898889243d --- /dev/null +++ b/include/scsi/scsi_scan.h @@ -0,0 +1,11 @@ +#ifndef _SCSI_SCSI_SCAN_H +#define _SCSI_SCSI_SCAN_H + +#ifdef CONFIG_SCSI +/* drivers/scsi/scsi_scan.c */ +extern int scsi_complete_async_scans(void); +#else +static inline int scsi_complete_async_scans(void) { return 0; } +#endif + +#endif /* _SCSI_SCSI_SCAN_H */ diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 5f21ab2bbcdf..0854770b63b9 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "power.h" @@ -644,6 +645,13 @@ static int software_resume(void) if (noresume) return 0; + /* + * We can't depend on SCSI devices being available after loading one of + * their modules if scsi_complete_async_scans() is not called and the + * resume device usually is a SCSI one. + */ + scsi_complete_async_scans(); + /* * name_to_dev_t() below takes a sysfs buffer mutex when sysfs * is configured into the kernel. Since the regular hibernate diff --git a/kernel/power/user.c b/kernel/power/user.c index 6c85359364f2..ed97375daae9 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -24,6 +24,7 @@ #include #include #include +#include #include @@ -92,6 +93,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) filp->private_data = data; memset(&data->handle, 0, sizeof(struct snapshot_handle)); if ((filp->f_flags & O_ACCMODE) == O_RDONLY) { + /* Hibernating. The image device should be accessible. 
*/ data->swap = swsusp_resume_device ? swap_type_of(swsusp_resume_device, 0, NULL) : -1; data->mode = O_RDONLY; @@ -99,6 +101,13 @@ static int snapshot_open(struct inode *inode, struct file *filp) if (error) pm_notifier_call_chain(PM_POST_HIBERNATION); } else { + /* + * Resuming. We may need to wait for the image device to + * appear. + */ + wait_for_device_probe(); + scsi_complete_async_scans(); + data->swap = -1; data->mode = O_WRONLY; error = pm_notifier_call_chain(PM_RESTORE_PREPARE); -- cgit v1.2.3-71-gd317 From a3d03ecaf9fe722bf96e4ef4a2f5e42ef652ddeb Mon Sep 17 00:00:00 2001 From: Zhaolei Date: Mon, 13 Apr 2009 15:23:53 +0800 Subject: tracing: Fix power tracer header Before patch: # tracer: power # # TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | [ 676.875865889] CSTATE: Going to C1 on cpu 0 for 0.005911463 [ 676.882938805] CSTATE: Going to C1 on cpu 0 for 0.104796532 ... After patch: # tracer: power # # TIMESTAMP STATE EVENT # | | | [ 676.875865889] CSTATE: Going to C1 on cpu 0 for 0.005911463 [ 676.882938805] CSTATE: Going to C1 on cpu 0 for 0.104796532 ... v2: Use seq_puts instead of seq_printf Signed-off-by: Zhao Lei Cc: Arjan van de Ven Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Tom Zanussi LKML-Reference: <49E2E889.5000903@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_power.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c index bae791ebcc51..118439709fb7 100644 --- a/kernel/trace/trace_power.c +++ b/kernel/trace/trace_power.c @@ -186,6 +186,12 @@ static enum print_line_t power_print_line(struct trace_iterator *iter) return TRACE_TYPE_UNHANDLED; } +static void power_print_header(struct seq_file *s) +{ + seq_puts(s, "# TIMESTAMP STATE EVENT\n"); + seq_puts(s, "# | | |\n"); +} + static struct tracer power_tracer __read_mostly = { .name = "power", @@ -194,6 +200,7 @@ static struct tracer power_tracer __read_mostly = .stop = stop_power_trace, .reset = power_trace_reset, .print_line = power_print_line, + .print_header = power_print_header, }; static int init_power_trace(void) -- cgit v1.2.3-71-gd317 From 4be6f6bb66111c9468733a4ed9cad10dc3a762c0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 13 Apr 2009 14:39:33 -0700 Subject: mm: move the scan_unevictable_pages sysctl to the vm table vm knobs should go in the vm table. Probably too late for randomize_va_space though. 
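The user-visible effect is just that the knob moves from the kernel/ directory to the vm/ directory in procfs, e.g. (illustrative):

	# echo 1 > /proc/sys/vm/scan_unevictable_pages

instead of the old /proc/sys/kernel/scan_unevictable_pages.
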
Signed-off-by: Peter Zijlstra Acked-by: Lee Schermerhorn Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 4286b62b34a0..e3d2c7dd59b9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -902,16 +902,6 @@ static struct ctl_table kern_table[] = { .proc_handler = &proc_dointvec, }, #endif -#ifdef CONFIG_UNEVICTABLE_LRU - { - .ctl_name = CTL_UNNUMBERED, - .procname = "scan_unevictable_pages", - .data = &scan_unevictable_pages, - .maxlen = sizeof(scan_unevictable_pages), - .mode = 0644, - .proc_handler = &scan_unevictable_handler, - }, -#endif #ifdef CONFIG_SLOW_WORK { .ctl_name = CTL_UNNUMBERED, @@ -1302,6 +1292,16 @@ static struct ctl_table vm_table[] = { .extra2 = &one, }, #endif +#ifdef CONFIG_UNEVICTABLE_LRU + { + .ctl_name = CTL_UNNUMBERED, + .procname = "scan_unevictable_pages", + .data = &scan_unevictable_pages, + .maxlen = sizeof(scan_unevictable_pages), + .mode = 0644, + .proc_handler = &scan_unevictable_handler, + }, +#endif /* * NOTE: do not add new entries to this table unless you have read * Documentation/sysctl/ctl_unnumbered.txt -- cgit v1.2.3-71-gd317 From f1671f6d783a2385d32e11f456cbe32f0e4b4b49 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 13 Apr 2009 14:40:03 -0700 Subject: ptrace: fix exit_ptrace() vs ptrace_traceme() race Pointed out by Roland. The bug was recently introduced by me in "forget_original_parent: split out the un-ptrace part", commit 39c626ae47c469abdfd30c6e42eff884931380d6. Since that patch we have a window after exit_ptrace() drops tasklist and before forget_original_parent() takes it again. In this window the child can do ptrace(PTRACE_TRACEME) and nobody can untrace this child after that. Change ptrace_traceme() to not attach to the exiting ->real_parent. We don't report the error in this case, we pretend we attach right before ->real_parent calls exit_ptrace() which should untrace us anyway. Signed-off-by: Oleg Nesterov Acked-by: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/ptrace.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 64191fa09b7e..dfcd83ceee3b 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -604,10 +604,11 @@ repeat: ret = security_ptrace_traceme(current->parent); /* - * Set the ptrace bit in the process ptrace flags. - * Then link us on our parent's ptraced list. + * Check PF_EXITING to ensure ->real_parent has not passed + * exit_ptrace(). Otherwise we don't report the error but + * pretend ->real_parent untraces us right after return. */ - if (!ret) { + if (!ret && !(current->real_parent->flags & PF_EXITING)) { current->ptrace |= PT_PTRACED; __ptrace_link(current, current->real_parent); } -- cgit v1.2.3-71-gd317 From 3d26dcf7679c5cc6c9f3b95ffdb2152fba2b7fae Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 13 Apr 2009 14:40:08 -0700 Subject: kernel/sys.c: clean up sys_shutdown exit path Impact: cleanup, fix Clean up sys_shutdown() exit path. Factor out common code. Return correct error code instead of always 0 on failure. 
Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index 51dbb55604e8..e7998cf31498 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -360,6 +360,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg) { char buffer[256]; + int ret = 0; /* We only trust the superuser with rebooting the system. */ if (!capable(CAP_SYS_BOOT)) @@ -397,7 +398,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, kernel_halt(); unlock_kernel(); do_exit(0); - break; + panic("cannot halt"); case LINUX_REBOOT_CMD_POWER_OFF: kernel_power_off(); @@ -417,29 +418,22 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, #ifdef CONFIG_KEXEC case LINUX_REBOOT_CMD_KEXEC: - { - int ret; - ret = kernel_kexec(); - unlock_kernel(); - return ret; - } + ret = kernel_kexec(); + break; #endif #ifdef CONFIG_HIBERNATION case LINUX_REBOOT_CMD_SW_SUSPEND: - { - int ret = hibernate(); - unlock_kernel(); - return ret; - } + ret = hibernate(); + break; #endif default: - unlock_kernel(); - return -EINVAL; + ret = -EINVAL; + break; } unlock_kernel(); - return 0; + return ret; } static void deferred_cad(struct work_struct *dummy) -- cgit v1.2.3-71-gd317 From 132380a06b24704fd6c9be55c44d4ef3972cead2 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Apr 2009 14:18:25 +0800 Subject: tracing, sched: mark get_parent_ip() notrace Impact: remove overly redundant tracing entries When tracer is "function" or "function_graph", way too much "get_parent_ip" entries are recorded in ring_buffer. Signed-off-by: Lai Jiangshan Acked-by: Frederic Weisbecker Acked-by: Steven Rostedt LKML-Reference: <49D458B1.5000703@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 5724508c3b66..e90e70ed36a3 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4846,7 +4846,7 @@ void scheduler_tick(void) #endif } -unsigned long get_parent_ip(unsigned long addr) +notrace unsigned long get_parent_ip(unsigned long addr) { if (in_lock_functions(addr)) { addr = CALLER_ADDR2; -- cgit v1.2.3-71-gd317 From 557055bebe9212dfa6b9f5df811dfd0dac77ec55 Mon Sep 17 00:00:00 2001 From: Zhaolei Date: Mon, 13 Apr 2009 16:02:34 +0800 Subject: tracing: Fix branch tracer header Before patch: # tracer: branch # # TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | <...>-2981 [000] 24008.872738: [ ok ] trace_irq_handler_exit:irq_event_types.h:41 <...>-2981 [000] 24008.872742: [ ok ] note_interrupt:spurious.c:229 ... After patch: # tracer: branch # # TASK-PID CPU# TIMESTAMP CORRECT FUNC:FILE:LINE # | | | | | | <...>-2985 [000] 26329.142970: [ ok ] slab_free:slub.c:1776 <...>-2985 [000] 26329.142972: [ ok ] trace_kmem_cache_free:kmem_event_types.h:191 ... 
Signed-off-by: Zhao Lei Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <49E2F19A.3040006@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_branch.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index ad8c22efff41..8333715e4066 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -155,6 +155,13 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter, return TRACE_TYPE_HANDLED; } +static void branch_print_header(struct seq_file *s) +{ + seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT" + " FUNC:FILE:LINE\n"); + seq_puts(s, "# | | | | | " + " |\n"); +} static struct trace_event trace_branch_event = { .type = TRACE_BRANCH, @@ -169,6 +176,7 @@ static struct tracer branch_trace __read_mostly = #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_branch, #endif /* CONFIG_FTRACE_SELFTEST */ + .print_header = branch_print_header, }; __init static int init_branch_tracer(void) -- cgit v1.2.3-71-gd317 From ef631b0ca01655d24e9ca7e199262c4a46416a26 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 13 Apr 2009 21:31:16 -0700 Subject: rcu: Make hierarchical RCU less IPI-happy This patch fixes a hierarchical-RCU performance bug located by Anton Blanchard. The problem stems from a misguided attempt to provide a work-around for jiffies-counter failure. This work-around uses a per-CPU n_rcu_pending counter, which is incremented on each call to rcu_pending(), which in turn is called from each scheduling-clock interrupt. Each CPU then treats this counter as a surrogate for the jiffies counter, so that if the jiffies counter fails to advance, the per-CPU n_rcu_pending counter will cause RCU to invoke force_quiescent_state(), which in turn will (among other things) send resched IPIs to CPUs that have thus far failed to pass through an RCU quiescent state. Unfortunately, each CPU resets only its own counter after sending a batch of IPIs. This means that the other CPUs will also (needlessly) send -another- round of IPIs, for a full N-squared set of IPIs in the worst case every three scheduler-clock ticks until the grace period finally ends. It is not reasonable for a given CPU to reset each and every n_rcu_pending for all the other CPUs, so this patch instead simply disables the jiffies-counter "training wheels", thus eliminating the excessive IPIs. Note that the jiffies-counter IPIs do not have this problem due to the fact that the jiffies counter is global, so that the CPU sending the IPIs can easily reset things, thus preventing the other CPUs from sending redundant IPIs. Note also that the n_rcu_pending counter remains, as it will continue to be used for tracing. It may also see use to update the jiffies counter, should an appropriate kick-the-jiffies-counter API appear. Located-by: Anton Blanchard Tested-by: Anton Blanchard Signed-off-by: Paul E. McKenney Cc: anton@samba.org Cc: akpm@linux-foundation.org Cc: dipankar@in.ibm.com Cc: manfred@colorfullife.com Cc: cl@linux-foundation.org Cc: josht@linux.vnet.ibm.com Cc: schamp@sgi.com Cc: niv@us.ibm.com Cc: dvhltc@us.ibm.com Cc: ego@in.ibm.com Cc: laijs@cn.fujitsu.com Cc: rostedt@goodmis.org Cc: peterz@infradead.org Cc: penberg@cs.helsinki.fi Cc: andi@firstfloor.org Cc: "Paul E. 
McKenney" LKML-Reference: <12396834793575-git-send-email-> Signed-off-by: Ingo Molnar --- include/linux/rcutree.h | 3 +-- kernel/rcutree.c | 19 ++++--------------- kernel/rcutree_trace.c | 14 +++++--------- 3 files changed, 10 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 0cdda00f2b2a..58b2aa5312b9 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -161,9 +161,8 @@ struct rcu_data { unsigned long offline_fqs; /* Kicked due to being offline. */ unsigned long resched_ipi; /* Sent a resched IPI. */ - /* 5) state to allow this CPU to force_quiescent_state on others */ + /* 5) For future __rcu_pending statistics. */ long n_rcu_pending; /* rcu_pending() calls since boot. */ - long n_rcu_pending_force_qs; /* when to force quiescent states. */ int cpu; }; diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 7f3266922572..d2a372fb0b9b 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -530,8 +530,6 @@ static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) rdp->qs_pending = 1; rdp->passed_quiesc = 0; rdp->gpnum = rsp->gpnum; - rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + - RCU_JIFFIES_TILL_FORCE_QS; } /* @@ -578,8 +576,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) rsp->gpnum++; rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; - rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + - RCU_JIFFIES_TILL_FORCE_QS; record_gp_stall_check_time(rsp); dyntick_record_completed(rsp, rsp->completed - 1); note_new_gpnum(rsp, rdp); @@ -1055,7 +1051,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) { unsigned long flags; long lastcomp; - struct rcu_data *rdp = rsp->rda[smp_processor_id()]; struct rcu_node *rnp = rcu_get_root(rsp); u8 signaled; @@ -1066,16 +1061,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) return; /* Someone else is already on the job. */ } if (relaxed && - (long)(rsp->jiffies_force_qs - jiffies) >= 0 && - (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0) + (long)(rsp->jiffies_force_qs - jiffies) >= 0) goto unlock_ret; /* no emergency and done recently. */ rsp->n_force_qs++; spin_lock(&rnp->lock); lastcomp = rsp->completed; signaled = rsp->signaled; rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; - rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + - RCU_JIFFIES_TILL_FORCE_QS; if (lastcomp == rsp->gpnum) { rsp->n_force_qs_ngp++; spin_unlock(&rnp->lock); @@ -1144,8 +1136,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) * If an RCU GP has gone long enough, go check for dyntick * idle CPUs and, if needed, send resched IPIs. 
*/ - if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || - (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0) + if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) force_quiescent_state(rsp, 1); /* @@ -1230,8 +1221,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), if (unlikely(++rdp->qlen > qhimark)) { rdp->blimit = LONG_MAX; force_quiescent_state(rsp, 0); - } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || - (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0) + } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) force_quiescent_state(rsp, 1); local_irq_restore(flags); } @@ -1290,8 +1280,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) /* Has an RCU GP gone long enough to send resched IPIs &c? */ if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) && - ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || - (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)) + ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) return 1; /* nothing to do */ diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index 4ee954f6a8d5..4b1875ba9404 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c @@ -49,14 +49,12 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) { if (!rdp->beenonline) return; - seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x", + seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d", rdp->cpu, cpu_is_offline(rdp->cpu) ? '!' : ' ', rdp->completed, rdp->gpnum, rdp->passed_quiesc, rdp->passed_quiesc_completed, - rdp->qs_pending, - rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending, - (int)(rdp->n_rcu_pending & 0xffff)); + rdp->qs_pending); #ifdef CONFIG_NO_HZ seq_printf(m, " dt=%d/%d dn=%d df=%lu", rdp->dynticks->dynticks, @@ -102,14 +100,12 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) { if (!rdp->beenonline) return; - seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld", + seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d", rdp->cpu, cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"", rdp->completed, rdp->gpnum, rdp->passed_quiesc, rdp->passed_quiesc_completed, - rdp->qs_pending, - rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending, - rdp->n_rcu_pending); + rdp->qs_pending); #ifdef CONFIG_NO_HZ seq_printf(m, ",%d,%d,%d,%lu", rdp->dynticks->dynticks, @@ -123,7 +119,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) static int show_rcudata_csv(struct seq_file *m, void *unused) { - seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",\"rpfq\",\"rp\","); + seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\","); #ifdef CONFIG_NO_HZ seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); #endif /* #ifdef CONFIG_NO_HZ */ -- cgit v1.2.3-71-gd317 From 297dbf50d7ab0539cf9cf7f2a66918665a18e45e Mon Sep 17 00:00:00 2001 From: Nikanth Karthikesan Date: Wed, 15 Apr 2009 10:37:04 +0530 Subject: swap: Remove code handling bio_alloc failure with __GFP_WAIT Remove code handling bio_alloc failure with __GFP_WAIT. 
Signed-off-by: Nikanth Karthikesan Signed-off-by: Jens Axboe --- kernel/power/swap.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 505f319e489c..8ba052c86d48 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -64,8 +64,6 @@ static int submit(int rw, pgoff_t page_off, struct page *page, struct bio *bio; bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); - if (!bio) - return -ENOMEM; bio->bi_sector = page_off * (PAGE_SIZE >> 9); bio->bi_bdev = resume_bdev; bio->bi_end_io = end_swap_bio_read; -- cgit v1.2.3-71-gd317 From 5b1d07ed0e5b2707f786957c7a40eb2f399c84a8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 15 Apr 2009 19:35:01 +0100 Subject: RCU: Don't try and predeclare inline funcs as it upsets some versions of gcc Don't try and predeclare inline funcs like this: static inline void wait_migrated_callbacks(void) ... static void _rcu_barrier(enum rcu_barrier type) { ... wait_migrated_callbacks(); } ... static inline void wait_migrated_callbacks(void) { wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); } as it upsets some versions of gcc under some circumstances: kernel/rcupdate.c: In function `_rcu_barrier': kernel/rcupdate.c:125: sorry, unimplemented: inlining failed in call to 'wait_migrated_callbacks': function body not available kernel/rcupdate.c:152: sorry, unimplemented: called from here This can be dealt with by simply putting the static variables (rcu_migrate_*) at the top, and moving the implementation of the function up so that it replaces its forward declaration. Signed-off-by: David Howells Cc: Dipankar Sarma Cc: Paul E. McKenney Signed-off-by: Linus Torvalds --- kernel/rcupdate.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 2c7b8457d0d2..a967c9feb90a 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -58,6 +58,10 @@ static DEFINE_MUTEX(rcu_barrier_mutex); static struct completion rcu_barrier_completion; int rcu_scheduler_active __read_mostly; +static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0); +static struct rcu_head rcu_migrate_head[3]; +static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq); + /* * Awaken the corresponding synchronize_rcu() instance now that a * grace period has elapsed. 
@@ -122,7 +126,10 @@ static void rcu_barrier_func(void *type) } } -static inline void wait_migrated_callbacks(void); +static inline void wait_migrated_callbacks(void) +{ + wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); +} /* * Orchestrate the specified type of RCU barrier, waiting for all @@ -179,21 +186,12 @@ void rcu_barrier_sched(void) } EXPORT_SYMBOL_GPL(rcu_barrier_sched); -static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0); -static struct rcu_head rcu_migrate_head[3]; -static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq); - static void rcu_migrate_callback(struct rcu_head *notused) { if (atomic_dec_and_test(&rcu_migrate_type_count)) wake_up(&rcu_migrate_wq); } -static inline void wait_migrated_callbacks(void) -{ - wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); -} - static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, unsigned long action, void *hcpu) { -- cgit v1.2.3-71-gd317 From 381512cf3d27f63f7a45b1bbe7d2d609c2ea3b74 Mon Sep 17 00:00:00 2001 From: Gautham R Shenoy Date: Tue, 14 Apr 2009 09:09:36 +0530 Subject: sched: Avoid printing sched_group::__cpu_power for default case Commit 46e0bb9c12f4 ("sched: Print sched_group::__cpu_power in sched_domain_debug") produces a messy dmesg output while attempting to print the sched_group::__cpu_power for each group in the sched_domain hierarchy. Fix this by avoid printing the __cpu_power for default cases. (i.e, __cpu_power == SCHED_LOAD_SCALE). [ Impact: reduce syslog clutter ] Reported-by: Tony Luck Signed-off-by: Gautham R Shenoy Fixed-by: Tony Luck Cc: a.p.zijlstra@chello.nl LKML-Reference: <20090414033936.GA534@in.ibm.com> Signed-off-by: Ingo Molnar --- kernel/sched.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index e90e70ed36a3..b902e587a3a0 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -7367,8 +7367,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, cpumask_or(groupmask, groupmask, sched_group_cpus(group)); cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); - printk(KERN_CONT " %s (__cpu_power = %d)", str, - group->__cpu_power); + + printk(KERN_CONT " %s", str); + if (group->__cpu_power != SCHED_LOAD_SCALE) { + printk(KERN_CONT " (__cpu_power = %d)", + group->__cpu_power); + } group = group->next; } while (group != sd->groups); -- cgit v1.2.3-71-gd317 From 79d381c9f2354b594dcab9b04dfcc0debf7294fe Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Thu, 16 Apr 2009 19:30:18 -0400 Subject: kernel/softirq.c: fix sparse warning Fix sparse warning in kernel/softirq.c. 
warning: do-while statement is not a compound statement Signed-off-by: H Hartley Sweeten LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/softirq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/softirq.c b/kernel/softirq.c index 2fecefacdc5b..b525dd348511 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -472,9 +472,9 @@ void tasklet_kill(struct tasklet_struct *t) printk("Attempt to kill tasklet from interrupt\n"); while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { - do + do { yield(); - while (test_bit(TASKLET_STATE_SCHED, &t->state)); + } while (test_bit(TASKLET_STATE_SCHED, &t->state)); } tasklet_unlock_wait(t); clear_bit(TASKLET_STATE_SCHED, &t->state); -- cgit v1.2.3-71-gd317 From ff54250a0ebab7f90a5f848a0ba63f999830c872 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 18 Apr 2009 21:44:24 -0700 Subject: Remove 'recurse into child resources' logic from 'reserve_region_with_split()' This function is not actually used right now, since the original use case for it was done with insert_resource_expand_to_fit() instead. However, we now have another usage case that wants to basically do a "reserve IO resource, splitting around existing resources", however that one doesn't actually want the "recurse into the conflicting resource" logic at all. And since recursing into the conflicting resource was the most complex part, and isn't wanted, just remove it. Maybe we'll some day want both versions, but we can just resurrect the logic then. Tested-by: Yinghai Lu Signed-off-by: Linus Torvalds --- kernel/resource.c | 46 ++++++++++++---------------------------------- 1 file changed, 12 insertions(+), 34 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index fd5d7d574bb9..ac5f3a36923f 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -533,43 +533,21 @@ static void __init __reserve_region_with_split(struct resource *root, res->end = end; res->flags = IORESOURCE_BUSY; - for (;;) { - conflict = __request_resource(parent, res); - if (!conflict) - break; - if (conflict != parent) { - parent = conflict; - if (!(conflict->flags & IORESOURCE_BUSY)) - continue; - } - - /* Uhhuh, that didn't work out.. */ - kfree(res); - res = NULL; - break; - } - - if (!res) { - /* failed, split and try again */ - - /* conflict covered whole area */ - if (conflict->start <= start && conflict->end >= end) - return; + conflict = __request_resource(parent, res); + if (!conflict) + return; - if (conflict->start > start) - __reserve_region_with_split(root, start, conflict->start-1, name); - if (!(conflict->flags & IORESOURCE_BUSY)) { - resource_size_t common_start, common_end; + /* failed, split and try again */ + kfree(res); - common_start = max(conflict->start, start); - common_end = min(conflict->end, end); - if (common_start < common_end) - __reserve_region_with_split(root, common_start, common_end, name); - } - if (conflict->end < end) - __reserve_region_with_split(root, conflict->end+1, end, name); - } + /* conflict covered whole area */ + if (conflict->start <= start && conflict->end >= end) + return; + if (conflict->start > start) + __reserve_region_with_split(root, start, conflict->start-1, name); + if (conflict->end < end) + __reserve_region_with_split(root, conflict->end+1, end, name); } void __init reserve_region_with_split(struct resource *root, -- cgit v1.2.3-71-gd317 From 6a7c7eaf71b636f197d73b381a2ab729ebdcfb2e Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Sun, 19 Apr 2009 20:08:42 +0200 Subject: PM/Suspend: Introduce two new platform callbacks to avoid breakage Commit 900af0d973856d6feb6fc088c2d0d3fde57707d3 (PM: Change suspend code ordering) changed the ordering of suspend code in such a way that the platform .prepare() callback is now executed after the device drivers' late suspend callbacks have run. Unfortunately, this turns out to break ARM platforms that need to talk via I2C to power control devices during the .prepare() callback. For this reason introduce two new platform suspend callbacks, .prepare_late() and .wake(), that will be called just prior to disabling non-boot CPUs and right after bringing them back on line, respectively, and use them instead of .prepare() and .finish() for ACPI suspend. Make the PM core execute the .prepare() and .finish() platform suspend callbacks where they were executed previously (that is, right after calling the regular suspend methods provided by device drivers and right before executing their regular resume methods, respectively). It is not necessary to make analogous changes to the hibernation code and data structures at the moment, because they are only used by ACPI platforms. Signed-off-by: Rafael J. Wysocki Reported-by: Russell King Acked-by: Len Brown --- drivers/acpi/sleep.c | 8 ++++---- include/linux/suspend.h | 36 ++++++++++++++++++++++++++---------- kernel/power/main.c | 24 +++++++++++++++++------- 3 files changed, 47 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 779e4e500df4..d060e6fd7fd5 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -300,9 +300,9 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state) static struct platform_suspend_ops acpi_suspend_ops = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin, - .prepare = acpi_pm_prepare, + .prepare_late = acpi_pm_prepare, .enter = acpi_suspend_enter, - .finish = acpi_pm_finish, + .wake = acpi_pm_finish, .end = acpi_pm_end, }; @@ -328,9 +328,9 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state) static struct platform_suspend_ops acpi_suspend_ops_old = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin_old, - .prepare = acpi_pm_disable_gpes, + .prepare_late = acpi_pm_disable_gpes, .enter = acpi_suspend_enter, - .finish = acpi_pm_finish, + .wake = acpi_pm_finish, .end = acpi_pm_end, .recover = acpi_pm_finish, }; diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3e3a4364cbff..795032edfc46 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -58,10 +58,17 @@ typedef int __bitwise suspend_state_t; * by @begin(). * @prepare() is called right after devices have been suspended (ie. the * appropriate .suspend() method has been executed for each device) and - * before the nonboot CPUs are disabled (it is executed with IRQs enabled). - * This callback is optional. It returns 0 on success or a negative - * error code otherwise, in which case the system cannot enter the desired - * sleep state (@enter() and @finish() will not be called in that case). + * before device drivers' late suspend callbacks are executed. It returns + * 0 on success or a negative error code otherwise, in which case the + * system cannot enter the desired sleep state (@prepare_late(), @enter(), + * @wake(), and @finish() will not be called in that case). + * + * @prepare_late: Finish preparing the platform for entering the system sleep + * state indicated by @begin(). 
+ * @prepare_late is called before disabling nonboot CPUs and after + * device drivers' late suspend callbacks have been executed. It returns + * 0 on success or a negative error code otherwise, in which case the + * system cannot enter the desired sleep state (@enter() and @wake()). * * @enter: Enter the system sleep state indicated by @begin() or represented by * the argument if @begin() is not implemented. @@ -69,19 +76,26 @@ typedef int __bitwise suspend_state_t; * error code otherwise, in which case the system cannot enter the desired * sleep state. * - * @finish: Called when the system has just left a sleep state, right after - * the nonboot CPUs have been enabled and before devices are resumed (it is - * executed with IRQs enabled). + * @wake: Called when the system has just left a sleep state, right after + * the nonboot CPUs have been enabled and before device drivers' early + * resume callbacks are executed. + * This callback is optional, but should be implemented by the platforms + * that implement @prepare_late(). If implemented, it is always called + * after @enter(), even if @enter() fails. + * + * @finish: Finish wake-up of the platform. + * @finish is called right prior to calling device drivers' regular suspend + * callbacks. * This callback is optional, but should be implemented by the platforms * that implement @prepare(). If implemented, it is always called after - * @enter() (even if @enter() fails). + * @enter() and @wake(), if implemented, even if any of them fails. * * @end: Called by the PM core right after resuming devices, to indicate to * the platform that the system has returned to the working state or * the transition to the sleep state has been aborted. * This callback is optional, but should be implemented by the platforms - * that implement @begin(), but platforms implementing @begin() should - * also provide a @end() which cleans up transitions aborted before + * that implement @begin(). Accordingly, platforms implementing @begin() + * should also provide a @end() which cleans up transitions aborted before * @enter(). * * @recover: Recover the platform from a suspend failure. 
@@ -93,7 +107,9 @@ struct platform_suspend_ops { int (*valid)(suspend_state_t state); int (*begin)(suspend_state_t state); int (*prepare)(void); + int (*prepare_late)(void); int (*enter)(suspend_state_t state); + void (*wake)(void); void (*finish)(void); void (*end)(void); void (*recover)(void); diff --git a/kernel/power/main.c b/kernel/power/main.c index f172f41858bb..f99ed6a75eac 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -291,20 +291,26 @@ static int suspend_enter(suspend_state_t state) device_pm_lock(); + if (suspend_ops->prepare) { + error = suspend_ops->prepare(); + if (error) + goto Done; + } + error = device_power_down(PMSG_SUSPEND); if (error) { printk(KERN_ERR "PM: Some devices failed to power down\n"); - goto Done; + goto Platfrom_finish; } - if (suspend_ops->prepare) { - error = suspend_ops->prepare(); + if (suspend_ops->prepare_late) { + error = suspend_ops->prepare_late(); if (error) goto Power_up_devices; } if (suspend_test(TEST_PLATFORM)) - goto Platfrom_finish; + goto Platform_wake; error = disable_nonboot_cpus(); if (error || suspend_test(TEST_CPUS)) @@ -326,13 +332,17 @@ static int suspend_enter(suspend_state_t state) Enable_cpus: enable_nonboot_cpus(); - Platfrom_finish: - if (suspend_ops->finish) - suspend_ops->finish(); + Platform_wake: + if (suspend_ops->wake) + suspend_ops->wake(); Power_up_devices: device_power_up(PMSG_RESUME); + Platfrom_finish: + if (suspend_ops->finish) + suspend_ops->finish(); + Done: device_pm_unlock(); -- cgit v1.2.3-71-gd317 From 24b6f16ecf37f918a1934d590e9e71c100d6388f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Apr 2009 03:25:41 -0400 Subject: No need for crossing to mountpoint in audit_tag_tree() is_under() will DTRT anyway. And yes, is_subdir() behaviour is intentional. Signed-off-by: Al Viro --- fs/dcache.c | 1 - kernel/audit_tree.c | 3 --- 2 files changed, 4 deletions(-) (limited to 'kernel') diff --git a/fs/dcache.c b/fs/dcache.c index 761d30be2683..1fcffebfb44f 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2149,7 +2149,6 @@ int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) int result; unsigned long seq; - /* FIXME: This is old behavior, needed? Please check callers. */ if (new_dentry == old_dentry) return 1; diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 917ab9525568..6e7351739a82 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -734,9 +734,6 @@ int audit_tag_tree(char *old, char *new) dentry = dget(path.dentry); path_put(&path); - if (dentry == tagged->mnt_root && dentry == mnt->mnt_root) - follow_up(&mnt, &dentry); - list_add_tail(&list, &tagged->mnt_list); mutex_lock(&audit_filter_mutex); -- cgit v1.2.3-71-gd317 From 8e19608e8b5c001e4a66ce482edc474f05fb7355 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 21 Apr 2009 12:24:00 -0700 Subject: clocksource: pass clocksource to read() callback Pass clocksource pointer to the read() callback for clocksources. This allows us to share the callback between multiple instances. 
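A user-space sketch of why the callback now takes the clocksource pointer: a single read() implementation can serve several instances by recovering per-instance state from that pointer, which in the kernel is typically done with container_of(). The mmio_clocksource type and its register fields are made up for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct clocksource {
        const char *name;
        uint64_t (*read)(struct clocksource *cs);
};

struct mmio_clocksource {
        struct clocksource cs;      /* embedded, so cs can be mapped back */
        volatile uint32_t *counter; /* per-instance counter register */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* One shared callback works for every mmio_clocksource instance. */
static uint64_t mmio_read(struct clocksource *cs)
{
        struct mmio_clocksource *mc = container_of(cs, struct mmio_clocksource, cs);
        return *mc->counter;
}

int main(void)
{
        uint32_t reg_a = 100, reg_b = 200;
        struct mmio_clocksource a = { { "timer-a", mmio_read }, &reg_a };
        struct mmio_clocksource b = { { "timer-b", mmio_read }, &reg_b };

        printf("%s=%llu %s=%llu\n",
               a.cs.name, (unsigned long long)a.cs.read(&a.cs),
               b.cs.name, (unsigned long long)b.cs.read(&b.cs));
        return 0;
}

In the tree-wide diff below most drivers simply ignore the new argument; the pointer matters for drivers that register more than one instance of the same timer block.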
[hugh@veritas.com: fix powerpc build of clocksource pass clocksource mods] [akpm@linux-foundation.org: cleanup] Signed-off-by: Magnus Damm Acked-by: John Stultz Cc: Thomas Gleixner Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mach-at91/at91rm9200_time.c | 2 +- arch/arm/mach-at91/at91sam926x_time.c | 2 +- arch/arm/mach-davinci/time.c | 2 +- arch/arm/mach-imx/time.c | 2 +- arch/arm/mach-ixp4xx/common.c | 2 +- arch/arm/mach-msm/timer.c | 4 ++-- arch/arm/mach-netx/time.c | 2 +- arch/arm/mach-ns9xxx/time-ns9360.c | 2 +- arch/arm/mach-omap1/time.c | 2 +- arch/arm/mach-omap2/timer-gp.c | 2 +- arch/arm/mach-pxa/time.c | 2 +- arch/arm/mach-realview/core.c | 2 +- arch/arm/mach-versatile/core.c | 2 +- arch/arm/plat-mxc/time.c | 2 +- arch/arm/plat-omap/common.c | 4 ++-- arch/arm/plat-orion/time.c | 2 +- arch/avr32/kernel/time.c | 2 +- arch/blackfin/kernel/time-ts.c | 12 ++++++------ arch/ia64/kernel/cyclone.c | 2 +- arch/ia64/kernel/time.c | 4 ++-- arch/ia64/sn/kernel/sn2/timer.c | 2 +- arch/m68knommu/platform/68328/timers.c | 2 +- arch/m68knommu/platform/coldfire/dma_timer.c | 2 +- arch/m68knommu/platform/coldfire/pit.c | 2 +- arch/m68knommu/platform/coldfire/timers.c | 2 +- arch/mips/kernel/cevt-txx9.c | 2 +- arch/mips/kernel/csrc-bcm1480.c | 2 +- arch/mips/kernel/csrc-ioasic.c | 6 +++--- arch/mips/kernel/csrc-r4k.c | 2 +- arch/mips/kernel/csrc-sb1250.c | 2 +- arch/mips/kernel/i8253.c | 2 +- arch/mips/nxp/pnx8550/common/time.c | 2 +- arch/mips/sgi-ip27/ip27-timer.c | 2 +- arch/powerpc/kernel/time.c | 8 ++++---- arch/s390/kernel/time.c | 2 +- arch/sh/kernel/time_32.c | 2 +- arch/sh/kernel/timers/timer-tmu.c | 2 +- arch/sparc/kernel/time_64.c | 7 ++++++- arch/um/kernel/time.c | 2 +- arch/x86/kernel/hpet.c | 6 +++--- arch/x86/kernel/i8253.c | 2 +- arch/x86/kernel/kvmclock.c | 7 ++++++- arch/x86/kernel/tsc.c | 2 +- arch/x86/kernel/vmiclock_32.c | 2 +- arch/x86/lguest/boot.c | 2 +- arch/x86/xen/time.c | 7 ++++++- drivers/char/hpet.c | 2 +- drivers/clocksource/acpi_pm.c | 12 ++++++------ drivers/clocksource/cyclone.c | 2 +- drivers/clocksource/scx200_hrt.c | 2 +- drivers/clocksource/tcb_clksrc.c | 2 +- include/linux/clocksource.h | 6 +++--- kernel/time/clocksource.c | 8 ++++---- kernel/time/jiffies.c | 2 +- 54 files changed, 94 insertions(+), 79 deletions(-) (limited to 'kernel') diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index 1ff1bda0a894..309f3511aa20 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -85,7 +85,7 @@ static struct irqaction at91rm9200_timer_irq = { .handler = at91rm9200_timer_interrupt }; -static cycle_t read_clk32k(void) +static cycle_t read_clk32k(struct clocksource *cs) { return read_CRTR(); } diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c index b63e1d5f1bad..4bd56aee4370 100644 --- a/arch/arm/mach-at91/at91sam926x_time.c +++ b/arch/arm/mach-at91/at91sam926x_time.c @@ -31,7 +31,7 @@ static u32 pit_cnt; /* access only w/system irq blocked */ * Clocksource: just a monotonic counter of MCK/16 cycles. * We don't care whether or not PIT irqs are enabled. 
*/ -static cycle_t read_pit_clk(void) +static cycle_t read_pit_clk(struct clocksource *cs) { unsigned long flags; u32 elapsed; diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c index f8bcd29d17a6..6c227d4ba998 100644 --- a/arch/arm/mach-davinci/time.c +++ b/arch/arm/mach-davinci/time.c @@ -238,7 +238,7 @@ static void __init timer_init(void) /* * clocksource */ -static cycle_t read_cycles(void) +static cycle_t read_cycles(struct clocksource *cs) { struct timer_s *t = &timers[TID_CLOCKSOURCE]; diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c index aff0ebcfa847..5aef18b599e5 100644 --- a/arch/arm/mach-imx/time.c +++ b/arch/arm/mach-imx/time.c @@ -73,7 +73,7 @@ static void __init imx_timer_hardware_init(void) IMX_TCTL(TIMER_BASE) = TCTL_FRR | TCTL_CLK_PCLK1 | TCTL_TEN; } -cycle_t imx_get_cycles(void) +cycle_t imx_get_cycles(struct clocksource *cs) { return IMX_TCN(TIMER_BASE); } diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index f4656d2ac8a8..1e93dfee7543 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -401,7 +401,7 @@ void __init ixp4xx_sys_init(void) /* * clocksource */ -cycle_t ixp4xx_get_cycles(void) +cycle_t ixp4xx_get_cycles(struct clocksource *cs) { return *IXP4XX_OSTS; } diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 444d9c0f5ca6..4855b8ca5101 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -57,12 +57,12 @@ static irqreturn_t msm_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static cycle_t msm_gpt_read(void) +static cycle_t msm_gpt_read(struct clocksource *cs) { return readl(MSM_GPT_BASE + TIMER_COUNT_VAL); } -static cycle_t msm_dgt_read(void) +static cycle_t msm_dgt_read(struct clocksource *cs) { return readl(MSM_DGT_BASE + TIMER_COUNT_VAL) >> MSM_DGT_SHIFT; } diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c index f201fddb594f..82801dbf0579 100644 --- a/arch/arm/mach-netx/time.c +++ b/arch/arm/mach-netx/time.c @@ -104,7 +104,7 @@ static struct irqaction netx_timer_irq = { .handler = netx_timer_interrupt, }; -cycle_t netx_get_cycles(void) +cycle_t netx_get_cycles(struct clocksource *cs) { return readl(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE)); } diff --git a/arch/arm/mach-ns9xxx/time-ns9360.c b/arch/arm/mach-ns9xxx/time-ns9360.c index 41df69721769..77281260358a 100644 --- a/arch/arm/mach-ns9xxx/time-ns9360.c +++ b/arch/arm/mach-ns9xxx/time-ns9360.c @@ -25,7 +25,7 @@ #define TIMER_CLOCKEVENT 1 static u32 latch; -static cycle_t ns9360_clocksource_read(void) +static cycle_t ns9360_clocksource_read(struct clocksource *cs) { return __raw_readl(SYS_TR(TIMER_CLOCKSOURCE)); } diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index 495a32c287b4..4d56408d3cff 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -198,7 +198,7 @@ static struct irqaction omap_mpu_timer2_irq = { .handler = omap_mpu_timer2_interrupt, }; -static cycle_t mpu_read(void) +static cycle_t mpu_read(struct clocksource *cs) { return ~omap_mpu_timer_read(1); } diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c index 9fc13a2cc3f4..1cb2c0909c2b 100644 --- a/arch/arm/mach-omap2/timer-gp.c +++ b/arch/arm/mach-omap2/timer-gp.c @@ -138,7 +138,7 @@ static inline void __init omap2_gp_clocksource_init(void) {} * clocksource */ static struct omap_dm_timer *gpt_clocksource; -static cycle_t clocksource_read_cycles(void) +static cycle_t clocksource_read_cycles(struct clocksource *cs) 
{ return (cycle_t)omap_dm_timer_read_counter(gpt_clocksource); } diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c index 8eb3830fbb0b..750c448db672 100644 --- a/arch/arm/mach-pxa/time.c +++ b/arch/arm/mach-pxa/time.c @@ -125,7 +125,7 @@ static struct clock_event_device ckevt_pxa_osmr0 = { .set_mode = pxa_osmr0_set_mode, }; -static cycle_t pxa_read_oscr(void) +static cycle_t pxa_read_oscr(struct clocksource *cs) { return OSCR; } diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 9ab947c14f26..942e1a7eb9b2 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -715,7 +715,7 @@ static struct irqaction realview_timer_irq = { .handler = realview_timer_interrupt, }; -static cycle_t realview_get_cycles(void) +static cycle_t realview_get_cycles(struct clocksource *cs) { return ~readl(timer3_va_base + TIMER_VALUE); } diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 565776680d8c..1f929c391af7 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -948,7 +948,7 @@ static struct irqaction versatile_timer_irq = { .handler = versatile_timer_interrupt, }; -static cycle_t versatile_get_cycles(void) +static cycle_t versatile_get_cycles(struct clocksource *cs) { return ~readl(TIMER3_VA_BASE + TIMER_VALUE); } diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c index ef1b3cd85bd3..dab3357196fb 100644 --- a/arch/arm/plat-mxc/time.c +++ b/arch/arm/plat-mxc/time.c @@ -36,7 +36,7 @@ static enum clock_event_mode clockevent_mode = CLOCK_EVT_MODE_UNUSED; /* clock source */ -static cycle_t mxc_get_cycles(void) +static cycle_t mxc_get_cycles(struct clocksource *cs) { return __raw_readl(TIMER_BASE + MXC_TCN); } diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c index d1797147732f..433021f3d7cc 100644 --- a/arch/arm/plat-omap/common.c +++ b/arch/arm/plat-omap/common.c @@ -185,7 +185,7 @@ console_initcall(omap_add_serial_console); #include -static cycle_t omap_32k_read(void) +static cycle_t omap_32k_read(struct clocksource *cs) { return omap_readl(TIMER_32K_SYNCHRONIZED); } @@ -207,7 +207,7 @@ unsigned long long sched_clock(void) { unsigned long long ret; - ret = (unsigned long long)omap_32k_read(); + ret = (unsigned long long)omap_32k_read(&clocksource_32k); ret = (ret * clocksource_32k.mult_orig) >> clocksource_32k.shift; return ret; } diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c index 6fa2923e6dca..2faf9dba4ef7 100644 --- a/arch/arm/plat-orion/time.c +++ b/arch/arm/plat-orion/time.c @@ -41,7 +41,7 @@ static u32 ticks_per_jiffy; /* * Clocksource handling. 
*/ -static cycle_t orion_clksrc_read(void) +static cycle_t orion_clksrc_read(struct clocksource *cs) { return 0xffffffff - readl(TIMER0_VAL); } diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c index 0ff46bf873b0..f27aa3b259fa 100644 --- a/arch/avr32/kernel/time.c +++ b/arch/avr32/kernel/time.c @@ -18,7 +18,7 @@ #include -static cycle_t read_cycle_count(void) +static cycle_t read_cycle_count(struct clocksource *cs) { return (cycle_t)sysreg_read(COUNT); } diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 0ed2badfd746..27646121280a 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c @@ -58,16 +58,11 @@ static inline unsigned long long cycles_2_ns(cycle_t cyc) return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; } -static cycle_t read_cycles(void) +static cycle_t read_cycles(struct clocksource *cs) { return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); } -unsigned long long sched_clock(void) -{ - return cycles_2_ns(read_cycles()); -} - static struct clocksource clocksource_bfin = { .name = "bfin_cycles", .rating = 350, @@ -77,6 +72,11 @@ static struct clocksource clocksource_bfin = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +unsigned long long sched_clock(void) +{ + return cycles_2_ns(read_cycles(&clocksource_bfin)); +} + static int __init bfin_clocksource_init(void) { set_cyc2ns_scale(get_cclk() / 1000); diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c index 790ef0d87e12..71e35864d2e2 100644 --- a/arch/ia64/kernel/cyclone.c +++ b/arch/ia64/kernel/cyclone.c @@ -21,7 +21,7 @@ void __init cyclone_setup(void) static void __iomem *cyclone_mc; -static cycle_t read_cyclone(void) +static cycle_t read_cyclone(struct clocksource *cs) { return (cycle_t)readq((void __iomem *)cyclone_mc); } diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 641c8b61c4f1..604c1a35db33 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -33,7 +33,7 @@ #include "fsyscall_gtod_data.h" -static cycle_t itc_get_cycles(void); +static cycle_t itc_get_cycles(struct clocksource *cs); struct fsyscall_gtod_data_t fsyscall_gtod_data = { .lock = SEQLOCK_UNLOCKED, @@ -383,7 +383,7 @@ ia64_init_itm (void) } } -static cycle_t itc_get_cycles(void) +static cycle_t itc_get_cycles(struct clocksource *cs) { u64 lcycle, now, ret; diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c index cf67fc562054..21d6f09e3447 100644 --- a/arch/ia64/sn/kernel/sn2/timer.c +++ b/arch/ia64/sn/kernel/sn2/timer.c @@ -23,7 +23,7 @@ extern unsigned long sn_rtc_cycles_per_second; -static cycle_t read_sn2(void) +static cycle_t read_sn2(struct clocksource *cs) { return (cycle_t)readq(RTC_COUNTER_ADDR); } diff --git a/arch/m68knommu/platform/68328/timers.c b/arch/m68knommu/platform/68328/timers.c index 6bafefa546e5..309f725995bf 100644 --- a/arch/m68knommu/platform/68328/timers.c +++ b/arch/m68knommu/platform/68328/timers.c @@ -75,7 +75,7 @@ static struct irqaction m68328_timer_irq = { /***************************************************************************/ -static cycle_t m68328_read_clk(void) +static cycle_t m68328_read_clk(struct clocksource *cs) { unsigned long flags; u32 cycles; diff --git a/arch/m68knommu/platform/coldfire/dma_timer.c b/arch/m68knommu/platform/coldfire/dma_timer.c index 772578b1084f..a5f562823d7a 100644 --- a/arch/m68knommu/platform/coldfire/dma_timer.c +++ b/arch/m68knommu/platform/coldfire/dma_timer.c @@ -34,7 +34,7 @@ #define DMA_DTMR_CLK_DIV_16 (2 << 1) #define 
DMA_DTMR_ENABLE (1 << 0) -static cycle_t cf_dt_get_cycles(void) +static cycle_t cf_dt_get_cycles(struct clocksource *cs) { return __raw_readl(DTCN0); } diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c index 2a12e7fa9748..61b96211f8ff 100644 --- a/arch/m68knommu/platform/coldfire/pit.c +++ b/arch/m68knommu/platform/coldfire/pit.c @@ -125,7 +125,7 @@ static struct irqaction pit_irq = { /***************************************************************************/ -static cycle_t pit_read_clk(void) +static cycle_t pit_read_clk(struct clocksource *cs) { unsigned long flags; u32 cycles; diff --git a/arch/m68knommu/platform/coldfire/timers.c b/arch/m68knommu/platform/coldfire/timers.c index 454f25493491..1ba8a3731653 100644 --- a/arch/m68knommu/platform/coldfire/timers.c +++ b/arch/m68knommu/platform/coldfire/timers.c @@ -78,7 +78,7 @@ static struct irqaction mcftmr_timer_irq = { /***************************************************************************/ -static cycle_t mcftmr_read_clk(void) +static cycle_t mcftmr_read_clk(struct clocksource *cs) { unsigned long flags; u32 cycles; diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index eccf7d6096bd..2e911e3da8d3 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c @@ -22,7 +22,7 @@ static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr; -static cycle_t txx9_cs_read(void) +static cycle_t txx9_cs_read(struct clocksource *cs) { return __raw_readl(&txx9_cs_tmrptr->trr); } diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c index 868745e7184b..51489f8a825e 100644 --- a/arch/mips/kernel/csrc-bcm1480.c +++ b/arch/mips/kernel/csrc-bcm1480.c @@ -28,7 +28,7 @@ #include -static cycle_t bcm1480_hpt_read(void) +static cycle_t bcm1480_hpt_read(struct clocksource *cs) { return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT)); } diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c index 1d5f63cf8997..b551f48d3a07 100644 --- a/arch/mips/kernel/csrc-ioasic.c +++ b/arch/mips/kernel/csrc-ioasic.c @@ -25,7 +25,7 @@ #include #include -static cycle_t dec_ioasic_hpt_read(void) +static cycle_t dec_ioasic_hpt_read(struct clocksource *cs) { return ioasic_read(IO_REG_FCTR); } @@ -47,13 +47,13 @@ void __init dec_ioasic_clocksource_init(void) while (!ds1287_timer_state()) ; - start = dec_ioasic_hpt_read(); + start = dec_ioasic_hpt_read(&clocksource_dec); while (i--) while (!ds1287_timer_state()) ; - end = dec_ioasic_hpt_read(); + end = dec_ioasic_hpt_read(&clocksource_dec); freq = (end - start) * 10; printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq); diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c index f1a2893931ed..e95a3cd48eea 100644 --- a/arch/mips/kernel/csrc-r4k.c +++ b/arch/mips/kernel/csrc-r4k.c @@ -10,7 +10,7 @@ #include -static cycle_t c0_hpt_read(void) +static cycle_t c0_hpt_read(struct clocksource *cs) { return read_c0_count(); } diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c index 92212bbb8e45..d14d3d1907fa 100644 --- a/arch/mips/kernel/csrc-sb1250.c +++ b/arch/mips/kernel/csrc-sb1250.c @@ -33,7 +33,7 @@ * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over * again. 
*/ -static cycle_t sb1250_hpt_read(void) +static cycle_t sb1250_hpt_read(struct clocksource *cs) { unsigned int count; diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index 689719e34f08..ed20e7fe65e3 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -128,7 +128,7 @@ void __init setup_pit_timer(void) * to just read by itself. So use jiffies to emulate a free * running counter: */ -static cycle_t pit_read(void) +static cycle_t pit_read(struct clocksource *cs) { unsigned long flags; int count; diff --git a/arch/mips/nxp/pnx8550/common/time.c b/arch/mips/nxp/pnx8550/common/time.c index cf293b279098..8df43e9e4d90 100644 --- a/arch/mips/nxp/pnx8550/common/time.c +++ b/arch/mips/nxp/pnx8550/common/time.c @@ -35,7 +35,7 @@ static unsigned long cpj; -static cycle_t hpt_read(void) +static cycle_t hpt_read(struct clocksource *cs) { return read_c0_count2(); } diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index f024057a35f8..f10a7cd64f7e 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c @@ -159,7 +159,7 @@ static void __init hub_rt_clock_event_global_init(void) setup_irq(irq, &hub_rt_irqaction); } -static cycle_t hub_rt_read(void) +static cycle_t hub_rt_read(struct clocksource *cs) { return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT); } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 926ea864e34f..48571ac56fb7 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -77,7 +77,7 @@ #include #include -static cycle_t rtc_read(void); +static cycle_t rtc_read(struct clocksource *); static struct clocksource clocksource_rtc = { .name = "rtc", .rating = 400, @@ -88,7 +88,7 @@ static struct clocksource clocksource_rtc = { .read = rtc_read, }; -static cycle_t timebase_read(void); +static cycle_t timebase_read(struct clocksource *); static struct clocksource clocksource_timebase = { .name = "timebase", .rating = 400, @@ -766,12 +766,12 @@ unsigned long read_persistent_clock(void) } /* clocksource code */ -static cycle_t rtc_read(void) +static cycle_t rtc_read(struct clocksource *cs) { return (cycle_t)get_rtc(); } -static cycle_t timebase_read(void) +static cycle_t timebase_read(struct clocksource *cs) { return (cycle_t)get_tb(); } diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 6ded50dfa75a..ef596d020573 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -201,7 +201,7 @@ unsigned long read_persistent_clock(void) return ts.tv_sec; } -static cycle_t read_tod_clock(void) +static cycle_t read_tod_clock(struct clocksource *cs) { return get_clock(); } diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c index c34e1e0f9b02..1700d2465f6c 100644 --- a/arch/sh/kernel/time_32.c +++ b/arch/sh/kernel/time_32.c @@ -208,7 +208,7 @@ unsigned long long sched_clock(void) if (!clocksource_sh.rating) return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); - cycles = clocksource_sh.read(); + cycles = clocksource_sh.read(&clocksource_sh); return cyc2ns(&clocksource_sh, cycles); } #endif diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index c5d3396f5960..fe8d8930ccb6 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c @@ -81,7 +81,7 @@ static int tmu_timer_stop(void) */ static int tmus_are_scaled; -static cycle_t tmu_timer_read(void) +static cycle_t tmu_timer_read(struct clocksource *cs) { return ((cycle_t)(~_tmu_read(TMU1)))<get_tick(); +} + void __init 
time_init(void) { unsigned long freq = sparc64_init_timers(); @@ -827,7 +832,7 @@ void __init time_init(void) clocksource_tick.mult = clocksource_hz2mult(freq, clocksource_tick.shift); - clocksource_tick.read = tick_ops->get_tick; + clocksource_tick.read = clocksource_tick_read; printk("clocksource: mult[%x] shift[%d]\n", clocksource_tick.mult, clocksource_tick.shift); diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index b13a87a3ec95..c8b9c469fcd7 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -65,7 +65,7 @@ static irqreturn_t um_timer(int irq, void *dev) return IRQ_HANDLED; } -static cycle_t itimer_read(void) +static cycle_t itimer_read(struct clocksource *cs) { return os_nsecs() / 1000; } diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 648b3a2a3a44..3f0019e0a229 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -722,7 +722,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n, /* * Clock source related code */ -static cycle_t read_hpet(void) +static cycle_t read_hpet(struct clocksource *cs) { return (cycle_t)hpet_readl(HPET_COUNTER); } @@ -756,7 +756,7 @@ static int hpet_clocksource_register(void) hpet_restart_counter(); /* Verify whether hpet counter works */ - t1 = read_hpet(); + t1 = hpet_readl(HPET_COUNTER); rdtscll(start); /* @@ -770,7 +770,7 @@ static int hpet_clocksource_register(void) rdtscll(now); } while ((now - start) < 200000UL); - if (t1 == read_hpet()) { + if (t1 == hpet_readl(HPET_COUNTER)) { printk(KERN_WARNING "HPET counter not counting. HPET disabled\n"); return -ENODEV; diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 3475440baa54..c2e0bb0890d4 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -129,7 +129,7 @@ void __init setup_pit_timer(void) * to just read by itself. So use jiffies to emulate a free * running counter: */ -static cycle_t pit_read(void) +static cycle_t pit_read(struct clocksource *cs) { static int old_count; static u32 old_jifs; diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 137f2e8132df..223af43f1526 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void) return ret; } +static cycle_t kvm_clock_get_cycles(struct clocksource *cs) +{ + return kvm_clock_read(); +} + /* * If we don't do that, there is the possibility that the guest * will calibrate under heavy load - thus, getting a lower lpj - @@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void) static struct clocksource kvm_clock = { .name = "kvm-clock", - .read = kvm_clock_read, + .read = kvm_clock_get_cycles, .rating = 400, .mask = CLOCKSOURCE_MASK(64), .mult = 1 << KVM_SCALE, diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 7a567ebe6361..d57de05dc430 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc; * code, which is necessary to support wrapping clocksources like pm * timer. 
*/ -static cycle_t read_tsc(void) +static cycle_t read_tsc(struct clocksource *cs) { cycle_t ret = (cycle_t)get_cycles(); diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index d303369a7bad..2b3eb82efeeb 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c @@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void) /** vmi clocksource */ static struct clocksource clocksource_vmi; -static cycle_t read_real_cycles(void) +static cycle_t read_real_cycles(struct clocksource *cs) { cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL); return max(ret, clocksource_vmi.cycle_last); diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index a2085368a3dc..ca7ec44bafc3 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -663,7 +663,7 @@ static unsigned long lguest_tsc_khz(void) /* If we can't use the TSC, the kernel falls back to our lower-priority * "lguest_clock", where we read the time value given to us by the Host. */ -static cycle_t lguest_clock_read(void) +static cycle_t lguest_clock_read(struct clocksource *cs) { unsigned long sec, nsec; diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 14f240623497..0a5aa44299a5 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -213,6 +213,11 @@ cycle_t xen_clocksource_read(void) return ret; } +static cycle_t xen_clocksource_get_cycles(struct clocksource *cs) +{ + return xen_clocksource_read(); +} + static void xen_read_wallclock(struct timespec *ts) { struct shared_info *s = HYPERVISOR_shared_info; @@ -241,7 +246,7 @@ int xen_set_wallclock(unsigned long now) static struct clocksource xen_clocksource __read_mostly = { .name = "xen", .rating = 400, - .read = xen_clocksource_read, + .read = xen_clocksource_get_cycles, .mask = ~0, .mult = 1< value1) diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c index 8615059a8729..64e528e8bfa6 100644 --- a/drivers/clocksource/cyclone.c +++ b/drivers/clocksource/cyclone.c @@ -19,7 +19,7 @@ int use_cyclone = 0; static void __iomem *cyclone_ptr; -static cycle_t read_cyclone(void) +static cycle_t read_cyclone(struct clocksource *cs) { return (cycle_t)readl(cyclone_ptr); } diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c index b92da677aa5d..27f4d9637b62 100644 --- a/drivers/clocksource/scx200_hrt.c +++ b/drivers/clocksource/scx200_hrt.c @@ -43,7 +43,7 @@ MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)"); /* The base timer frequency, * 27 if selected */ #define HRT_FREQ 1000000 -static cycle_t read_hrt(void) +static cycle_t read_hrt(struct clocksource *cs) { /* Read the timer value */ return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET); diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 254f1064d973..01b886e68822 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -39,7 +39,7 @@ static void __iomem *tcaddr; -static cycle_t tc_get_cycles(void) +static cycle_t tc_get_cycles(struct clocksource *cs) { unsigned long flags; u32 lower, upper; diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 573819ef4cc0..0d96cde9ee5d 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -143,7 +143,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, * 400-499: Perfect * The ideal clocksource. A must-use where * available. 
- * @read:		returns a cycle value
+ * @read:		returns a cycle value, passes clocksource as argument
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier (adjusted by NTP)
@@ -162,7 +162,7 @@ struct clocksource {
 	char *name;
 	struct list_head list;
 	int rating;
-	cycle_t (*read)(void);
+	cycle_t (*read)(struct clocksource *cs);
 	cycle_t mask;
 	u32 mult;
 	u32 mult_orig;
@@ -271,7 +271,7 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
  */
 static inline cycle_t clocksource_read(struct clocksource *cs)
 {
-	return cs->read();
+	return cs->read(cs);
 }
 
 /**
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c46c931a7fe7..ecfd7b5187e0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data)
 	resumed = test_and_clear_bit(0, &watchdog_resumed);
 
-	wdnow = watchdog->read();
+	wdnow = watchdog->read(watchdog);
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
-		csnow = cs->read();
+		csnow = cs->read(cs);
 
 		if (unlikely(resumed)) {
 			cs->wd_last = csnow;
@@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 		list_add(&cs->wd_list, &watchdog_list);
 		if (!started && watchdog) {
-			watchdog_last = watchdog->read();
+			watchdog_last = watchdog->read(watchdog);
 			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 			add_timer_on(&watchdog_timer,
 				     cpumask_first(cpu_online_mask));
@@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 			cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
 		/* Start if list is not empty */
 		if (!list_empty(&watchdog_list)) {
-			watchdog_last = watchdog->read();
+			watchdog_last = watchdog->read(watchdog);
 			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 			add_timer_on(&watchdog_timer,
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 06f197560f3b..c3f6c30816e3 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -50,7 +50,7 @@
  */
 #define JIFFIES_SHIFT	8
 
-static cycle_t jiffies_read(void)
+static cycle_t jiffies_read(struct clocksource *cs)
 {
 	return (cycle_t) jiffies;
 }
--
cgit v1.2.3-71-gd317


From 4614e6adafa2c5e6c3a9c245af2807fa7bc5117a Mon Sep 17 00:00:00 2001
From: Magnus Damm
Date: Tue, 21 Apr 2009 12:24:02 -0700
Subject: clocksource: add enable() and disable() callbacks

Add enable() and disable() callbacks for clocksources. This allows us
to put unused clocksources in power save mode. The functions
clocksource_enable() and clocksource_disable() wrap the callbacks and
are inserted in the timekeeping code to enable before use and disable
after switching to a new clocksource.

Signed-off-by: Magnus Damm
Acked-by: John Stultz
Cc: Thomas Gleixner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/clocksource.h | 31 +++++++++++++++++++++++++++++++
 kernel/time/timekeeping.c   | 12 +++++++++---
 2 files changed, 40 insertions(+), 3 deletions(-)

(limited to 'kernel')

diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 0d96cde9ee5d..5a40d14daa9f 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -144,6 +144,8 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value, passes clocksource as argument
+ * @enable:		optional function to enable the clocksource
+ * @disable:		optional function to disable the clocksource
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier (adjusted by NTP)
@@ -163,6 +165,8 @@ struct clocksource {
 	struct list_head list;
 	int rating;
 	cycle_t (*read)(struct clocksource *cs);
+	int (*enable)(struct clocksource *cs);
+	void (*disable)(struct clocksource *cs);
 	cycle_t mask;
 	u32 mult;
 	u32 mult_orig;
@@ -274,6 +278,33 @@ static inline cycle_t clocksource_read(struct clocksource *cs)
 	return cs->read(cs);
 }
 
+/**
+ * clocksource_enable: - enable clocksource
+ * @cs:		pointer to clocksource
+ *
+ * Enables the specified clocksource. The clocksource callback
+ * function should start up the hardware and setup mult and field
+ * members of struct clocksource to reflect hardware capabilities.
+ */
+static inline int clocksource_enable(struct clocksource *cs)
+{
+	return cs->enable ? cs->enable(cs) : 0;
+}
+
+/**
+ * clocksource_disable: - disable clocksource
+ * @cs:		pointer to clocksource
+ *
+ * Disables the specified clocksource. The clocksource callback
+ * function should power down the now unused hardware block to
+ * save power.
+ */
+static inline void clocksource_disable(struct clocksource *cs)
+{
+	if (cs->disable)
+		cs->disable(cs);
+}
+
 /**
  * cyc2ns - converts clocksource cycles to nanoseconds
  * @cs:		Pointer to clocksource
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 900f1b6598d1..687dff49f6e7 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(do_settimeofday);
  */
 static void change_clocksource(void)
 {
-	struct clocksource *new;
+	struct clocksource *new, *old;
 
 	new = clocksource_get_next();
 
@@ -191,11 +191,16 @@ static void change_clocksource(void)
 
 	clocksource_forward_now();
 
-	new->raw_time = clock->raw_time;
+	if (clocksource_enable(new))
+		return;
 
+	new->raw_time = clock->raw_time;
+	old = clock;
 	clock = new;
+	clocksource_disable(old);
+
 	clock->cycle_last = 0;
-	clock->cycle_last = clocksource_read(new);
+	clock->cycle_last = clocksource_read(clock);
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -292,6 +297,7 @@ void __init timekeeping_init(void)
 	ntp_init();
 
 	clock = clocksource_get_next();
+	clocksource_enable(clock);
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
 	clock->cycle_last = clocksource_read(clock);
 
--
cgit v1.2.3-71-gd317
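Taken together, the two patches above change what a clocksource driver looks like: read() now
receives the clocksource pointer (so a shared read routine can reach per-device data through the
clocksource it was called on), and enable()/disable() give the timekeeping core hooks to power a
counter block up only while it is the active clocksource. The sketch below is a hypothetical
driver, not part of either patch; the example_* names, the register offsets and the 1 MHz rate are
invented for illustration, and example_base is assumed to have been ioremap()ed during platform
setup.

/*
 * Hypothetical clocksource driver written against the interface as it
 * looks after both patches.  All example_* identifiers and register
 * offsets are made up for illustration.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/io.h>

static void __iomem *example_base;	/* assumed mapped elsewhere */

/* read() now takes the clocksource pointer (first patch) */
static cycle_t example_read(struct clocksource *cs)
{
	return (cycle_t)readl(example_base);	/* free-running 32-bit counter */
}

/* enable()/disable() hooks from the second patch, used only for power management */
static int example_enable(struct clocksource *cs)
{
	writel(1, example_base + 0x04);		/* start the counter block */
	return 0;
}

static void example_disable(struct clocksource *cs)
{
	writel(0, example_base + 0x04);		/* counter no longer used, save power */
}

static struct clocksource example_clocksource = {
	.name		= "example",
	.rating		= 200,
	.read		= example_read,
	.enable		= example_enable,
	.disable	= example_disable,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= 20,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_clocksource_init(void)
{
	/* assumed 1 MHz input clock */
	example_clocksource.mult =
		clocksource_hz2mult(1000000, example_clocksource.shift);
	return clocksource_register(&example_clocksource);
}

With this shape, the timekeeping core calls clocksource_enable() before the clocksource is first
read and clocksource_disable() on the one it is switching away from, exactly as the
change_clocksource() hunk above does; drivers with no power-management needs simply leave the
enable and disable members NULL.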