From 04e82ffb0f02e645f3dde5128ef39d425a8b3c6d Mon Sep 17 00:00:00 2001
From: Peter Huewe
Date: Wed, 10 Mar 2010 11:55:05 +0900
Subject: serial: sh-sci: Fix build failure for non-sh architectures.

This patch fixes a build failure for various arm based defconfigs [1][2][3]
and maybe other architectures/configs.

The build failure was introduced by the sh specific patch [4]
"serial: sh-sci: Add DMA support" by Guennadi Liakhovetski

Patch against linux-next of 20100309

References:
[1] http://kisskb.ellerman.id.au/kisskb/buildresult/2248992/
[2] http://kisskb.ellerman.id.au/kisskb/buildresult/2248996/
[3] http://kisskb.ellerman.id.au/kisskb/buildresult/2248998/
[4] http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git;a=commit;h=73a19e4c0301908ce6346715fd08a74308451f5a

Signed-off-by: Peter Huewe
Signed-off-by: Paul Mundt
---
 include/linux/serial_sci.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 1b177d29a7f0..193d4bfe42ff 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -2,7 +2,9 @@
 #define __LINUX_SERIAL_SCI_H
 #include
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
 #include
+#endif
 /*
  * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts)
@@ -30,8 +32,10 @@ struct plat_sci_port {
 upf_t flags; /* UPF_* flags */
 char *clk; /* clock string */
 struct device *dma_dev;
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
 enum sh_dmae_slave_chan_id dma_slave_tx;
 enum sh_dmae_slave_chan_id dma_slave_rx;
+#endif
 };
 #endif /* __LINUX_SERIAL_SCI_H */
-- cgit v1.2.3-71-gd317

From 3f6da3905398826d85731247e7fbcf53400c18bd Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 5 Mar 2010 13:01:18 +0100
Subject: perf: Rework and fix the arch CPU-hotplug hooks

Remove the hw_perf_event_*() hotplug hooks in favour of per PMU hotplug
notifiers. This has the advantage of reducing the static weak interface
as well as exposing all hotplug actions to the PMU.

Use this to fix x86 hotplug usage where we did things in ONLINE which
should have been done in UP_PREPARE or STARTING.
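For illustration, each PMU back-end now supplies its own notifier and registers
it with the new perf_cpu_notifier() helper, roughly like this (a minimal sketch
only; the foo_pmu_* names are placeholders, and the real powerpc, sh and x86
notifiers appear in the diff below):

  static int __cpuinit
  foo_pmu_notifier(struct notifier_block *self, unsigned long action,
                   void *hcpu)
  {
          unsigned int cpu = (long)hcpu;

          switch (action & ~CPU_TASKS_FROZEN) {
          case CPU_UP_PREPARE:
                  foo_pmu_setup(cpu);     /* hypothetical per-cpu PMU init */
                  break;
          default:
                  break;
          }

          return NOTIFY_OK;
  }

  /* in the PMU registration path */
  perf_cpu_notifier(foo_pmu_notifier);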
Signed-off-by: Peter Zijlstra Cc: Paul Mundt Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100305154128.736225361@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_event.c | 21 +++++++++- arch/sh/kernel/perf_event.c | 20 +++++++++- arch/x86/kernel/cpu/perf_event.c | 70 ++++++++++++++++++++-------------- arch/x86/kernel/cpu/perf_event_amd.c | 60 ++++++++++++----------------- arch/x86/kernel/cpu/perf_event_intel.c | 5 ++- include/linux/perf_event.h | 16 ++++++++ kernel/perf_event.c | 15 -------- 7 files changed, 126 insertions(+), 81 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 5120bd44f69a..fbe101d7505d 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1287,7 +1287,7 @@ static void perf_event_interrupt(struct pt_regs *regs) irq_exit(); } -void hw_perf_event_setup(int cpu) +static void power_pmu_setup(int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); @@ -1297,6 +1297,23 @@ void hw_perf_event_setup(int cpu) cpuhw->mmcr[0] = MMCR0_FC; } +static int __cpuinit +power_pmu_notify(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + power_pmu_setup(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + int register_power_pmu(struct power_pmu *pmu) { if (ppmu) @@ -1314,5 +1331,7 @@ int register_power_pmu(struct power_pmu *pmu) freeze_events_kernel = MMCR0_FCHV; #endif /* CONFIG_PPC64 */ + perf_cpu_notifier(power_pmu_notifier); + return 0; } diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 7ff0943e7a08..9f253e9cce01 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -275,13 +275,30 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) return &pmu; } -void hw_perf_event_setup(int cpu) +static void sh_pmu_setup(int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); memset(cpuhw, 0, sizeof(struct cpu_hw_events)); } +static int __cpuinit +sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + sh_pmu_setup(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + void hw_perf_enable(void) { if (!sh_pmu_initialized()) @@ -308,5 +325,6 @@ int register_sh_pmu(struct sh_pmu *pmu) WARN_ON(pmu->num_events > MAX_HWEVENTS); + perf_cpu_notifier(sh_pmu_notifier); return 0; } diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 42aafd11e170..585d5608ae6b 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -157,6 +157,11 @@ struct x86_pmu { void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); struct event_constraint *event_constraints; + + void (*cpu_prepare)(int cpu); + void (*cpu_starting)(int cpu); + void (*cpu_dying)(int cpu); + void (*cpu_dead)(int cpu); }; static struct x86_pmu x86_pmu __read_mostly; @@ -293,7 +298,7 @@ static inline bool bts_available(void) return x86_pmu.enable_bts != NULL; } -static inline void init_debug_store_on_cpu(int cpu) +static void init_debug_store_on_cpu(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; @@ -305,7 +310,7 @@ static inline void init_debug_store_on_cpu(int cpu) 
(u32)((u64)(unsigned long)ds >> 32)); } -static inline void fini_debug_store_on_cpu(int cpu) +static void fini_debug_store_on_cpu(int cpu) { if (!per_cpu(cpu_hw_events, cpu).ds) return; @@ -1337,6 +1342,39 @@ undo: #include "perf_event_p6.c" #include "perf_event_intel.c" +static int __cpuinit +x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + if (x86_pmu.cpu_prepare) + x86_pmu.cpu_prepare(cpu); + break; + + case CPU_STARTING: + if (x86_pmu.cpu_starting) + x86_pmu.cpu_starting(cpu); + break; + + case CPU_DYING: + if (x86_pmu.cpu_dying) + x86_pmu.cpu_dying(cpu); + break; + + case CPU_DEAD: + if (x86_pmu.cpu_dead) + x86_pmu.cpu_dead(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + static void __init pmu_check_apic(void) { if (cpu_has_apic) @@ -1415,6 +1453,8 @@ void __init init_hw_perf_events(void) pr_info("... max period: %016Lx\n", x86_pmu.max_period); pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); pr_info("... event mask: %016Lx\n", perf_event_mask); + + perf_cpu_notifier(x86_pmu_notifier); } static inline void x86_pmu_read(struct perf_event *event) @@ -1674,29 +1714,3 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return entry; } - -void hw_perf_event_setup_online(int cpu) -{ - init_debug_store_on_cpu(cpu); - - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_AMD: - amd_pmu_cpu_online(cpu); - break; - default: - return; - } -} - -void hw_perf_event_setup_offline(int cpu) -{ - init_debug_store_on_cpu(cpu); - - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_AMD: - amd_pmu_cpu_offline(cpu); - break; - default: - return; - } -} diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 8f3dbfda3c4f..014528ba7d57 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -271,28 +271,6 @@ done: return &emptyconstraint; } -static __initconst struct x86_pmu amd_pmu = { - .name = "AMD", - .handle_irq = x86_pmu_handle_irq, - .disable_all = x86_pmu_disable_all, - .enable_all = x86_pmu_enable_all, - .enable = x86_pmu_enable_event, - .disable = x86_pmu_disable_event, - .eventsel = MSR_K7_EVNTSEL0, - .perfctr = MSR_K7_PERFCTR0, - .event_map = amd_pmu_event_map, - .raw_event = amd_pmu_raw_event, - .max_events = ARRAY_SIZE(amd_perfmon_event_map), - .num_events = 4, - .event_bits = 48, - .event_mask = (1ULL << 48) - 1, - .apic = 1, - /* use highest bit to detect overflow */ - .max_period = (1ULL << 47) - 1, - .get_event_constraints = amd_get_event_constraints, - .put_event_constraints = amd_put_event_constraints -}; - static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) { struct amd_nb *nb; @@ -378,6 +356,31 @@ static void amd_pmu_cpu_offline(int cpu) raw_spin_unlock(&amd_nb_lock); } +static __initconst struct x86_pmu amd_pmu = { + .name = "AMD", + .handle_irq = x86_pmu_handle_irq, + .disable_all = x86_pmu_disable_all, + .enable_all = x86_pmu_enable_all, + .enable = x86_pmu_enable_event, + .disable = x86_pmu_disable_event, + .eventsel = MSR_K7_EVNTSEL0, + .perfctr = MSR_K7_PERFCTR0, + .event_map = amd_pmu_event_map, + .raw_event = amd_pmu_raw_event, + .max_events = ARRAY_SIZE(amd_perfmon_event_map), + .num_events = 4, + .event_bits = 48, + .event_mask = (1ULL << 48) - 1, + .apic = 1, + /* use highest bit to detect overflow */ + .max_period = (1ULL << 47) - 1, + .get_event_constraints = amd_get_event_constraints, + 
.put_event_constraints = amd_put_event_constraints, + + .cpu_prepare = amd_pmu_cpu_online, + .cpu_dead = amd_pmu_cpu_offline, +}; + static __init int amd_pmu_init(void) { /* Performance-monitoring supported from K7 and later: */ @@ -390,11 +393,6 @@ static __init int amd_pmu_init(void) memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - /* - * explicitly initialize the boot cpu, other cpus will get - * the cpu hotplug callbacks from smp_init() - */ - amd_pmu_cpu_online(smp_processor_id()); return 0; } @@ -405,12 +403,4 @@ static int amd_pmu_init(void) return 0; } -static void amd_pmu_cpu_online(int cpu) -{ -} - -static void amd_pmu_cpu_offline(int cpu) -{ -} - #endif diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 44b60c852107..12e811a7d747 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -870,7 +870,10 @@ static __initconst struct x86_pmu intel_pmu = { .max_period = (1ULL << 31) - 1, .enable_bts = intel_pmu_enable_bts, .disable_bts = intel_pmu_disable_bts, - .get_event_constraints = intel_get_event_constraints + .get_event_constraints = intel_get_event_constraints, + + .cpu_starting = init_debug_store_on_cpu, + .cpu_dying = fini_debug_store_on_cpu, }; static __init int intel_pmu_init(void) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 6f8cd7da1a01..80acbf3d5de1 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -936,5 +936,21 @@ static inline void perf_event_disable(struct perf_event *event) { } #define perf_output_put(handle, x) \ perf_output_copy((handle), &(x), sizeof(x)) +/* + * This has to have a higher priority than migration_notifier in sched.c. + */ +#define perf_cpu_notifier(fn) \ +do { \ + static struct notifier_block fn##_nb __cpuinitdata = \ + { .notifier_call = fn, .priority = 20 }; \ + fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_STARTING, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ + (void *)(unsigned long)smp_processor_id()); \ + register_cpu_notifier(&fn##_nb); \ +} while (0) + #endif /* __KERNEL__ */ #endif /* _LINUX_PERF_EVENT_H */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 4393b9e73740..73329dedb5ad 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -81,10 +81,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) void __weak hw_perf_disable(void) { barrier(); } void __weak hw_perf_enable(void) { barrier(); } -void __weak hw_perf_event_setup(int cpu) { barrier(); } -void __weak hw_perf_event_setup_online(int cpu) { barrier(); } -void __weak hw_perf_event_setup_offline(int cpu) { barrier(); } - int __weak hw_perf_group_sched_in(struct perf_event *group_leader, struct perf_cpu_context *cpuctx, @@ -5382,8 +5378,6 @@ static void __cpuinit perf_event_init_cpu(int cpu) spin_lock(&perf_resource_lock); cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; spin_unlock(&perf_resource_lock); - - hw_perf_event_setup(cpu); } #ifdef CONFIG_HOTPLUG_CPU @@ -5423,20 +5417,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) perf_event_init_cpu(cpu); break; - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - hw_perf_event_setup_online(cpu); - break; - case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: perf_event_exit_cpu(cpu); break; - case CPU_DEAD: - 
hw_perf_event_setup_offline(cpu); - break; - default: break; } -- cgit v1.2.3-71-gd317 From 5331d7b84613b8325362dde53dc2bff2fb87d351 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 4 Mar 2010 21:15:56 +0100 Subject: perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot Events that trigger overflows by interrupting a context can use get_irq_regs() or task_pt_regs() to retrieve the state when the event triggered. But this is not the case for some other class of events like trace events as tracepoints are executed in the same context than the code that triggered the event. It means we need a different api to capture the regs there, namely we need a hot snapshot to get the most important informations for perf: the instruction pointer to get the event origin, the frame pointer for the callchain, the code segment for user_mode() tests (we always use __KERNEL_CS as trace events always occur from the kernel) and the eflags for further purposes. v2: rename perf_save_regs to perf_fetch_caller_regs as per Masami's suggestion. Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. Peter Anvin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Arnaldo Carvalho de Melo Cc: Masami Hiramatsu Cc: Jason Baron Cc: Archs --- arch/x86/kernel/cpu/perf_event.c | 12 ++++++++++++ arch/x86/kernel/dumpstack.h | 15 ++++++++++++++ include/linux/perf_event.h | 42 +++++++++++++++++++++++++++++++++++++++- kernel/perf_event.c | 5 +++++ 4 files changed, 73 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1d665a0b202c..c6bde7d7afdc 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1707,3 +1707,15 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return entry; } + +void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) +{ + regs->ip = ip; + /* + * perf_arch_fetch_caller_regs adds another call, we need to increment + * the skip level + */ + regs->bp = rewind_frame_pointer(skip + 1); + regs->cs = __KERNEL_CS; + local_save_flags(regs->flags); +} diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h index 4fd1420faffa..29e5f7c845b2 100644 --- a/arch/x86/kernel/dumpstack.h +++ b/arch/x86/kernel/dumpstack.h @@ -29,4 +29,19 @@ struct stack_frame { struct stack_frame *next_frame; unsigned long return_address; }; + +static inline unsigned long rewind_frame_pointer(int n) +{ + struct stack_frame *frame; + + get_bp(frame); + +#ifdef CONFIG_FRAME_POINTER + while (n--) + frame = frame->next_frame; #endif + + return (unsigned long)frame; +} + +#endif /* DUMPSTACK_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 80acbf3d5de1..70cffd052c04 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -452,6 +452,7 @@ enum perf_callchain_context { #include #include #include +#include #include #define PERF_MAX_STACK_DEPTH 255 @@ -847,6 +848,44 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) __perf_sw_event(event_id, nr, nmi, regs, addr); } +extern void +perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); + +/* + * Take a snapshot of the regs. Skip ip and frame pointer to + * the nth caller. 
We only need a few of the regs: + * - ip for PERF_SAMPLE_IP + * - cs for user_mode() tests + * - bp for callchains + * - eflags, for future purposes, just in case + */ +static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip) +{ + unsigned long ip; + + memset(regs, 0, sizeof(*regs)); + + switch (skip) { + case 1 : + ip = CALLER_ADDR0; + break; + case 2 : + ip = CALLER_ADDR1; + break; + case 3 : + ip = CALLER_ADDR2; + break; + case 4: + ip = CALLER_ADDR3; + break; + /* No need to support further for now */ + default: + ip = 0; + } + + return perf_arch_fetch_caller_regs(regs, ip, skip); +} + extern void __perf_event_mmap(struct vm_area_struct *vma); static inline void perf_event_mmap(struct vm_area_struct *vma) @@ -880,7 +919,8 @@ static inline bool perf_paranoid_kernel(void) } extern void perf_event_init(void); -extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size); +extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, + int entry_size, struct pt_regs *regs); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 52c69a34d697..359d7f690c2b 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2786,6 +2786,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return NULL; } +__weak +void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) +{ +} + /* * Output */ -- cgit v1.2.3-71-gd317 From c530665c31c0140b74ca7689e7f836177796e5bd Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 3 Mar 2010 07:16:16 +0100 Subject: perf: Take a hot regs snapshot for trace events We are taking a wrong regs snapshot when a trace event triggers. Either we use get_irq_regs(), which gives us the interrupted registers if we are in an interrupt, or we use task_pt_regs() which gives us the state before we entered the kernel, assuming we are lucky enough to be no kernel thread, in which case task_pt_regs() returns the initial set of regs when the kernel thread was started. What we want is different. We need a hot snapshot of the regs, so that we can get the instruction pointer to record in the sample, the frame pointer for the callchain, and some other things. Let's use the new perf_fetch_caller_regs() for that. Comparison with perf record -e lock: -R -a -f -g Before: perf [kernel] [k] __do_softirq | --- __do_softirq | |--55.16%-- __open | --44.84%-- __write_nocancel After: perf [kernel] [k] perf_tp_event | --- perf_tp_event | |--41.07%-- lock_acquire | | | |--39.36%-- _raw_spin_lock | | | | | |--7.81%-- hrtimer_interrupt | | | smp_apic_timer_interrupt | | | apic_timer_interrupt The old case was producing unreliable callchains. Now having right frame and instruction pointers, we have the trace we want. Also syscalls and kprobe events already have the right regs, let's use them instead of wasting a retrieval. v2: Follow the rename perf_save_regs() -> perf_fetch_caller_regs() Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. 
Peter Anvin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Arnaldo Carvalho de Melo Cc: Masami Hiramatsu Cc: Jason Baron Cc: Archs --- include/linux/ftrace_event.h | 7 +++++-- include/trace/ftrace.h | 6 +++++- kernel/perf_event.c | 8 ++------ kernel/trace/trace_event_profile.c | 3 ++- kernel/trace/trace_kprobe.c | 5 +++-- kernel/trace/trace_syscalls.c | 4 ++-- 6 files changed, 19 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 6b7c444ab8f6..ac424f18ce63 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -187,6 +187,9 @@ do { \ #ifdef CONFIG_PERF_EVENTS struct perf_event; + +DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); + extern int ftrace_profile_enable(int event_id); extern void ftrace_profile_disable(int event_id); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, @@ -198,11 +201,11 @@ ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp, static inline void ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr, - u64 count, unsigned long irq_flags) + u64 count, unsigned long irq_flags, struct pt_regs *regs) { struct trace_entry *entry = raw_data; - perf_tp_event(entry->type, addr, count, raw_data, size); + perf_tp_event(entry->type, addr, count, raw_data, size, regs); perf_swevent_put_recursion_context(rctx); local_irq_restore(irq_flags); } diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 0804cd594803..f31bb8b9777c 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -764,6 +764,7 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ unsigned long irq_flags; \ + struct pt_regs *__regs; \ int __entry_size; \ int __data_size; \ int rctx; \ @@ -784,8 +785,11 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ \ { assign; } \ \ + __regs = &__get_cpu_var(perf_trace_regs); \ + perf_fetch_caller_regs(__regs, 2); \ + \ ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \ - __count, irq_flags); \ + __count, irq_flags, __regs); \ } #undef DEFINE_EVENT diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 359d7f690c2b..45b4b6e55891 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4318,9 +4318,8 @@ static const struct pmu perf_ops_task_clock = { #ifdef CONFIG_EVENT_TRACING void perf_tp_event(int event_id, u64 addr, u64 count, void *record, - int entry_size) + int entry_size, struct pt_regs *regs) { - struct pt_regs *regs = get_irq_regs(); struct perf_sample_data data; struct perf_raw_record raw = { .size = entry_size, @@ -4330,12 +4329,9 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, perf_sample_data_init(&data, addr); data.raw = &raw; - if (!regs) - regs = task_pt_regs(current); - /* Trace events already protected against recursion */ do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, - &data, regs); + &data, regs); } EXPORT_SYMBOL_GPL(perf_tp_event); diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index f0d693005075..e66d21e15a0f 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c @@ -2,13 +2,14 @@ * trace event based perf counter profiling * * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra - * + * Copyright (C) 2009-2010 Frederic Weisbecker */ #include #include #include "trace.h" +DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); static char 
*perf_trace_buf; static char *perf_trace_buf_nmi; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 505c92273b1a..f7a20a8bfb31 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1240,7 +1240,7 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags); + ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); } /* Kretprobe profile handler */ @@ -1271,7 +1271,8 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags); + ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, + irq_flags, regs); } static int probe_profile_enable(struct ftrace_event_call *call) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index cba47d7935cc..7e6e84fb7b6c 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -467,7 +467,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); + ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); } int prof_sysenter_enable(struct ftrace_event_call *call) @@ -542,7 +542,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); + ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); } int prof_sysexit_enable(struct ftrace_event_call *call) -- cgit v1.2.3-71-gd317 From 97d5a22005f38057b4bc0d95f81cd26510268794 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 5 Mar 2010 05:35:37 +0100 Subject: perf: Drop the obsolete profile naming for trace events Drop the obsolete "profile" naming used by perf for trace events. Perf can now do more than simple events counting, so generalize the API naming. 
Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Masami Hiramatsu Cc: Jason Baron --- include/linux/ftrace_event.h | 16 ++-- include/linux/syscalls.h | 24 +++--- include/trace/ftrace.h | 38 ++++----- include/trace/syscall.h | 8 +- kernel/perf_event.c | 4 +- kernel/trace/Makefile | 2 +- kernel/trace/trace_event_perf.c | 165 +++++++++++++++++++++++++++++++++++++ kernel/trace/trace_event_profile.c | 165 ------------------------------------- kernel/trace/trace_events.c | 2 +- kernel/trace/trace_kprobe.c | 28 +++---- kernel/trace/trace_syscalls.c | 72 ++++++++-------- 11 files changed, 262 insertions(+), 262 deletions(-) create mode 100644 kernel/trace/trace_event_perf.c delete mode 100644 kernel/trace/trace_event_profile.c (limited to 'include/linux') diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index ac424f18ce63..c0f4b364c711 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -131,12 +131,12 @@ struct ftrace_event_call { void *mod; void *data; - int profile_count; - int (*profile_enable)(struct ftrace_event_call *); - void (*profile_disable)(struct ftrace_event_call *); + int perf_refcount; + int (*perf_event_enable)(struct ftrace_event_call *); + void (*perf_event_disable)(struct ftrace_event_call *); }; -#define FTRACE_MAX_PROFILE_SIZE 2048 +#define PERF_MAX_TRACE_SIZE 2048 #define MAX_FILTER_PRED 32 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ @@ -190,17 +190,17 @@ struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); -extern int ftrace_profile_enable(int event_id); -extern void ftrace_profile_disable(int event_id); +extern int perf_trace_enable(int event_id); +extern void perf_trace_disable(int event_id); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); extern void * -ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp, +perf_trace_buf_prepare(int size, unsigned short type, int *rctxp, unsigned long *irq_flags); static inline void -ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr, +perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, u64 count, unsigned long irq_flags, struct pt_regs *regs) { struct trace_entry *entry = raw_data; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 8126f239edf0..51435bcc3460 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -101,18 +101,18 @@ struct perf_event_attr; #ifdef CONFIG_PERF_EVENTS -#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ - .profile_enable = prof_sysenter_enable, \ - .profile_disable = prof_sysenter_disable, +#define TRACE_SYS_ENTER_PERF_INIT(sname) \ + .perf_event_enable = perf_sysenter_enable, \ + .perf_event_disable = perf_sysenter_disable, -#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ - .profile_enable = prof_sysexit_enable, \ - .profile_disable = prof_sysexit_disable, +#define TRACE_SYS_EXIT_PERF_INIT(sname) \ + .perf_event_enable = perf_sysexit_enable, \ + .perf_event_disable = perf_sysexit_disable, #else -#define TRACE_SYS_ENTER_PROFILE(sname) -#define TRACE_SYS_ENTER_PROFILE_INIT(sname) -#define TRACE_SYS_EXIT_PROFILE(sname) -#define TRACE_SYS_EXIT_PROFILE_INIT(sname) +#define TRACE_SYS_ENTER_PERF(sname) +#define TRACE_SYS_ENTER_PERF_INIT(sname) +#define TRACE_SYS_EXIT_PERF(sname) +#define TRACE_SYS_EXIT_PERF_INIT(sname) #endif /* CONFIG_PERF_EVENTS */ #ifdef 
CONFIG_FTRACE_SYSCALLS @@ -149,7 +149,7 @@ struct perf_event_attr; .regfunc = reg_event_syscall_enter, \ .unregfunc = unreg_event_syscall_enter, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_ENTER_PROFILE_INIT(sname) \ + TRACE_SYS_ENTER_PERF_INIT(sname) \ } #define SYSCALL_TRACE_EXIT_EVENT(sname) \ @@ -171,7 +171,7 @@ struct perf_event_attr; .regfunc = reg_event_syscall_exit, \ .unregfunc = unreg_event_syscall_exit, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_EXIT_PROFILE_INIT(sname) \ + TRACE_SYS_EXIT_PERF_INIT(sname) \ } #define SYSCALL_METADATA(sname, nb) \ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index f31bb8b9777c..25ab56f75d65 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call( \ #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) \ \ -static void ftrace_profile_##name(proto); \ +static void perf_trace_##name(proto); \ \ static notrace int \ -ftrace_profile_enable_##name(struct ftrace_event_call *unused) \ +perf_trace_enable_##name(struct ftrace_event_call *unused) \ { \ - return register_trace_##name(ftrace_profile_##name); \ + return register_trace_##name(perf_trace_##name); \ } \ \ static notrace void \ -ftrace_profile_disable_##name(struct ftrace_event_call *unused) \ +perf_trace_disable_##name(struct ftrace_event_call *unused) \ { \ - unregister_trace_##name(ftrace_profile_##name); \ + unregister_trace_##name(perf_trace_##name); \ } #undef DEFINE_EVENT_PRINT @@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \ #ifdef CONFIG_PERF_EVENTS -#define _TRACE_PROFILE_INIT(call) \ - .profile_enable = ftrace_profile_enable_##call, \ - .profile_disable = ftrace_profile_disable_##call, +#define _TRACE_PERF_INIT(call) \ + .perf_event_enable = perf_trace_enable_##call, \ + .perf_event_disable = perf_trace_disable_##call, #else -#define _TRACE_PROFILE_INIT(call) +#define _TRACE_PERF_INIT(call) #endif /* CONFIG_PERF_EVENTS */ #undef __entry @@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .unregfunc = ftrace_raw_unreg_event_##call, \ .print_fmt = print_fmt_##template, \ .define_fields = ftrace_define_fields_##template, \ - _TRACE_PROFILE_INIT(call) \ + _TRACE_PERF_INIT(call) \ } #undef DEFINE_EVENT_PRINT @@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .unregfunc = ftrace_raw_unreg_event_##call, \ .print_fmt = print_fmt_##call, \ .define_fields = ftrace_define_fields_##template, \ - _TRACE_PROFILE_INIT(call) \ + _TRACE_PERF_INIT(call) \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* - * Define the insertion callback to profile events + * Define the insertion callback to perf events * * The job is very similar to ftrace_raw_event_ except that we don't * insert in the ring buffer but in a perf counter. 
* - * static void ftrace_profile_(proto) + * static void ftrace_perf_(proto) * { * struct ftrace_data_offsets_ __maybe_unused __data_offsets; * struct ftrace_event_call *event_call = &event_; @@ -757,7 +757,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace void \ -ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ +perf_trace_templ_##call(struct ftrace_event_call *event_call, \ proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ @@ -774,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ sizeof(u64)); \ __entry_size -= sizeof(u32); \ \ - if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \ + if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ "profile buffer not large enough")) \ return; \ - entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare( \ + entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ __entry_size, event_call->id, &rctx, &irq_flags); \ if (!entry) \ return; \ @@ -788,17 +788,17 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ __regs = &__get_cpu_var(perf_trace_regs); \ perf_fetch_caller_regs(__regs, 2); \ \ - ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \ + perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ __count, irq_flags, __regs); \ } #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ -static notrace void ftrace_profile_##call(proto) \ +static notrace void perf_trace_##call(proto) \ { \ struct ftrace_event_call *event_call = &event_##call; \ \ - ftrace_profile_templ_##template(event_call, args); \ + perf_trace_templ_##template(event_call, args); \ } #undef DEFINE_EVENT_PRINT diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 0387100752f0..e5e5f48dbfb3 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); #endif #ifdef CONFIG_PERF_EVENTS -int prof_sysenter_enable(struct ftrace_event_call *call); -void prof_sysenter_disable(struct ftrace_event_call *call); -int prof_sysexit_enable(struct ftrace_event_call *call); -void prof_sysexit_disable(struct ftrace_event_call *call); +int perf_sysenter_enable(struct ftrace_event_call *call); +void perf_sysenter_disable(struct ftrace_event_call *call); +int perf_sysexit_enable(struct ftrace_event_call *call); +void perf_sysexit_disable(struct ftrace_event_call *call); #endif #endif /* _TRACE_SYSCALL_H */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 45b4b6e55891..c502b18594cc 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4347,7 +4347,7 @@ static int perf_tp_event_match(struct perf_event *event, static void tp_perf_event_destroy(struct perf_event *event) { - ftrace_profile_disable(event->attr.config); + perf_trace_disable(event->attr.config); } static const struct pmu *tp_perf_event_init(struct perf_event *event) @@ -4361,7 +4361,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) !capable(CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); - if (ftrace_profile_enable(event->attr.config)) + if (perf_trace_enable(event->attr.config)) return NULL; event->destroy = tp_perf_event_destroy; diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index d00c6fe23f54..78edc6490038 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) 
+= trace_events.o obj-$(CONFIG_EVENT_TRACING) += trace_export.o obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o ifeq ($(CONFIG_PERF_EVENTS),y) -obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o +obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o endif obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c new file mode 100644 index 000000000000..f315b12a41d8 --- /dev/null +++ b/kernel/trace/trace_event_perf.c @@ -0,0 +1,165 @@ +/* + * trace event based perf event profiling/tracing + * + * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra + * Copyright (C) 2009-2010 Frederic Weisbecker + */ + +#include +#include +#include "trace.h" + +DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); + +static char *perf_trace_buf; +static char *perf_trace_buf_nmi; + +typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ; + +/* Count the events in use (per event id, not per instance) */ +static int total_ref_count; + +static int perf_trace_event_enable(struct ftrace_event_call *event) +{ + char *buf; + int ret = -ENOMEM; + + if (event->perf_refcount++ > 0) + return 0; + + if (!total_ref_count) { + buf = (char *)alloc_percpu(perf_trace_t); + if (!buf) + goto fail_buf; + + rcu_assign_pointer(perf_trace_buf, buf); + + buf = (char *)alloc_percpu(perf_trace_t); + if (!buf) + goto fail_buf_nmi; + + rcu_assign_pointer(perf_trace_buf_nmi, buf); + } + + ret = event->perf_event_enable(event); + if (!ret) { + total_ref_count++; + return 0; + } + +fail_buf_nmi: + if (!total_ref_count) { + free_percpu(perf_trace_buf_nmi); + free_percpu(perf_trace_buf); + perf_trace_buf_nmi = NULL; + perf_trace_buf = NULL; + } +fail_buf: + event->perf_refcount--; + + return ret; +} + +int perf_trace_enable(int event_id) +{ + struct ftrace_event_call *event; + int ret = -EINVAL; + + mutex_lock(&event_mutex); + list_for_each_entry(event, &ftrace_events, list) { + if (event->id == event_id && event->perf_event_enable && + try_module_get(event->mod)) { + ret = perf_trace_event_enable(event); + break; + } + } + mutex_unlock(&event_mutex); + + return ret; +} + +static void perf_trace_event_disable(struct ftrace_event_call *event) +{ + char *buf, *nmi_buf; + + if (--event->perf_refcount > 0) + return; + + event->perf_event_disable(event); + + if (!--total_ref_count) { + buf = perf_trace_buf; + rcu_assign_pointer(perf_trace_buf, NULL); + + nmi_buf = perf_trace_buf_nmi; + rcu_assign_pointer(perf_trace_buf_nmi, NULL); + + /* + * Ensure every events in profiling have finished before + * releasing the buffers + */ + synchronize_sched(); + + free_percpu(buf); + free_percpu(nmi_buf); + } +} + +void perf_trace_disable(int event_id) +{ + struct ftrace_event_call *event; + + mutex_lock(&event_mutex); + list_for_each_entry(event, &ftrace_events, list) { + if (event->id == event_id) { + perf_trace_event_disable(event); + module_put(event->mod); + break; + } + } + mutex_unlock(&event_mutex); +} + +__kprobes void *perf_trace_buf_prepare(int size, unsigned short type, + int *rctxp, unsigned long *irq_flags) +{ + struct trace_entry *entry; + char *trace_buf, *raw_data; + int pc, cpu; + + pc = preempt_count(); + + /* Protect the per cpu buffer, begin the rcu read side */ + local_irq_save(*irq_flags); + + *rctxp = perf_swevent_get_recursion_context(); + if (*rctxp < 0) + goto err_recursion; + + cpu = smp_processor_id(); + + if (in_nmi()) + trace_buf = rcu_dereference(perf_trace_buf_nmi); + else + trace_buf = 
rcu_dereference(perf_trace_buf); + + if (!trace_buf) + goto err; + + raw_data = per_cpu_ptr(trace_buf, cpu); + + /* zero the dead bytes from align to not leak stack to user */ + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + + entry = (struct trace_entry *)raw_data; + tracing_generic_entry_update(entry, *irq_flags, pc); + entry->type = type; + + return raw_data; +err: + perf_swevent_put_recursion_context(*rctxp); +err_recursion: + local_irq_restore(*irq_flags); + return NULL; +} +EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c deleted file mode 100644 index e66d21e15a0f..000000000000 --- a/kernel/trace/trace_event_profile.c +++ /dev/null @@ -1,165 +0,0 @@ -/* - * trace event based perf counter profiling - * - * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra - * Copyright (C) 2009-2010 Frederic Weisbecker - */ - -#include -#include -#include "trace.h" - -DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); - -static char *perf_trace_buf; -static char *perf_trace_buf_nmi; - -typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ; - -/* Count the events in use (per event id, not per instance) */ -static int total_profile_count; - -static int ftrace_profile_enable_event(struct ftrace_event_call *event) -{ - char *buf; - int ret = -ENOMEM; - - if (event->profile_count++ > 0) - return 0; - - if (!total_profile_count) { - buf = (char *)alloc_percpu(perf_trace_t); - if (!buf) - goto fail_buf; - - rcu_assign_pointer(perf_trace_buf, buf); - - buf = (char *)alloc_percpu(perf_trace_t); - if (!buf) - goto fail_buf_nmi; - - rcu_assign_pointer(perf_trace_buf_nmi, buf); - } - - ret = event->profile_enable(event); - if (!ret) { - total_profile_count++; - return 0; - } - -fail_buf_nmi: - if (!total_profile_count) { - free_percpu(perf_trace_buf_nmi); - free_percpu(perf_trace_buf); - perf_trace_buf_nmi = NULL; - perf_trace_buf = NULL; - } -fail_buf: - event->profile_count--; - - return ret; -} - -int ftrace_profile_enable(int event_id) -{ - struct ftrace_event_call *event; - int ret = -EINVAL; - - mutex_lock(&event_mutex); - list_for_each_entry(event, &ftrace_events, list) { - if (event->id == event_id && event->profile_enable && - try_module_get(event->mod)) { - ret = ftrace_profile_enable_event(event); - break; - } - } - mutex_unlock(&event_mutex); - - return ret; -} - -static void ftrace_profile_disable_event(struct ftrace_event_call *event) -{ - char *buf, *nmi_buf; - - if (--event->profile_count > 0) - return; - - event->profile_disable(event); - - if (!--total_profile_count) { - buf = perf_trace_buf; - rcu_assign_pointer(perf_trace_buf, NULL); - - nmi_buf = perf_trace_buf_nmi; - rcu_assign_pointer(perf_trace_buf_nmi, NULL); - - /* - * Ensure every events in profiling have finished before - * releasing the buffers - */ - synchronize_sched(); - - free_percpu(buf); - free_percpu(nmi_buf); - } -} - -void ftrace_profile_disable(int event_id) -{ - struct ftrace_event_call *event; - - mutex_lock(&event_mutex); - list_for_each_entry(event, &ftrace_events, list) { - if (event->id == event_id) { - ftrace_profile_disable_event(event); - module_put(event->mod); - break; - } - } - mutex_unlock(&event_mutex); -} - -__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type, - int *rctxp, unsigned long *irq_flags) -{ - struct trace_entry *entry; - char *trace_buf, *raw_data; - int pc, cpu; - - pc = preempt_count(); - - /* Protect the per cpu buffer, begin the rcu read side */ - local_irq_save(*irq_flags); - - *rctxp = 
perf_swevent_get_recursion_context(); - if (*rctxp < 0) - goto err_recursion; - - cpu = smp_processor_id(); - - if (in_nmi()) - trace_buf = rcu_dereference(perf_trace_buf_nmi); - else - trace_buf = rcu_dereference(perf_trace_buf); - - if (!trace_buf) - goto err; - - raw_data = per_cpu_ptr(trace_buf, cpu); - - /* zero the dead bytes from align to not leak stack to user */ - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; - - entry = (struct trace_entry *)raw_data; - tracing_generic_entry_update(entry, *irq_flags, pc); - entry->type = type; - - return raw_data; -err: - perf_swevent_put_recursion_context(*rctxp); -err_recursion: - local_irq_restore(*irq_flags); - return NULL; -} -EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 3f972ad98d04..beab8bf2f310 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, trace_create_file("enable", 0644, call->dir, call, enable); - if (call->id && call->profile_enable) + if (call->id && call->perf_event_enable) trace_create_file("id", 0444, call->dir, call, id); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f7a20a8bfb31..1251e367bae9 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp) #ifdef CONFIG_PERF_EVENTS /* Kprobe profile handler */ -static __kprobes void kprobe_profile_func(struct kprobe *kp, +static __kprobes void kprobe_perf_func(struct kprobe *kp, struct pt_regs *regs) { struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); @@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) return; - entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); + entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); if (!entry) return; @@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); + perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); } /* Kretprobe profile handler */ -static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, +static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, struct pt_regs *regs) { struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); @@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) return; - entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); + entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); if (!entry) return; @@ -1271,11 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - 
ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, + perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags, regs); } -static int probe_profile_enable(struct ftrace_event_call *call) +static int probe_perf_enable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; @@ -1287,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call) return enable_kprobe(&tp->rp.kp); } -static void probe_profile_disable(struct ftrace_event_call *call) +static void probe_perf_disable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; @@ -1312,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) kprobe_trace_func(kp, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) - kprobe_profile_func(kp, regs); + kprobe_perf_func(kp, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1326,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) kretprobe_trace_func(ri, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) - kretprobe_profile_func(ri, regs); + kretprobe_perf_func(ri, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1359,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp) call->unregfunc = probe_event_disable; #ifdef CONFIG_PERF_EVENTS - call->profile_enable = probe_profile_enable; - call->profile_disable = probe_profile_disable; + call->perf_event_enable = probe_perf_enable; + call->perf_event_disable = probe_perf_disable; #endif call->data = tp; ret = trace_add_event_call(call); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 7e6e84fb7b6c..33c2a5b769dc 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls); #ifdef CONFIG_PERF_EVENTS -static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); -static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls); -static int sys_prof_refcount_enter; -static int sys_prof_refcount_exit; +static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls); +static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls); +static int sys_perf_refcount_enter; +static int sys_perf_refcount_exit; -static void prof_syscall_enter(struct pt_regs *regs, long id) +static void perf_syscall_enter(struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; @@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) int size; syscall_nr = syscall_get_nr(current, regs); - if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) + if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); @@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, - "profile buffer not large enough")) + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, + "perf buffer not large enough")) return; - rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size, + rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, sys_data->enter_event->id, &rctx, &flags); if (!rec) return; @@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long 
*)&rec->args); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); + perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); } -int prof_sysenter_enable(struct ftrace_event_call *call) +int perf_sysenter_enable(struct ftrace_event_call *call) { int ret = 0; int num; @@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call) num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - if (!sys_prof_refcount_enter) - ret = register_trace_sys_enter(prof_syscall_enter); + if (!sys_perf_refcount_enter) + ret = register_trace_sys_enter(perf_syscall_enter); if (ret) { pr_info("event trace: Could not activate" "syscall entry trace point"); } else { - set_bit(num, enabled_prof_enter_syscalls); - sys_prof_refcount_enter++; + set_bit(num, enabled_perf_enter_syscalls); + sys_perf_refcount_enter++; } mutex_unlock(&syscall_trace_lock); return ret; } -void prof_sysenter_disable(struct ftrace_event_call *call) +void perf_sysenter_disable(struct ftrace_event_call *call) { int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - sys_prof_refcount_enter--; - clear_bit(num, enabled_prof_enter_syscalls); - if (!sys_prof_refcount_enter) - unregister_trace_sys_enter(prof_syscall_enter); + sys_perf_refcount_enter--; + clear_bit(num, enabled_perf_enter_syscalls); + if (!sys_perf_refcount_enter) + unregister_trace_sys_enter(perf_syscall_enter); mutex_unlock(&syscall_trace_lock); } -static void prof_syscall_exit(struct pt_regs *regs, long ret) +static void perf_syscall_exit(struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; @@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) int size; syscall_nr = syscall_get_nr(current, regs); - if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) + if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); @@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) * Impossible, but be paranoid with the future * How to put this check outside runtime? 
*/ - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, - "exit event has grown above profile buffer size")) + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, + "exit event has grown above perf buffer size")) return; - rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size, + rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, sys_data->exit_event->id, &rctx, &flags); if (!rec) return; @@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); + perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); } -int prof_sysexit_enable(struct ftrace_event_call *call) +int perf_sysexit_enable(struct ftrace_event_call *call) { int ret = 0; int num; @@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call) num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - if (!sys_prof_refcount_exit) - ret = register_trace_sys_exit(prof_syscall_exit); + if (!sys_perf_refcount_exit) + ret = register_trace_sys_exit(perf_syscall_exit); if (ret) { pr_info("event trace: Could not activate" "syscall exit trace point"); } else { - set_bit(num, enabled_prof_exit_syscalls); - sys_prof_refcount_exit++; + set_bit(num, enabled_perf_exit_syscalls); + sys_perf_refcount_exit++; } mutex_unlock(&syscall_trace_lock); return ret; } -void prof_sysexit_disable(struct ftrace_event_call *call) +void perf_sysexit_disable(struct ftrace_event_call *call) { int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - sys_prof_refcount_exit--; - clear_bit(num, enabled_prof_exit_syscalls); - if (!sys_prof_refcount_exit) - unregister_trace_sys_exit(prof_syscall_exit); + sys_perf_refcount_exit--; + clear_bit(num, enabled_perf_exit_syscalls); + if (!sys_perf_refcount_exit) + unregister_trace_sys_exit(perf_syscall_exit); mutex_unlock(&syscall_trace_lock); } -- cgit v1.2.3-71-gd317 From 85cfabbcd10f8d112feee6e2ec64ee78033b6d3c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Mar 2010 13:06:56 +0100 Subject: perf, ppc: Fix compile error due to new cpu notifiers Fix: arch/powerpc/kernel/perf_event.c:1334: error: 'power_pmu_notifier' undeclared (first use in this function) arch/powerpc/kernel/perf_event.c:1334: error: (Each undeclared identifier is reported only once arch/powerpc/kernel/perf_event.c:1334: error: for each function it appears in.) arch/powerpc/kernel/perf_event.c:1334: error: implicit declaration of function 'power_pmu_notifier' arch/powerpc/kernel/perf_event.c:1334: error: implicit declaration of function 'register_cpu_notifier' Due to commit 3f6da390 (perf: Rework and fix the arch CPU-hotplug hooks). 
Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_event.c | 2 +- include/linux/perf_event.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index fbe101d7505d..08460a2e9f41 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1298,7 +1298,7 @@ static void power_pmu_setup(int cpu) } static int __cpuinit -power_pmu_notify(struct notifier_block *self, unsigned long action, void *hcpu) +power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (long)hcpu; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 70cffd052c04..95477038a72a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -453,6 +453,7 @@ enum perf_callchain_context { #include #include #include +#include #include #define PERF_MAX_STACK_DEPTH 255 -- cgit v1.2.3-71-gd317 From 3f17522ce461a31e7ced6311b28fcf5b8a763316 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 12 Feb 2010 14:32:01 +0000 Subject: Video: ARM CLCD: Better fix for swapped IENB and CNTL registers On PL111, as found on Realview and other platforms, these registers are always arranged as CNTL then IENB. On PL110, these registers are IENB then CNTL, except on Versatile platforms. Re-arrange the handling of these register swaps so that PL111 always gets it right without resorting to ifdefs, leaving the only case needing special handling being PL110 on Versatile. Fill out amba/clcd.h with the PL110/PL111 register definition differences in case someone tries to use the PL110 specific definitions on PL111. Signed-off-by: Russell King --- drivers/video/amba-clcd.c | 31 ++++++++++++++++++++++++------- include/linux/amba/clcd.h | 33 +++++++++++++++++---------------- 2 files changed, 41 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c index a21efcd10b78..afe21e6eb544 100644 --- a/drivers/video/amba-clcd.c +++ b/drivers/video/amba-clcd.c @@ -65,16 +65,16 @@ static void clcdfb_disable(struct clcd_fb *fb) if (fb->board->disable) fb->board->disable(fb); - val = readl(fb->regs + CLCD_CNTL); + val = readl(fb->regs + fb->off_cntl); if (val & CNTL_LCDPWR) { val &= ~CNTL_LCDPWR; - writel(val, fb->regs + CLCD_CNTL); + writel(val, fb->regs + fb->off_cntl); clcdfb_sleep(20); } if (val & CNTL_LCDEN) { val &= ~CNTL_LCDEN; - writel(val, fb->regs + CLCD_CNTL); + writel(val, fb->regs + fb->off_cntl); } /* @@ -94,7 +94,7 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl) * Bring up by first enabling.. */ cntl |= CNTL_LCDEN; - writel(cntl, fb->regs + CLCD_CNTL); + writel(cntl, fb->regs + fb->off_cntl); clcdfb_sleep(20); @@ -102,7 +102,7 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl) * and now apply power. */ cntl |= CNTL_LCDPWR; - writel(cntl, fb->regs + CLCD_CNTL); + writel(cntl, fb->regs + fb->off_cntl); /* * finally, enable the interface. 
@@ -233,7 +233,7 @@ static int clcdfb_set_par(struct fb_info *info) readl(fb->regs + CLCD_TIM0), readl(fb->regs + CLCD_TIM1), readl(fb->regs + CLCD_TIM2), readl(fb->regs + CLCD_TIM3), readl(fb->regs + CLCD_UBAS), readl(fb->regs + CLCD_LBAS), - readl(fb->regs + CLCD_IENB), readl(fb->regs + CLCD_CNTL)); + readl(fb->regs + fb->off_ienb), readl(fb->regs + fb->off_cntl)); #endif return 0; @@ -345,6 +345,23 @@ static int clcdfb_register(struct clcd_fb *fb) { int ret; + /* + * ARM PL111 always has IENB at 0x1c; it's only PL110 + * which is reversed on some platforms. + */ + if (amba_manf(fb->dev) == 0x41 && amba_part(fb->dev) == 0x111) { + fb->off_ienb = CLCD_PL111_IENB; + fb->off_cntl = CLCD_PL111_CNTL; + } else { +#ifdef CONFIG_ARCH_VERSATILE + fb->off_ienb = CLCD_PL111_IENB; + fb->off_cntl = CLCD_PL111_CNTL; +#else + fb->off_ienb = CLCD_PL110_IENB; + fb->off_cntl = CLCD_PL110_CNTL; +#endif + } + fb->clk = clk_get(&fb->dev->dev, NULL); if (IS_ERR(fb->clk)) { ret = PTR_ERR(fb->clk); @@ -416,7 +433,7 @@ static int clcdfb_register(struct clcd_fb *fb) /* * Ensure interrupts are disabled. */ - writel(0, fb->regs + CLCD_IENB); + writel(0, fb->regs + fb->off_ienb); fb_set_var(&fb->fb, &fb->fb.var); diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h index 29c0448265cf..ca16c3801a1e 100644 --- a/include/linux/amba/clcd.h +++ b/include/linux/amba/clcd.h @@ -21,22 +21,21 @@ #define CLCD_UBAS 0x00000010 #define CLCD_LBAS 0x00000014 -#if !defined(CONFIG_ARCH_VERSATILE) && !defined(CONFIG_ARCH_REALVIEW) -#define CLCD_IENB 0x00000018 -#define CLCD_CNTL 0x0000001c -#else -/* - * Someone rearranged these two registers on the Versatile - * platform... - */ -#define CLCD_IENB 0x0000001c -#define CLCD_CNTL 0x00000018 -#endif - -#define CLCD_STAT 0x00000020 -#define CLCD_INTR 0x00000024 -#define CLCD_UCUR 0x00000028 -#define CLCD_LCUR 0x0000002C +#define CLCD_PL110_IENB 0x00000018 +#define CLCD_PL110_CNTL 0x0000001c +#define CLCD_PL110_STAT 0x00000020 +#define CLCD_PL110_INTR 0x00000024 +#define CLCD_PL110_UCUR 0x00000028 +#define CLCD_PL110_LCUR 0x0000002C + +#define CLCD_PL111_CNTL 0x00000018 +#define CLCD_PL111_IENB 0x0000001c +#define CLCD_PL111_RIS 0x00000020 +#define CLCD_PL111_MIS 0x00000024 +#define CLCD_PL111_ICR 0x00000028 +#define CLCD_PL111_UCUR 0x0000002c +#define CLCD_PL111_LCUR 0x00000030 + #define CLCD_PALL 0x00000200 #define CLCD_PALETTE 0x00000200 @@ -147,6 +146,8 @@ struct clcd_fb { struct clcd_board *board; void *board_data; void __iomem *regs; + u16 off_ienb; + u16 off_cntl; u32 clcd_cntl; u32 cmap[16]; }; -- cgit v1.2.3-71-gd317 From 1976152fd8e706135deed6cf333e347c08416056 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Thu, 18 Mar 2010 07:30:31 -0600 Subject: of: Fix comparison of "compatible" properties Commit 7c7b60cb87547b1664a4385c187f029bf514a737 "of: put default string compare and #a/s-cell values into common header" Breaks various things on powerpc due to using strncasecmp instead of strcasecmp for comparing against "compatible" strings. This causes things like the 4xx PCI code to fail miserably due to the partial matches in code like this: for_each_compatible_node(np, NULL, "ibm,plb-pcix") ppc4xx_probe_pcix_bridge(np); for_each_compatible_node(np, NULL, "ibm,plb-pci") ppc4xx_probe_pci_bridge(np); It's not quite right to do partial name match. Entries in a compatible list are meant to be matched whole. If a device is compatible with both "foo" and "foo1", then the device should have both strings in its "compatible" property. 
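The partial match is easy to reproduce in isolation. The following is an illustrative user-space sketch, not part of the patch, and it assumes the old of_compat_cmp() macro was invoked with strlen() of the string being searched for; under that assumption a node compatible with "ibm,plb-pcix" is wrongly accepted when the code is looking for "ibm,plb-pci":

#include <stdio.h>
#include <string.h>
#include <strings.h>

int main(void)
{
        const char *node = "ibm,plb-pcix";      /* entry in the node's "compatible" property */
        const char *want = "ibm,plb-pci";       /* string the driver is looking for */

        /* old behaviour: length-bounded compare gives a false positive */
        if (strncasecmp(node, want, strlen(want)) == 0)
                printf("strncasecmp: \"%s\" wrongly matches \"%s\"\n", node, want);

        /* new behaviour: whole-string compare rejects the partial match */
        if (strcasecmp(node, want) != 0)
                printf("strcasecmp: \"%s\" does not match \"%s\"\n", node, want);

        return 0;
}

With the full-string compare, a device that really is compatible with both strings must list both in its "compatible" property, which is exactly the behaviour described above.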
This patch reverts powerpc and microblaze back to using strcasecmp. Signed-off-by: Benjamin Herrenschmidt (for patch description) Signed-off-by: Grant Likely Acked-by: David S. Miller Acked-by: Michal Simek --- include/linux/of.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/of.h b/include/linux/of.h index f6d9cbc39c9c..a367e19bb3af 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -127,7 +127,7 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) /* Default string compare functions, Allow arch asm/prom.h to override */ #if !defined(of_compat_cmp) -#define of_compat_cmp(s1, s2, l) strncasecmp((s1), (s2), (l)) +#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2)) #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) #endif -- cgit v1.2.3-71-gd317 From 87a6aca504d65f242589583e04df5e74b5eae1fe Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 15 Mar 2010 17:14:15 -0700 Subject: Revert "tty: Add a new VT mode which is like VT_PROCESS but doesn't require a VT_RELDISP ioctl call" This reverts commit eec9fe7d1ab4a0dfac4cb43047a7657fffd0002f. Ari writes as the reason this should be reverted: The problems with this patch include: 1. There's at least one subtlety I overlooked - switching between X servers (i.e. from one X VT to another) still requires the cooperation of both X servers. I was assuming that KMS eliminated this. 2. It hasn't been tested at all (no X server patch exists which uses the new mode). As he was the original author of the patch, I'll revert it. Cc: Ari Entlich Cc: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/vt_ioctl.c | 39 +++++++++++++++++++-------------------- include/linux/vt.h | 3 +-- 2 files changed, 20 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 87778dcf8727..6aa10284104a 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -888,7 +888,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, ret = -EFAULT; goto out; } - if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS && tmp.mode != VT_PROCESS_AUTO) { + if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) { ret = -EINVAL; goto out; } @@ -1622,7 +1622,7 @@ static void complete_change_console(struct vc_data *vc) * telling it that it has acquired. Also check if it has died and * clean up (similar to logic employed in change_console()) */ - if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) { + if (vc->vt_mode.mode == VT_PROCESS) { /* * Send the signal as privileged - kill_pid() will * tell us if the process has gone or something else @@ -1682,7 +1682,7 @@ void change_console(struct vc_data *new_vc) * vt to auto control. */ vc = vc_cons[fg_console].d; - if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) { + if (vc->vt_mode.mode == VT_PROCESS) { /* * Send the signal as privileged - kill_pid() will * tell us if the process has gone or something else @@ -1693,28 +1693,27 @@ void change_console(struct vc_data *new_vc) */ vc->vt_newvt = new_vc->vc_num; if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { - if(vc->vt_mode.mode == VT_PROCESS) - /* - * It worked. Mark the vt to switch to and - * return. The process needs to send us a - * VT_RELDISP ioctl to complete the switch. - */ - return; - } else { /* - * The controlling process has died, so we revert back to - * normal operation.
In this case, we'll also change back - * to KD_TEXT mode. I'm not sure if this is strictly correct - * but it saves the agony when the X server dies and the screen - * remains blanked due to KD_GRAPHICS! It would be nice to do - * this outside of VT_PROCESS but there is no single process - * to account for and tracking tty count may be undesirable. + * It worked. Mark the vt to switch to and + * return. The process needs to send us a + * VT_RELDISP ioctl to complete the switch. */ - reset_vc(vc); + return; } /* - * Fall through to normal (VT_AUTO and VT_PROCESS_AUTO) handling of the switch... + * The controlling process has died, so we revert back to + * normal operation. In this case, we'll also change back + * to KD_TEXT mode. I'm not sure if this is strictly correct + * but it saves the agony when the X server dies and the screen + * remains blanked due to KD_GRAPHICS! It would be nice to do + * this outside of VT_PROCESS but there is no single process + * to account for and tracking tty count may be undesirable. + */ + reset_vc(vc); + + /* + * Fall through to normal (VT_AUTO) handling of the switch... */ } diff --git a/include/linux/vt.h b/include/linux/vt.h index 778b7b2a47d4..d5dd0bc408fd 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h @@ -27,7 +27,7 @@ struct vt_mode { #define VT_SETMODE 0x5602 /* set mode of active vt */ #define VT_AUTO 0x00 /* auto vt switching */ #define VT_PROCESS 0x01 /* process controls switching */ -#define VT_PROCESS_AUTO 0x02 /* process is notified of switching */ +#define VT_ACKACQ 0x02 /* acknowledge switch */ struct vt_stat { unsigned short v_active; /* active vt */ @@ -38,7 +38,6 @@ struct vt_stat { #define VT_SENDSIG 0x5604 /* signal to send to bitmask of vts */ #define VT_RELDISP 0x5605 /* release display */ -#define VT_ACKACQ 0x02 /* acknowledge switch */ #define VT_ACTIVATE 0x5606 /* make vt active */ #define VT_WAITACTIVE 0x5607 /* wait for vt active */ -- cgit v1.2.3-71-gd317 From 352fa6ad16b89f8ffd1a93b4419b1a8f2259feab Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 2 Mar 2010 22:24:19 +0000 Subject: tty: Take a 256 byte padding into account when buffering below sub-page units The TTY layer takes some care to ensure that only sub-page allocations are made with interrupts disabled. It does this by setting a goal of "TTY_BUFFER_PAGE" to allocate. Unfortunately, while TTY_BUFFER_PAGE takes the size of tty_buffer into account, it fails to account that tty_buffer_find() rounds the buffer size out to the next 256 byte boundary before adding on the size of the tty_buffer. This patch adjusts the TTY_BUFFER_PAGE calculation to take into account the size of the tty_buffer and the padding. Once applied, tty_buffer_alloc() should not require high-order allocations. Signed-off-by: Mel Gorman Cc: stable Signed-off-by: Greg Kroah-Hartman --- include/linux/tty.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/tty.h b/include/linux/tty.h index 568369a86306..593228a520e1 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -70,12 +70,13 @@ struct tty_buffer { /* * We default to dicing tty buffer allocations to this many characters - * in order to avoid multiple page allocations. We assume tty_buffer itself - * is under 256 bytes. See tty_buffer_find for the allocation logic this - * must match + * in order to avoid multiple page allocations. We know the size of + * tty_buffer itself but it must also be taken into account that the + * the buffer is 256 byte aligned. 
See tty_buffer_find for the allocation + * logic this must match */ -#define TTY_BUFFER_PAGE ((PAGE_SIZE - 256) / 2) +#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF) struct tty_bufhead { -- cgit v1.2.3-71-gd317 From 336cee42dd52824e360ab419eab4e8888eb054ec Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Mon, 8 Mar 2010 21:50:11 -0600 Subject: tty_port,usb-console: Fix usb serial console open/close regression Commit e1108a63e10d344284011cccc06328b2cd3e5da3 ("usb_serial: Use the shutdown() operation") breaks the ability to use a usb console starting in 2.6.33. This was observed when using console=ttyUSB0,115200 as a boot argument with an FTDI device. The error is: ftdi_sio ttyUSB0: ftdi_submit_read_urb - failed submitting read urb, error -22 The handling of the ASYNCB_INITIALIZED changed in 2.6.32 such that in tty_port_shutdown() it always clears the flag if it is set. The fix is to add a variable to the tty_port struct to indicate when the tty port is a console. CC: Alan Cox CC: Alan Stern CC: Oliver Neukum CC: Andrew Morton Signed-off-by: Jason Wessel Signed-off-by: Greg Kroah-Hartman --- drivers/char/tty_port.c | 2 +- drivers/usb/serial/console.c | 1 + include/linux/tty.h | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index be492dd66437..a3bd1d0b66cf 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c @@ -119,7 +119,7 @@ EXPORT_SYMBOL(tty_port_tty_set); static void tty_port_shutdown(struct tty_port *port) { mutex_lock(&port->mutex); - if (port->ops->shutdown && + if (port->ops->shutdown && !port->console && test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags)) port->ops->shutdown(port); mutex_unlock(&port->mutex); diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index b22ac3258523..f347da2ef00a 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c @@ -181,6 +181,7 @@ static int usb_console_setup(struct console *co, char *options) /* The console is special in terms of closing the device so * indicate this port is now acting as a system console. */ port->console = 1; + port->port.console = 1; mutex_unlock(&serial->disc_mutex); return retval; diff --git a/include/linux/tty.h b/include/linux/tty.h index 593228a520e1..4409967db0c4 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -224,6 +224,7 @@ struct tty_port { wait_queue_head_t close_wait; /* Close waiters */ wait_queue_head_t delta_msr_wait; /* Modem status change */ unsigned long flags; /* TTY flags ASY_*/ + unsigned char console:1; /* port is a console */ struct mutex mutex; /* Locking */ struct mutex buf_mutex; /* Buffer alloc lock */ unsigned char *xmit_buf; /* Optional buffer */ -- cgit v1.2.3-71-gd317 From f09a15e6e69884cedec4d1c022089a973aa01f1e Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 16 Mar 2010 12:55:44 -0700 Subject: USB: Fix usb_fill_int_urb for SuperSpeed devices USB 3 and Wireless USB specify a logarithmic encoding of the endpoint interval that matches the USB 2 specification. usb_fill_int_urb() didn't know that and was filling in the interval as if it was USB 1.1. Fix usb_fill_int_urb() for SuperSpeed devices, but leave the wireless case alone, because David Vrabel wants to keep the old encoding. Update the struct urb kernel doc to note that SuperSpeed URBs must have urb->interval specified in microframes. 
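As a quick illustration of the encoding difference the fix has to honour, here is a standalone sketch, not taken from the patch, of how an endpoint descriptor's bInterval translates into urb->interval for the different speeds; the bInterval value of 4 is an arbitrary example:

#include <stdio.h>

int main(void)
{
        int binterval = 4;      /* example endpoint descriptor value */

        /* full/low speed: bInterval is already a frame count (1 ms units) */
        printf("full/low speed : %d frames\n", binterval);

        /*
         * high speed and SuperSpeed: logarithmic encoding, the period is
         * 2^(bInterval - 1) microframes of 125 us, so 4 -> 8 microframes = 1 ms
         */
        printf("high/SuperSpeed: %d microframes\n", 1 << (binterval - 1));

        return 0;
}

The 1 << (interval - 1) expression added in the hunk below is this same computation applied inside usb_fill_int_urb().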
Add a missing break statement in the usb_submit_urb() interrupt URB checking, since wireless USB and SuperSpeed USB encode urb->interval differently. This allows xHCI roothubs to actually register with khubd. Signed-off-by: Matthew Wilcox Signed-off-by: Sarah Sharp Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/urb.c | 1 + include/linux/usb.h | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 27080561a1c2..45a32dadb406 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -453,6 +453,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) if (urb->interval > (1 << 15)) return -EINVAL; max = 1 << 15; + break; case USB_SPEED_WIRELESS: if (urb->interval > 16) return -EINVAL; diff --git a/include/linux/usb.h b/include/linux/usb.h index 8c9f053111bb..ce1323c4e47c 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1055,7 +1055,8 @@ typedef void (*usb_complete_t)(struct urb *); * @number_of_packets: Lists the number of ISO transfer buffers. * @interval: Specifies the polling interval for interrupt or isochronous * transfers. The units are frames (milliseconds) for full and low - * speed devices, and microframes (1/8 millisecond) for highspeed ones. + * speed devices, and microframes (1/8 millisecond) for highspeed + * and SuperSpeed devices. * @error_count: Returns the number of ISO transfers that reported errors. * @context: For use in completion functions. This normally points to * request-specific driver context. @@ -1286,9 +1287,16 @@ static inline void usb_fill_bulk_urb(struct urb *urb, * * Initializes a interrupt urb with the proper information needed to submit * it to a device. - * Note that high speed interrupt endpoints use a logarithmic encoding of - * the endpoint interval, and express polling intervals in microframes - * (eight per millisecond) rather than in frames (one per millisecond). + * + * Note that High Speed and SuperSpeed interrupt endpoints use a logarithmic + * encoding of the endpoint interval, and express polling intervals in + * microframes (eight per millisecond) rather than in frames (one per + * millisecond). + * + * Wireless USB also uses the logarithmic encoding, but specifies it in units of + * 128us instead of 125us. For Wireless USB devices, the interval is passed + * through to the host controller, rather than being translated into microframe + * units. */ static inline void usb_fill_int_urb(struct urb *urb, struct usb_device *dev, @@ -1305,7 +1313,7 @@ static inline void usb_fill_int_urb(struct urb *urb, urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; - if (dev->speed == USB_SPEED_HIGH) + if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) urb->interval = 1 << (interval - 1); else urb->interval = interval; -- cgit v1.2.3-71-gd317
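A hedged usage sketch, not taken from the patch and with every identifier (example_start_intr, done, ctx, buf, buf_len and so on) hypothetical: a typical caller keeps handing usb_fill_int_urb() the raw bInterval from the endpoint descriptor, and after this change the helper applies the logarithmic encoding for SuperSpeed devices as well as high-speed ones.

#include <linux/gfp.h>
#include <linux/usb.h>

static int example_start_intr(struct usb_device *udev,
                              struct usb_host_endpoint *ep,
                              struct urb *urb,
                              void *buf, int buf_len,
                              usb_complete_t done, void *ctx)
{
        /* pass the descriptor's bInterval unmodified; the helper encodes it */
        usb_fill_int_urb(urb, udev,
                         usb_rcvintpipe(udev, ep->desc.bEndpointAddress),
                         buf, buf_len, done, ctx,
                         ep->desc.bInterval);

        return usb_submit_urb(urb, GFP_KERNEL);
}

As the commit message notes, wireless USB keeps the old pass-through behaviour, so only the high-speed and SuperSpeed paths change inside the helper.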