cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

idle.c (4077B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Idle functions for s390.
 *
 * Copyright IBM Corp. 2014
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/sched/cputime.h>
#include <trace/events/power.h>
#include <asm/cpu_mf.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"

static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

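/*
 * Interrupt entry accounting for a CPU that was in enabled wait: record the
 * wait-exit clock and CPU-timer values, fold the per-thread MT_DIAG cycle
 * deltas into mt_cycles, and update the steal/system timers around the idle
 * period.
 */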
void account_idle_time_irq(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	u64 cycles_new[8];
	int i;

	clear_cpu_flag(CIF_ENABLED_WAIT);
	if (smp_cpu_mtid) {
		stcctm(MT_DIAG, smp_cpu_mtid, cycles_new);
		for (i = 0; i < smp_cpu_mtid; i++)
			this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
	}

	idle->clock_idle_exit = S390_lowcore.int_clock;
	idle->timer_idle_exit = S390_lowcore.sys_enter_timer;

	S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock;
	S390_lowcore.last_update_clock = idle->clock_idle_exit;

	S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter;
	S390_lowcore.last_update_timer = idle->timer_idle_exit;
}

void arch_cpu_idle(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long idle_time;
	unsigned long psw_mask;

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);

	/* psw_idle() returns with interrupts disabled. */
	psw_idle(idle, psw_mask);

	/* Account time spent with enabled wait psw loaded as idle time. */
	raw_write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(cputime_to_nsecs(idle_time));
	raw_write_seqcount_end(&idle->seqcount);
	raw_local_irq_enable();
}

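/*
 * Per-cpu sysfs attribute: number of completed idle periods; an idle
 * period that is currently in progress is counted as well.
 */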
static ssize_t show_idle_count(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long idle_count;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_count = READ_ONCE(idle->idle_count);
		if (READ_ONCE(idle->clock_idle_enter))
			idle_count++;
	} while (read_seqcount_retry(&idle->seqcount, seq));
	return sprintf(buf, "%lu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

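/*
 * Per-cpu sysfs attribute: accumulated idle time, including a still
 * running idle period, converted from TOD clock units to microseconds
 * (right shift by 12).
 */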
static ssize_t show_idle_time(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long now, idle_time, idle_enter, idle_exit, in_idle;
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_time = READ_ONCE(idle->idle_time);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	idle_time += in_idle;
	return sprintf(buf, "%lu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

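/*
 * Return the not yet accounted time of the current idle period of @cpu
 * in nanoseconds, or 0 if the CPU is not in enabled wait.
 */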
u64 arch_cpu_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long now, idle_enter, idle_exit, in_idle;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	return cputime_to_nsecs(in_idle);
}

void arch_cpu_idle_enter(void)
{
}

void arch_cpu_idle_exit(void)
{
}

void arch_cpu_idle_dead(void)
{
	cpu_die();
}