cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

watchdog_dev.c (33271B)


      1// SPDX-License-Identifier: GPL-2.0+
      2/*
      3 *	watchdog_dev.c
      4 *
      5 *	(c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
      6 *						All Rights Reserved.
      7 *
      8 *	(c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
      9 *
     10 *	(c) Copyright 2021 Hewlett Packard Enterprise Development LP.
     11 *
     12 *	This source code is part of the generic code that can be used
     13 *	by all the watchdog timer drivers.
     14 *
     15 *	This part of the generic code takes care of the following
     16 *	misc device: /dev/watchdog.
     17 *
     18 *	Based on source code of the following authors:
     19 *	  Matt Domsch <Matt_Domsch@dell.com>,
     20 *	  Rob Radez <rob@osinvestor.com>,
     21 *	  Rusty Lynch <rusty@linux.co.intel.com>
     22 *	  Satyam Sharma <satyam@infradead.org>
     23 *	  Randy Dunlap <randy.dunlap@oracle.com>
     24 *
     25 *	Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
     26 *	admit liability nor provide warranty for any of this software.
     27 *	This material is provided "AS-IS" and at no charge.
     28 */
     29
     30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     31
     32#include <linux/cdev.h>		/* For character device */
     33#include <linux/errno.h>	/* For the -ENODEV/... values */
     34#include <linux/fs.h>		/* For file operations */
     35#include <linux/init.h>		/* For __init/__exit/... */
     36#include <linux/hrtimer.h>	/* For hrtimers */
     37#include <linux/kernel.h>	/* For printk/panic/... */
     38#include <linux/kthread.h>	/* For kthread_work */
     39#include <linux/miscdevice.h>	/* For handling misc devices */
     40#include <linux/module.h>	/* For module stuff/... */
     41#include <linux/mutex.h>	/* For mutexes */
     42#include <linux/slab.h>		/* For memory functions */
     43#include <linux/types.h>	/* For standard types (like size_t) */
     44#include <linux/watchdog.h>	/* For watchdog specific items */
     45#include <linux/uaccess.h>	/* For copy_to_user/put_user/... */
     46
     47#include "watchdog_core.h"
     48#include "watchdog_pretimeout.h"
     49
     50/* the dev_t structure to store the dynamically allocated watchdog devices */
     51static dev_t watchdog_devt;
     52/* Reference to watchdog device behind /dev/watchdog */
     53static struct watchdog_core_data *old_wd_data;
     54
     55static struct kthread_worker *watchdog_kworker;
     56
     57static bool handle_boot_enabled =
     58	IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED);
     59
     60static unsigned open_timeout = CONFIG_WATCHDOG_OPEN_TIMEOUT;
     61
     62static bool watchdog_past_open_deadline(struct watchdog_core_data *data)
     63{
     64	return ktime_after(ktime_get(), data->open_deadline);
     65}
     66
     67static void watchdog_set_open_deadline(struct watchdog_core_data *data)
     68{
     69	data->open_deadline = open_timeout ?
     70		ktime_get() + ktime_set(open_timeout, 0) : KTIME_MAX;
     71}
     72
     73static inline bool watchdog_need_worker(struct watchdog_device *wdd)
     74{
     75	/* All variables in milli-seconds */
     76	unsigned int hm = wdd->max_hw_heartbeat_ms;
     77	unsigned int t = wdd->timeout * 1000;
     78
     79	/*
     80	 * A worker to generate heartbeat requests is needed if all of the
     81	 * following conditions are true.
     82	 * - Userspace activated the watchdog.
     83	 * - The driver provided a value for the maximum hardware timeout, and
     84	 *   thus is aware that the framework supports generating heartbeat
     85	 *   requests.
     86	 * - Userspace requests a longer timeout than the hardware can handle.
     87	 *
     88	 * Alternatively, if userspace has not opened the watchdog
     89	 * device, we take care of feeding the watchdog if it is
     90	 * running.
     91	 */
     92	return (hm && watchdog_active(wdd) && t > hm) ||
     93		(t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
     94}
     95
     96static ktime_t watchdog_next_keepalive(struct watchdog_device *wdd)
     97{
     98	struct watchdog_core_data *wd_data = wdd->wd_data;
     99	unsigned int timeout_ms = wdd->timeout * 1000;
    100	ktime_t keepalive_interval;
    101	ktime_t last_heartbeat, latest_heartbeat;
    102	ktime_t virt_timeout;
    103	unsigned int hw_heartbeat_ms;
    104
    105	if (watchdog_active(wdd))
    106		virt_timeout = ktime_add(wd_data->last_keepalive,
    107					 ms_to_ktime(timeout_ms));
    108	else
    109		virt_timeout = wd_data->open_deadline;
    110
    111	hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
    112	keepalive_interval = ms_to_ktime(hw_heartbeat_ms / 2);
    113
    114	/*
    115	 * To ensure that the watchdog times out wdd->timeout seconds
    116	 * after the most recent ping from userspace, the last
    117	 * worker ping has to come in hw_heartbeat_ms before this timeout.
    118	 */
    119	last_heartbeat = ktime_sub(virt_timeout, ms_to_ktime(hw_heartbeat_ms));
    120	latest_heartbeat = ktime_sub(last_heartbeat, ktime_get());
    121	if (ktime_before(latest_heartbeat, keepalive_interval))
    122		return latest_heartbeat;
    123	return keepalive_interval;
    124}
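
To make the arithmetic above concrete, here is a small standalone sketch (plain userspace C, with hypothetical values: a 60 s timeout against hardware that can only wait 10 s) mirroring the calculation watchdog_next_keepalive() performs; it is an illustration, not part of this file.

/* Illustration only: mirrors the keepalive math above with made-up values. */
#include <stdio.h>

int main(void)
{
	unsigned int timeout_ms = 60 * 1000;          /* wdd->timeout = 60 s (hypothetical) */
	unsigned int max_hw_heartbeat_ms = 10 * 1000; /* hardware can only wait 10 s (hypothetical) */

	/* hw_heartbeat_ms = min_not_zero(timeout_ms, max_hw_heartbeat_ms) */
	unsigned int hw_heartbeat_ms = max_hw_heartbeat_ms;

	/* The worker normally pings every hw_heartbeat_ms / 2. */
	unsigned int keepalive_interval_ms = hw_heartbeat_ms / 2;

	/*
	 * The last worker ping must land hw_heartbeat_ms before the virtual
	 * timeout, so userspace still gets the full 60 s it asked for.
	 */
	printf("worker pings every %u ms; last ping no later than %u ms after the user ping\n",
	       keepalive_interval_ms, timeout_ms - hw_heartbeat_ms);
	return 0;
}
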
    125
    126static inline void watchdog_update_worker(struct watchdog_device *wdd)
    127{
    128	struct watchdog_core_data *wd_data = wdd->wd_data;
    129
    130	if (watchdog_need_worker(wdd)) {
    131		ktime_t t = watchdog_next_keepalive(wdd);
    132
    133		if (t > 0)
    134			hrtimer_start(&wd_data->timer, t,
    135				      HRTIMER_MODE_REL_HARD);
    136	} else {
    137		hrtimer_cancel(&wd_data->timer);
    138	}
    139}
    140
    141static int __watchdog_ping(struct watchdog_device *wdd)
    142{
    143	struct watchdog_core_data *wd_data = wdd->wd_data;
    144	ktime_t earliest_keepalive, now;
    145	int err;
    146
    147	earliest_keepalive = ktime_add(wd_data->last_hw_keepalive,
    148				       ms_to_ktime(wdd->min_hw_heartbeat_ms));
    149	now = ktime_get();
    150
    151	if (ktime_after(earliest_keepalive, now)) {
    152		hrtimer_start(&wd_data->timer,
    153			      ktime_sub(earliest_keepalive, now),
    154			      HRTIMER_MODE_REL_HARD);
    155		return 0;
    156	}
    157
    158	wd_data->last_hw_keepalive = now;
    159
    160	if (wdd->ops->ping)
    161		err = wdd->ops->ping(wdd);  /* ping the watchdog */
    162	else
    163		err = wdd->ops->start(wdd); /* restart watchdog */
    164
    165	if (err == 0)
    166		watchdog_hrtimer_pretimeout_start(wdd);
    167
    168	watchdog_update_worker(wdd);
    169
    170	return err;
    171}
    172
    173/*
    174 * watchdog_ping - ping the watchdog
    175 * @wdd: The watchdog device to ping
    176 *
    177 * If the watchdog has no own ping operation then it needs to be
    178 * restarted via the start operation. This wrapper function does
    179 * exactly that.
    180 * We only ping when the watchdog device is running.
    181 * The caller must hold wd_data->lock.
    182 *
    183 * Return: 0 on success, error otherwise.
    184 */
    185static int watchdog_ping(struct watchdog_device *wdd)
    186{
    187	struct watchdog_core_data *wd_data = wdd->wd_data;
    188
    189	if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
    190		return 0;
    191
    192	set_bit(_WDOG_KEEPALIVE, &wd_data->status);
    193
    194	wd_data->last_keepalive = ktime_get();
    195	return __watchdog_ping(wdd);
    196}
    197
    198static bool watchdog_worker_should_ping(struct watchdog_core_data *wd_data)
    199{
    200	struct watchdog_device *wdd = wd_data->wdd;
    201
    202	if (!wdd)
    203		return false;
    204
    205	if (watchdog_active(wdd))
    206		return true;
    207
    208	return watchdog_hw_running(wdd) && !watchdog_past_open_deadline(wd_data);
    209}
    210
    211static void watchdog_ping_work(struct kthread_work *work)
    212{
    213	struct watchdog_core_data *wd_data;
    214
    215	wd_data = container_of(work, struct watchdog_core_data, work);
    216
    217	mutex_lock(&wd_data->lock);
    218	if (watchdog_worker_should_ping(wd_data))
    219		__watchdog_ping(wd_data->wdd);
    220	mutex_unlock(&wd_data->lock);
    221}
    222
    223static enum hrtimer_restart watchdog_timer_expired(struct hrtimer *timer)
    224{
    225	struct watchdog_core_data *wd_data;
    226
    227	wd_data = container_of(timer, struct watchdog_core_data, timer);
    228
    229	kthread_queue_work(watchdog_kworker, &wd_data->work);
    230	return HRTIMER_NORESTART;
    231}
    232
    233/*
    234 * watchdog_start - wrapper to start the watchdog
    235 * @wdd: The watchdog device to start
    236 *
    237 * Start the watchdog if it is not active and mark it active.
    238 * The caller must hold wd_data->lock.
    239 *
    240 * Return: 0 on success or a negative errno code for failure.
    241 */
    242static int watchdog_start(struct watchdog_device *wdd)
    243{
    244	struct watchdog_core_data *wd_data = wdd->wd_data;
    245	ktime_t started_at;
    246	int err;
    247
    248	if (watchdog_active(wdd))
    249		return 0;
    250
    251	set_bit(_WDOG_KEEPALIVE, &wd_data->status);
    252
    253	started_at = ktime_get();
    254	if (watchdog_hw_running(wdd) && wdd->ops->ping) {
    255		err = __watchdog_ping(wdd);
    256		if (err == 0) {
    257			set_bit(WDOG_ACTIVE, &wdd->status);
    258			watchdog_hrtimer_pretimeout_start(wdd);
    259		}
    260	} else {
    261		err = wdd->ops->start(wdd);
    262		if (err == 0) {
    263			set_bit(WDOG_ACTIVE, &wdd->status);
    264			wd_data->last_keepalive = started_at;
    265			wd_data->last_hw_keepalive = started_at;
    266			watchdog_update_worker(wdd);
    267			watchdog_hrtimer_pretimeout_start(wdd);
    268		}
    269	}
    270
    271	return err;
    272}
    273
    274/*
    275 * watchdog_stop - wrapper to stop the watchdog
    276 * @wdd: The watchdog device to stop
    277 *
    278 * Stop the watchdog if it is still active and unmark it active.
    279 * If the 'nowayout' feature was set, the watchdog cannot be stopped.
    280 * The caller must hold wd_data->lock.
    281 *
    282 * Return: 0 on success or a negative errno code for failure.
    283 */
    284static int watchdog_stop(struct watchdog_device *wdd)
    285{
    286	int err = 0;
    287
    288	if (!watchdog_active(wdd))
    289		return 0;
    290
    291	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
    292		pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
    293			wdd->id);
    294		return -EBUSY;
    295	}
    296
    297	if (wdd->ops->stop) {
    298		clear_bit(WDOG_HW_RUNNING, &wdd->status);
    299		err = wdd->ops->stop(wdd);
    300	} else {
    301		set_bit(WDOG_HW_RUNNING, &wdd->status);
    302	}
    303
    304	if (err == 0) {
    305		clear_bit(WDOG_ACTIVE, &wdd->status);
    306		watchdog_update_worker(wdd);
    307		watchdog_hrtimer_pretimeout_stop(wdd);
    308	}
    309
    310	return err;
    311}
    312
    313/*
    314 * watchdog_get_status - wrapper to get the watchdog status
    315 * @wdd: The watchdog device to get the status from
    316 *
    317 * Get the watchdog's status flags.
    318 * The caller must hold wd_data->lock.
    319 *
    320 * Return: watchdog's status flags.
    321 */
    322static unsigned int watchdog_get_status(struct watchdog_device *wdd)
    323{
    324	struct watchdog_core_data *wd_data = wdd->wd_data;
    325	unsigned int status;
    326
    327	if (wdd->ops->status)
    328		status = wdd->ops->status(wdd);
    329	else
    330		status = wdd->bootstatus & (WDIOF_CARDRESET |
    331					    WDIOF_OVERHEAT |
    332					    WDIOF_FANFAULT |
    333					    WDIOF_EXTERN1 |
    334					    WDIOF_EXTERN2 |
    335					    WDIOF_POWERUNDER |
    336					    WDIOF_POWEROVER);
    337
    338	if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
    339		status |= WDIOF_MAGICCLOSE;
    340
    341	if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
    342		status |= WDIOF_KEEPALIVEPING;
    343
    344	if (IS_ENABLED(CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT))
    345		status |= WDIOF_PRETIMEOUT;
    346
    347	return status;
    348}
    349
    350/*
    351 * watchdog_set_timeout - set the watchdog timer timeout
    352 * @wdd:	The watchdog device to set the timeout for
    353 * @timeout:	Timeout to set in seconds
    354 *
    355 * The caller must hold wd_data->lock.
    356 *
    357 * Return: 0 if successful, error otherwise.
    358 */
    359static int watchdog_set_timeout(struct watchdog_device *wdd,
    360							unsigned int timeout)
    361{
    362	int err = 0;
    363
    364	if (!(wdd->info->options & WDIOF_SETTIMEOUT))
    365		return -EOPNOTSUPP;
    366
    367	if (watchdog_timeout_invalid(wdd, timeout))
    368		return -EINVAL;
    369
    370	if (wdd->ops->set_timeout) {
    371		err = wdd->ops->set_timeout(wdd, timeout);
    372	} else {
    373		wdd->timeout = timeout;
    374		/* Disable pretimeout if it doesn't fit the new timeout */
    375		if (wdd->pretimeout >= wdd->timeout)
    376			wdd->pretimeout = 0;
    377	}
    378
    379	watchdog_update_worker(wdd);
    380
    381	return err;
    382}
    383
    384/*
    385 * watchdog_set_pretimeout - set the watchdog timer pretimeout
    386 * @wdd:	The watchdog device to set the timeout for
    387 * @timeout:	pretimeout to set in seconds
    388 *
    389 * Return: 0 if successful, error otherwise.
    390 */
    391static int watchdog_set_pretimeout(struct watchdog_device *wdd,
    392				   unsigned int timeout)
    393{
    394	int err = 0;
    395
    396	if (!watchdog_have_pretimeout(wdd))
    397		return -EOPNOTSUPP;
    398
    399	if (watchdog_pretimeout_invalid(wdd, timeout))
    400		return -EINVAL;
    401
    402	if (wdd->ops->set_pretimeout && (wdd->info->options & WDIOF_PRETIMEOUT))
    403		err = wdd->ops->set_pretimeout(wdd, timeout);
    404	else
    405		wdd->pretimeout = timeout;
    406
    407	return err;
    408}
    409
    410/*
    411 * watchdog_get_timeleft - wrapper to get the time left before a reboot
    412 * @wdd:	The watchdog device to get the remaining time from
    413 * @timeleft:	The time that's left
    414 *
    415 * Get the time before a watchdog will reboot (if not pinged).
    416 * The caller must hold wd_data->lock.
    417 *
    418 * Return: 0 if successful, error otherwise.
    419 */
    420static int watchdog_get_timeleft(struct watchdog_device *wdd,
    421							unsigned int *timeleft)
    422{
    423	*timeleft = 0;
    424
    425	if (!wdd->ops->get_timeleft)
    426		return -EOPNOTSUPP;
    427
    428	*timeleft = wdd->ops->get_timeleft(wdd);
    429
    430	return 0;
    431}
    432
    433#ifdef CONFIG_WATCHDOG_SYSFS
    434static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
    435				char *buf)
    436{
    437	struct watchdog_device *wdd = dev_get_drvdata(dev);
    438
    439	return sysfs_emit(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT,
    440						  &wdd->status));
    441}
    442
    443static ssize_t nowayout_store(struct device *dev, struct device_attribute *attr,
    444				const char *buf, size_t len)
    445{
    446	struct watchdog_device *wdd = dev_get_drvdata(dev);
    447	unsigned int value;
    448	int ret;
    449
    450	ret = kstrtouint(buf, 0, &value);
    451	if (ret)
    452		return ret;
    453	if (value > 1)
    454		return -EINVAL;
    455	/* nowayout cannot be disabled once set */
    456	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status) && !value)
    457		return -EPERM;
    458	watchdog_set_nowayout(wdd, value);
    459	return len;
    460}
    461static DEVICE_ATTR_RW(nowayout);
    462
    463static ssize_t status_show(struct device *dev, struct device_attribute *attr,
    464				char *buf)
    465{
    466	struct watchdog_device *wdd = dev_get_drvdata(dev);
    467	struct watchdog_core_data *wd_data = wdd->wd_data;
    468	unsigned int status;
    469
    470	mutex_lock(&wd_data->lock);
    471	status = watchdog_get_status(wdd);
    472	mutex_unlock(&wd_data->lock);
    473
    474	return sysfs_emit(buf, "0x%x\n", status);
    475}
    476static DEVICE_ATTR_RO(status);
    477
    478static ssize_t bootstatus_show(struct device *dev,
    479				struct device_attribute *attr, char *buf)
    480{
    481	struct watchdog_device *wdd = dev_get_drvdata(dev);
    482
    483	return sysfs_emit(buf, "%u\n", wdd->bootstatus);
    484}
    485static DEVICE_ATTR_RO(bootstatus);
    486
    487static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
    488				char *buf)
    489{
    490	struct watchdog_device *wdd = dev_get_drvdata(dev);
    491	struct watchdog_core_data *wd_data = wdd->wd_data;
    492	ssize_t status;
    493	unsigned int val;
    494
    495	mutex_lock(&wd_data->lock);
    496	status = watchdog_get_timeleft(wdd, &val);
    497	mutex_unlock(&wd_data->lock);
    498	if (!status)
    499		status = sysfs_emit(buf, "%u\n", val);
    500
    501	return status;
    502}
    503static DEVICE_ATTR_RO(timeleft);
    504
    505static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
    506				char *buf)
    507{
    508	struct watchdog_device *wdd = dev_get_drvdata(dev);
    509
    510	return sysfs_emit(buf, "%u\n", wdd->timeout);
    511}
    512static DEVICE_ATTR_RO(timeout);
    513
    514static ssize_t min_timeout_show(struct device *dev,
    515				struct device_attribute *attr, char *buf)
    516{
    517	struct watchdog_device *wdd = dev_get_drvdata(dev);
    518
    519	return sysfs_emit(buf, "%u\n", wdd->min_timeout);
    520}
    521static DEVICE_ATTR_RO(min_timeout);
    522
    523static ssize_t max_timeout_show(struct device *dev,
    524				struct device_attribute *attr, char *buf)
    525{
    526	struct watchdog_device *wdd = dev_get_drvdata(dev);
    527
    528	return sysfs_emit(buf, "%u\n", wdd->max_timeout);
    529}
    530static DEVICE_ATTR_RO(max_timeout);
    531
    532static ssize_t pretimeout_show(struct device *dev,
    533			       struct device_attribute *attr, char *buf)
    534{
    535	struct watchdog_device *wdd = dev_get_drvdata(dev);
    536
    537	return sysfs_emit(buf, "%u\n", wdd->pretimeout);
    538}
    539static DEVICE_ATTR_RO(pretimeout);
    540
    541static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
    542				char *buf)
    543{
    544	struct watchdog_device *wdd = dev_get_drvdata(dev);
    545
    546	return sysfs_emit(buf, "%s\n", wdd->info->identity);
    547}
    548static DEVICE_ATTR_RO(identity);
    549
    550static ssize_t state_show(struct device *dev, struct device_attribute *attr,
    551				char *buf)
    552{
    553	struct watchdog_device *wdd = dev_get_drvdata(dev);
    554
    555	if (watchdog_active(wdd))
    556		return sysfs_emit(buf, "active\n");
    557
    558	return sysfs_emit(buf, "inactive\n");
    559}
    560static DEVICE_ATTR_RO(state);
    561
    562static ssize_t pretimeout_available_governors_show(struct device *dev,
    563				   struct device_attribute *attr, char *buf)
    564{
    565	return watchdog_pretimeout_available_governors_get(buf);
    566}
    567static DEVICE_ATTR_RO(pretimeout_available_governors);
    568
    569static ssize_t pretimeout_governor_show(struct device *dev,
    570					struct device_attribute *attr,
    571					char *buf)
    572{
    573	struct watchdog_device *wdd = dev_get_drvdata(dev);
    574
    575	return watchdog_pretimeout_governor_get(wdd, buf);
    576}
    577
    578static ssize_t pretimeout_governor_store(struct device *dev,
    579					 struct device_attribute *attr,
    580					 const char *buf, size_t count)
    581{
    582	struct watchdog_device *wdd = dev_get_drvdata(dev);
    583	int ret = watchdog_pretimeout_governor_set(wdd, buf);
    584
    585	if (!ret)
    586		ret = count;
    587
    588	return ret;
    589}
    590static DEVICE_ATTR_RW(pretimeout_governor);
    591
    592static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
    593				int n)
    594{
    595	struct device *dev = kobj_to_dev(kobj);
    596	struct watchdog_device *wdd = dev_get_drvdata(dev);
    597	umode_t mode = attr->mode;
    598
    599	if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
    600		mode = 0;
    601	else if (attr == &dev_attr_pretimeout.attr && !watchdog_have_pretimeout(wdd))
    602		mode = 0;
    603	else if ((attr == &dev_attr_pretimeout_governor.attr ||
    604		  attr == &dev_attr_pretimeout_available_governors.attr) &&
    605		 (!watchdog_have_pretimeout(wdd) || !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
    606		mode = 0;
    607
    608	return mode;
    609}
    610static struct attribute *wdt_attrs[] = {
    611	&dev_attr_state.attr,
    612	&dev_attr_identity.attr,
    613	&dev_attr_timeout.attr,
    614	&dev_attr_min_timeout.attr,
    615	&dev_attr_max_timeout.attr,
    616	&dev_attr_pretimeout.attr,
    617	&dev_attr_timeleft.attr,
    618	&dev_attr_bootstatus.attr,
    619	&dev_attr_status.attr,
    620	&dev_attr_nowayout.attr,
    621	&dev_attr_pretimeout_governor.attr,
    622	&dev_attr_pretimeout_available_governors.attr,
    623	NULL,
    624};
    625
    626static const struct attribute_group wdt_group = {
    627	.attrs = wdt_attrs,
    628	.is_visible = wdt_is_visible,
    629};
    630__ATTRIBUTE_GROUPS(wdt);
    631#else
    632#define wdt_groups	NULL
    633#endif
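
The attribute group above is exposed through the watchdog class device, so each registered watchdog appears under /sys/class/watchdog/watchdogN/. A minimal userspace sketch for reading a few of these files, assuming a device named watchdog0 and CONFIG_WATCHDOG_SYSFS enabled; it is an illustration, not part of this file.

/* Illustration only: read a few of the sysfs attributes created above. */
#include <stdio.h>

static void show(const char *name)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/watchdog/watchdog0/%s", name);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(buf, sizeof(buf), f))
		printf("%-12s %s", name, buf);
	fclose(f);
}

int main(void)
{
	show("identity");
	show("state");      /* "active" or "inactive" */
	show("timeout");    /* seconds */
	show("timeleft");   /* only present if the driver provides get_timeleft */
	show("nowayout");
	return 0;
}
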
    634
    635/*
     636 * watchdog_ioctl_op - call the watchdog driver's ioctl op if defined
    637 * @wdd: The watchdog device to do the ioctl on
    638 * @cmd: Watchdog command
    639 * @arg: Argument pointer
    640 *
    641 * The caller must hold wd_data->lock.
    642 *
    643 * Return: 0 if successful, error otherwise.
    644 */
    645static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
    646							unsigned long arg)
    647{
    648	if (!wdd->ops->ioctl)
    649		return -ENOIOCTLCMD;
    650
    651	return wdd->ops->ioctl(wdd, cmd, arg);
    652}
    653
    654/*
    655 * watchdog_write - writes to the watchdog
    656 * @file:	File from VFS
    657 * @data:	User address of data
    658 * @len:	Length of data
    659 * @ppos:	Pointer to the file offset
    660 *
    661 * A write to a watchdog device is defined as a keepalive ping.
    662 * Writing the magic 'V' sequence allows the next close to turn
    663 * off the watchdog (if 'nowayout' is not set).
    664 *
    665 * Return: @len if successful, error otherwise.
    666 */
    667static ssize_t watchdog_write(struct file *file, const char __user *data,
    668						size_t len, loff_t *ppos)
    669{
    670	struct watchdog_core_data *wd_data = file->private_data;
    671	struct watchdog_device *wdd;
    672	int err;
    673	size_t i;
    674	char c;
    675
    676	if (len == 0)
    677		return 0;
    678
    679	/*
    680	 * Note: just in case someone wrote the magic character
    681	 * five months ago...
    682	 */
    683	clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
    684
    685	/* scan to see whether or not we got the magic character */
    686	for (i = 0; i != len; i++) {
    687		if (get_user(c, data + i))
    688			return -EFAULT;
    689		if (c == 'V')
    690			set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
    691	}
    692
    693	/* someone wrote to us, so we send the watchdog a keepalive ping */
    694
    695	err = -ENODEV;
    696	mutex_lock(&wd_data->lock);
    697	wdd = wd_data->wdd;
    698	if (wdd)
    699		err = watchdog_ping(wdd);
    700	mutex_unlock(&wd_data->lock);
    701
    702	if (err < 0)
    703		return err;
    704
    705	return len;
    706}
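
As the comment above describes, any write is treated as a keepalive ping and a 'V' arms the magic close. A minimal userspace sketch of that convention (error handling omitted), assuming the standard /dev/watchdog node; it is an illustration, not part of this file.

/* Illustration only: ping via write(2), then magic-close on exit. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;

	for (int i = 0; i < 10; i++) {
		write(fd, "\0", 1);	/* any byte counts as a keepalive ping */
		sleep(5);
	}

	write(fd, "V", 1);		/* allow the release below to stop the watchdog */
	close(fd);			/* watchdog_release(): stops unless nowayout is set */
	return 0;
}
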
    707
    708/*
     709 * watchdog_ioctl - handle the different ioctls for the watchdog device
    710 * @file:	File handle to the device
    711 * @cmd:	Watchdog command
    712 * @arg:	Argument pointer
    713 *
    714 * The watchdog API defines a common set of functions for all watchdogs
    715 * according to their available features.
    716 *
    717 * Return: 0 if successful, error otherwise.
    718 */
    719
    720static long watchdog_ioctl(struct file *file, unsigned int cmd,
    721							unsigned long arg)
    722{
    723	struct watchdog_core_data *wd_data = file->private_data;
    724	void __user *argp = (void __user *)arg;
    725	struct watchdog_device *wdd;
    726	int __user *p = argp;
    727	unsigned int val;
    728	int err;
    729
    730	mutex_lock(&wd_data->lock);
    731
    732	wdd = wd_data->wdd;
    733	if (!wdd) {
    734		err = -ENODEV;
    735		goto out_ioctl;
    736	}
    737
    738	err = watchdog_ioctl_op(wdd, cmd, arg);
    739	if (err != -ENOIOCTLCMD)
    740		goto out_ioctl;
    741
    742	switch (cmd) {
    743	case WDIOC_GETSUPPORT:
    744		err = copy_to_user(argp, wdd->info,
    745			sizeof(struct watchdog_info)) ? -EFAULT : 0;
    746		break;
    747	case WDIOC_GETSTATUS:
    748		val = watchdog_get_status(wdd);
    749		err = put_user(val, p);
    750		break;
    751	case WDIOC_GETBOOTSTATUS:
    752		err = put_user(wdd->bootstatus, p);
    753		break;
    754	case WDIOC_SETOPTIONS:
    755		if (get_user(val, p)) {
    756			err = -EFAULT;
    757			break;
    758		}
    759		if (val & WDIOS_DISABLECARD) {
    760			err = watchdog_stop(wdd);
    761			if (err < 0)
    762				break;
    763		}
    764		if (val & WDIOS_ENABLECARD)
    765			err = watchdog_start(wdd);
    766		break;
    767	case WDIOC_KEEPALIVE:
    768		if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
    769			err = -EOPNOTSUPP;
    770			break;
    771		}
    772		err = watchdog_ping(wdd);
    773		break;
    774	case WDIOC_SETTIMEOUT:
    775		if (get_user(val, p)) {
    776			err = -EFAULT;
    777			break;
    778		}
    779		err = watchdog_set_timeout(wdd, val);
    780		if (err < 0)
    781			break;
    782		/* If the watchdog is active then we send a keepalive ping
     783		 * to make sure that the watchdog keeps running (and if
    784		 * possible that it takes the new timeout) */
    785		err = watchdog_ping(wdd);
    786		if (err < 0)
    787			break;
    788		fallthrough;
    789	case WDIOC_GETTIMEOUT:
    790		/* timeout == 0 means that we don't know the timeout */
    791		if (wdd->timeout == 0) {
    792			err = -EOPNOTSUPP;
    793			break;
    794		}
    795		err = put_user(wdd->timeout, p);
    796		break;
    797	case WDIOC_GETTIMELEFT:
    798		err = watchdog_get_timeleft(wdd, &val);
    799		if (err < 0)
    800			break;
    801		err = put_user(val, p);
    802		break;
    803	case WDIOC_SETPRETIMEOUT:
    804		if (get_user(val, p)) {
    805			err = -EFAULT;
    806			break;
    807		}
    808		err = watchdog_set_pretimeout(wdd, val);
    809		break;
    810	case WDIOC_GETPRETIMEOUT:
    811		err = put_user(wdd->pretimeout, p);
    812		break;
    813	default:
    814		err = -ENOTTY;
    815		break;
    816	}
    817
    818out_ioctl:
    819	mutex_unlock(&wd_data->lock);
    820	return err;
    821}
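
The same device node can be driven through the ioctl interface handled above, using the WDIOC_* commands from <linux/watchdog.h>. A short userspace sketch; the 30-second timeout is an arbitrary example value, and the snippet is an illustration rather than part of this file.

/* Illustration only: configure and feed the watchdog via ioctl(2). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_RDWR);
	int timeout = 30, left = 0;

	if (fd < 0)
		return 1;

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* may be rounded by the driver */
	ioctl(fd, WDIOC_GETTIMEOUT, &timeout);
	printf("effective timeout: %d s\n", timeout);

	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* equivalent to a write() ping */

	if (ioctl(fd, WDIOC_GETTIMELEFT, &left) == 0)
		printf("time left: %d s\n", left);

	write(fd, "V", 1);			/* magic close, as with the write path */
	close(fd);
	return 0;
}
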
    822
    823/*
    824 * watchdog_open - open the /dev/watchdog* devices
    825 * @inode:	Inode of device
    826 * @file:	File handle to device
    827 *
    828 * When the /dev/watchdog* device gets opened, we start the watchdog.
    829 * Watch out: the /dev/watchdog device is single open, so we make sure
    830 * it can only be opened once.
    831 *
    832 * Return: 0 if successful, error otherwise.
    833 */
    834static int watchdog_open(struct inode *inode, struct file *file)
    835{
    836	struct watchdog_core_data *wd_data;
    837	struct watchdog_device *wdd;
    838	bool hw_running;
    839	int err;
    840
    841	/* Get the corresponding watchdog device */
    842	if (imajor(inode) == MISC_MAJOR)
    843		wd_data = old_wd_data;
    844	else
    845		wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
    846				       cdev);
    847
    848	/* the watchdog is single open! */
    849	if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
    850		return -EBUSY;
    851
    852	wdd = wd_data->wdd;
    853
    854	/*
    855	 * If the /dev/watchdog device is open, we don't want the module
    856	 * to be unloaded.
    857	 */
    858	hw_running = watchdog_hw_running(wdd);
    859	if (!hw_running && !try_module_get(wdd->ops->owner)) {
    860		err = -EBUSY;
    861		goto out_clear;
    862	}
    863
    864	err = watchdog_start(wdd);
    865	if (err < 0)
    866		goto out_mod;
    867
    868	file->private_data = wd_data;
    869
    870	if (!hw_running)
    871		get_device(&wd_data->dev);
    872
    873	/*
    874	 * open_timeout only applies for the first open from
    875	 * userspace. Set open_deadline to infinity so that the kernel
    876	 * will take care of an always-running hardware watchdog in
    877	 * case the device gets magic-closed or WDIOS_DISABLECARD is
    878	 * applied.
    879	 */
    880	wd_data->open_deadline = KTIME_MAX;
    881
     882	/* /dev/watchdog is a virtual (and thus non-seekable) device node */
    883	return stream_open(inode, file);
    884
    885out_mod:
    886	module_put(wd_data->wdd->ops->owner);
    887out_clear:
    888	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
    889	return err;
    890}
    891
    892static void watchdog_core_data_release(struct device *dev)
    893{
    894	struct watchdog_core_data *wd_data;
    895
    896	wd_data = container_of(dev, struct watchdog_core_data, dev);
    897
    898	kfree(wd_data);
    899}
    900
    901/*
    902 * watchdog_release - release the watchdog device
    903 * @inode:	Inode of device
    904 * @file:	File handle to device
    905 *
    906 * This is the code for when /dev/watchdog gets closed. We will only
    907 * stop the watchdog when we have received the magic char (and nowayout
    908 * was not set), else the watchdog will keep running.
    909 *
    910 * Always returns 0.
    911 */
    912static int watchdog_release(struct inode *inode, struct file *file)
    913{
    914	struct watchdog_core_data *wd_data = file->private_data;
    915	struct watchdog_device *wdd;
    916	int err = -EBUSY;
    917	bool running;
    918
    919	mutex_lock(&wd_data->lock);
    920
    921	wdd = wd_data->wdd;
    922	if (!wdd)
    923		goto done;
    924
    925	/*
    926	 * We only stop the watchdog if we received the magic character
    927	 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
    928	 * watchdog_stop will fail.
    929	 */
    930	if (!watchdog_active(wdd))
    931		err = 0;
    932	else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
    933		 !(wdd->info->options & WDIOF_MAGICCLOSE))
    934		err = watchdog_stop(wdd);
    935
    936	/* If the watchdog was not stopped, send a keepalive ping */
    937	if (err < 0) {
    938		pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
    939		watchdog_ping(wdd);
    940	}
    941
    942	watchdog_update_worker(wdd);
    943
    944	/* make sure that /dev/watchdog can be re-opened */
    945	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
    946
    947done:
    948	running = wdd && watchdog_hw_running(wdd);
    949	mutex_unlock(&wd_data->lock);
    950	/*
    951	 * Allow the owner module to be unloaded again unless the watchdog
    952	 * is still running. If the watchdog is still running, it can not
    953	 * be stopped, and its driver must not be unloaded.
    954	 */
    955	if (!running) {
    956		module_put(wd_data->cdev.owner);
    957		put_device(&wd_data->dev);
    958	}
    959	return 0;
    960}
    961
    962static const struct file_operations watchdog_fops = {
    963	.owner		= THIS_MODULE,
    964	.write		= watchdog_write,
    965	.unlocked_ioctl	= watchdog_ioctl,
    966	.compat_ioctl	= compat_ptr_ioctl,
    967	.open		= watchdog_open,
    968	.release	= watchdog_release,
    969};
    970
    971static struct miscdevice watchdog_miscdev = {
    972	.minor		= WATCHDOG_MINOR,
    973	.name		= "watchdog",
    974	.fops		= &watchdog_fops,
    975};
    976
    977static struct class watchdog_class = {
    978	.name =		"watchdog",
    979	.owner =	THIS_MODULE,
    980	.dev_groups =	wdt_groups,
    981};
    982
    983/*
    984 * watchdog_cdev_register - register watchdog character device
    985 * @wdd: Watchdog device
    986 *
    987 * Register a watchdog character device including handling the legacy
    988 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
    989 * thus we set it up like that.
    990 *
    991 * Return: 0 if successful, error otherwise.
    992 */
    993static int watchdog_cdev_register(struct watchdog_device *wdd)
    994{
    995	struct watchdog_core_data *wd_data;
    996	int err;
    997
    998	wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
    999	if (!wd_data)
   1000		return -ENOMEM;
   1001	mutex_init(&wd_data->lock);
   1002
   1003	wd_data->wdd = wdd;
   1004	wdd->wd_data = wd_data;
   1005
   1006	if (IS_ERR_OR_NULL(watchdog_kworker)) {
   1007		kfree(wd_data);
   1008		return -ENODEV;
   1009	}
   1010
   1011	device_initialize(&wd_data->dev);
   1012	wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
   1013	wd_data->dev.class = &watchdog_class;
   1014	wd_data->dev.parent = wdd->parent;
   1015	wd_data->dev.groups = wdd->groups;
   1016	wd_data->dev.release = watchdog_core_data_release;
   1017	dev_set_drvdata(&wd_data->dev, wdd);
   1018	dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
   1019
   1020	kthread_init_work(&wd_data->work, watchdog_ping_work);
   1021	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
   1022	wd_data->timer.function = watchdog_timer_expired;
   1023	watchdog_hrtimer_pretimeout_init(wdd);
   1024
   1025	if (wdd->id == 0) {
   1026		old_wd_data = wd_data;
   1027		watchdog_miscdev.parent = wdd->parent;
   1028		err = misc_register(&watchdog_miscdev);
   1029		if (err != 0) {
   1030			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
   1031				wdd->info->identity, WATCHDOG_MINOR, err);
   1032			if (err == -EBUSY)
   1033				pr_err("%s: a legacy watchdog module is probably present.\n",
   1034					wdd->info->identity);
   1035			old_wd_data = NULL;
   1036			put_device(&wd_data->dev);
   1037			return err;
   1038		}
   1039	}
   1040
   1041	/* Fill in the data structures */
   1042	cdev_init(&wd_data->cdev, &watchdog_fops);
   1043
   1044	/* Add the device */
   1045	err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
   1046	if (err) {
   1047		pr_err("watchdog%d unable to add device %d:%d\n",
   1048			wdd->id,  MAJOR(watchdog_devt), wdd->id);
   1049		if (wdd->id == 0) {
   1050			misc_deregister(&watchdog_miscdev);
   1051			old_wd_data = NULL;
   1052			put_device(&wd_data->dev);
   1053		}
   1054		return err;
   1055	}
   1056
   1057	wd_data->cdev.owner = wdd->ops->owner;
   1058
   1059	/* Record time of most recent heartbeat as 'just before now'. */
   1060	wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);
   1061	watchdog_set_open_deadline(wd_data);
   1062
   1063	/*
   1064	 * If the watchdog is running, prevent its driver from being unloaded,
   1065	 * and schedule an immediate ping.
   1066	 */
   1067	if (watchdog_hw_running(wdd)) {
   1068		__module_get(wdd->ops->owner);
   1069		get_device(&wd_data->dev);
   1070		if (handle_boot_enabled)
   1071			hrtimer_start(&wd_data->timer, 0,
   1072				      HRTIMER_MODE_REL_HARD);
   1073		else
   1074			pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
   1075				wdd->id);
   1076	}
   1077
   1078	return 0;
   1079}
   1080
   1081/*
   1082 * watchdog_cdev_unregister - unregister watchdog character device
   1083 * @wdd: Watchdog device
   1084 *
   1085 * Unregister watchdog character device and if needed the legacy
   1086 * /dev/watchdog device.
   1087 */
   1088static void watchdog_cdev_unregister(struct watchdog_device *wdd)
   1089{
   1090	struct watchdog_core_data *wd_data = wdd->wd_data;
   1091
   1092	cdev_device_del(&wd_data->cdev, &wd_data->dev);
   1093	if (wdd->id == 0) {
   1094		misc_deregister(&watchdog_miscdev);
   1095		old_wd_data = NULL;
   1096	}
   1097
   1098	if (watchdog_active(wdd) &&
   1099	    test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) {
   1100		watchdog_stop(wdd);
   1101	}
   1102
   1103	watchdog_hrtimer_pretimeout_stop(wdd);
   1104
   1105	mutex_lock(&wd_data->lock);
   1106	wd_data->wdd = NULL;
   1107	wdd->wd_data = NULL;
   1108	mutex_unlock(&wd_data->lock);
   1109
   1110	hrtimer_cancel(&wd_data->timer);
   1111	kthread_cancel_work_sync(&wd_data->work);
   1112
   1113	put_device(&wd_data->dev);
   1114}
   1115
   1116/**
   1117 * watchdog_dev_register - register a watchdog device
   1118 * @wdd: Watchdog device
   1119 *
   1120 * Register a watchdog device including handling the legacy
   1121 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
   1122 * thus we set it up like that.
   1123 *
   1124 * Return: 0 if successful, error otherwise.
   1125 */
   1126int watchdog_dev_register(struct watchdog_device *wdd)
   1127{
   1128	int ret;
   1129
   1130	ret = watchdog_cdev_register(wdd);
   1131	if (ret)
   1132		return ret;
   1133
   1134	ret = watchdog_register_pretimeout(wdd);
   1135	if (ret)
   1136		watchdog_cdev_unregister(wdd);
   1137
   1138	return ret;
   1139}
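
For context, drivers reach this registration path through watchdog_register_device() (or its devm_ variant) from watchdog_core.c. A compressed, hedged sketch of a driver hooking into the core; the foo_wdt_* names are placeholders, and the hardware accesses and platform_driver boilerplate are omitted.

/* Illustration only: skeleton of a driver that registers with this core. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>

static int foo_wdt_start(struct watchdog_device *wdd) { /* poke hw */ return 0; }
static int foo_wdt_stop(struct watchdog_device *wdd)  { /* halt hw */ return 0; }
static int foo_wdt_ping(struct watchdog_device *wdd)  { /* feed hw */ return 0; }

static const struct watchdog_info foo_wdt_info = {
	.options  = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity = "foo watchdog",
};

static const struct watchdog_ops foo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = foo_wdt_start,
	.stop  = foo_wdt_stop,
	.ping  = foo_wdt_ping,
};

static struct watchdog_device foo_wdd = {
	.info        = &foo_wdt_info,
	.ops         = &foo_wdt_ops,
	.timeout     = 30,
	.min_timeout = 1,
	.max_timeout = 255,
};

static int foo_wdt_probe(struct platform_device *pdev)
{
	watchdog_set_nowayout(&foo_wdd, false);
	/* devm_ variant ties unregistration to the device lifetime */
	return devm_watchdog_register_device(&pdev->dev, &foo_wdd);
}
/* platform_driver registration omitted for brevity */
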
   1140
   1141/**
   1142 * watchdog_dev_unregister - unregister a watchdog device
   1143 * @wdd: watchdog device
   1144 *
   1145 * Unregister watchdog device and if needed the legacy
   1146 * /dev/watchdog device.
   1147 */
   1148void watchdog_dev_unregister(struct watchdog_device *wdd)
   1149{
   1150	watchdog_unregister_pretimeout(wdd);
   1151	watchdog_cdev_unregister(wdd);
   1152}
   1153
   1154/**
   1155 * watchdog_set_last_hw_keepalive - set last HW keepalive time for watchdog
   1156 * @wdd:		Watchdog device
   1157 * @last_ping_ms:	Time since last HW heartbeat
   1158 *
   1159 * Adjusts the last known HW keepalive time for a watchdog timer.
   1160 * This is needed if the watchdog is already running when the probe
   1161 * function is called, and it can't be pinged immediately. This
   1162 * function must be called immediately after watchdog registration,
   1163 * and min_hw_heartbeat_ms must be set for this to be useful.
   1164 *
   1165 * Return: 0 if successful, error otherwise.
   1166 */
   1167int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
   1168				   unsigned int last_ping_ms)
   1169{
   1170	struct watchdog_core_data *wd_data;
   1171	ktime_t now;
   1172
   1173	if (!wdd)
   1174		return -EINVAL;
   1175
   1176	wd_data = wdd->wd_data;
   1177
   1178	now = ktime_get();
   1179
   1180	wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms));
   1181
   1182	if (watchdog_hw_running(wdd) && handle_boot_enabled)
   1183		return __watchdog_ping(wdd);
   1184
   1185	return 0;
   1186}
   1187EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
   1188
   1189/**
   1190 * watchdog_dev_init - init dev part of watchdog core
   1191 *
   1192 * Allocate a range of chardev nodes to use for watchdog devices.
   1193 *
   1194 * Return: 0 if successful, error otherwise.
   1195 */
   1196int __init watchdog_dev_init(void)
   1197{
   1198	int err;
   1199
   1200	watchdog_kworker = kthread_create_worker(0, "watchdogd");
   1201	if (IS_ERR(watchdog_kworker)) {
   1202		pr_err("Failed to create watchdog kworker\n");
   1203		return PTR_ERR(watchdog_kworker);
   1204	}
   1205	sched_set_fifo(watchdog_kworker->task);
   1206
   1207	err = class_register(&watchdog_class);
   1208	if (err < 0) {
   1209		pr_err("couldn't register class\n");
   1210		goto err_register;
   1211	}
   1212
   1213	err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
   1214	if (err < 0) {
   1215		pr_err("watchdog: unable to allocate char dev region\n");
   1216		goto err_alloc;
   1217	}
   1218
   1219	return 0;
   1220
   1221err_alloc:
   1222	class_unregister(&watchdog_class);
   1223err_register:
   1224	kthread_destroy_worker(watchdog_kworker);
   1225	return err;
   1226}
   1227
   1228/**
   1229 * watchdog_dev_exit - exit dev part of watchdog core
   1230 *
   1231 * Release the range of chardev nodes used for watchdog devices.
   1232 */
   1233void __exit watchdog_dev_exit(void)
   1234{
   1235	unregister_chrdev_region(watchdog_devt, MAX_DOGS);
   1236	class_unregister(&watchdog_class);
   1237	kthread_destroy_worker(watchdog_kworker);
   1238}
   1239
   1240int watchdog_dev_suspend(struct watchdog_device *wdd)
   1241{
   1242	struct watchdog_core_data *wd_data = wdd->wd_data;
   1243	int ret = 0;
   1244
   1245	if (!wdd->wd_data)
   1246		return -ENODEV;
   1247
   1248	/* ping for the last time before suspend */
   1249	mutex_lock(&wd_data->lock);
   1250	if (watchdog_worker_should_ping(wd_data))
   1251		ret = __watchdog_ping(wd_data->wdd);
   1252	mutex_unlock(&wd_data->lock);
   1253
   1254	if (ret)
   1255		return ret;
   1256
   1257	/*
   1258	 * make sure that watchdog worker will not kick in when the wdog is
   1259	 * suspended
   1260	 */
   1261	hrtimer_cancel(&wd_data->timer);
   1262	kthread_cancel_work_sync(&wd_data->work);
   1263
   1264	return 0;
   1265}
   1266
   1267int watchdog_dev_resume(struct watchdog_device *wdd)
   1268{
   1269	struct watchdog_core_data *wd_data = wdd->wd_data;
   1270	int ret = 0;
   1271
   1272	if (!wdd->wd_data)
   1273		return -ENODEV;
   1274
   1275	/*
   1276	 * __watchdog_ping will also retrigger hrtimer and therefore restore the
   1277	 * ping worker if needed.
   1278	 */
   1279	mutex_lock(&wd_data->lock);
   1280	if (watchdog_worker_should_ping(wd_data))
   1281		ret = __watchdog_ping(wd_data->wdd);
   1282	mutex_unlock(&wd_data->lock);
   1283
   1284	return ret;
   1285}
   1286
   1287module_param(handle_boot_enabled, bool, 0444);
   1288MODULE_PARM_DESC(handle_boot_enabled,
   1289	"Watchdog core auto-updates boot enabled watchdogs before userspace takes over (default="
   1290	__MODULE_STRING(IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) ")");
   1291
   1292module_param(open_timeout, uint, 0644);
   1293MODULE_PARM_DESC(open_timeout,
   1294	"Maximum time (in seconds, 0 means infinity) for userspace to take over a running watchdog (default="
   1295	__MODULE_STRING(CONFIG_WATCHDOG_OPEN_TIMEOUT) ")");
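
This file is normally built into the watchdog core module ("watchdog"), so these parameters are usually given on the kernel command line or through modprobe; the values below are illustrative, not defaults.

	watchdog.handle_boot_enabled=0 watchdog.open_timeout=60    (kernel command line, built-in core)
	options watchdog open_timeout=60                            (/etc/modprobe.d/, modular core)

Because open_timeout is registered with mode 0644, it can also be changed at runtime through /sys/module/watchdog/parameters/open_timeout.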