cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

main.c (40016B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * main.c - Multi purpose firmware loading support
      4 *
      5 * Copyright (c) 2003 Manuel Estrada Sainz
      6 *
      7 * Please see Documentation/driver-api/firmware/ for more information.
      8 *
      9 */
     10
     11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     12
     13#include <linux/capability.h>
     14#include <linux/device.h>
     15#include <linux/kernel_read_file.h>
     16#include <linux/module.h>
     17#include <linux/init.h>
     18#include <linux/initrd.h>
     19#include <linux/timer.h>
     20#include <linux/vmalloc.h>
     21#include <linux/interrupt.h>
     22#include <linux/bitops.h>
     23#include <linux/mutex.h>
     24#include <linux/workqueue.h>
     25#include <linux/highmem.h>
     26#include <linux/firmware.h>
     27#include <linux/slab.h>
     28#include <linux/sched.h>
     29#include <linux/file.h>
     30#include <linux/list.h>
     31#include <linux/fs.h>
     32#include <linux/async.h>
     33#include <linux/pm.h>
     34#include <linux/suspend.h>
     35#include <linux/syscore_ops.h>
     36#include <linux/reboot.h>
     37#include <linux/security.h>
     38#include <linux/zstd.h>
     39#include <linux/xz.h>
     40
     41#include <generated/utsrelease.h>
     42
     43#include "../base.h"
     44#include "firmware.h"
     45#include "fallback.h"
     46
     47MODULE_AUTHOR("Manuel Estrada Sainz");
     48MODULE_DESCRIPTION("Multi purpose firmware loading support");
     49MODULE_LICENSE("GPL");
     50
     51struct firmware_cache {
      52	/* fw_priv instances will be added to the list below */
     53	spinlock_t lock;
     54	struct list_head head;
     55	int state;
     56
     57#ifdef CONFIG_FW_CACHE
     58	/*
     59	 * Names of firmware images which have been cached successfully
      60	 * will be added to the list below so that the device uncache
     61	 * helper can trace which firmware images have been cached
     62	 * before.
     63	 */
     64	spinlock_t name_lock;
     65	struct list_head fw_names;
     66
     67	struct delayed_work work;
     68
     69	struct notifier_block   pm_notify;
     70#endif
     71};
     72
     73struct fw_cache_entry {
     74	struct list_head list;
     75	const char *name;
     76};
     77
     78struct fw_name_devm {
     79	unsigned long magic;
     80	const char *name;
     81};
     82
     83static inline struct fw_priv *to_fw_priv(struct kref *ref)
     84{
     85	return container_of(ref, struct fw_priv, ref);
     86}
     87
     88#define	FW_LOADER_NO_CACHE	0
     89#define	FW_LOADER_START_CACHE	1
     90
      91/* fw_lock could be moved to 'struct fw_sysfs', but since it is just
      92 * guarding corner cases, a global lock should be OK */
     93DEFINE_MUTEX(fw_lock);
     94
     95struct firmware_cache fw_cache;
     96
     97void fw_state_init(struct fw_priv *fw_priv)
     98{
     99	struct fw_state *fw_st = &fw_priv->fw_st;
    100
    101	init_completion(&fw_st->completion);
    102	fw_st->status = FW_STATUS_UNKNOWN;
    103}
    104
    105static inline int fw_state_wait(struct fw_priv *fw_priv)
    106{
    107	return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
    108}
    109
    110static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);
    111
    112static struct fw_priv *__allocate_fw_priv(const char *fw_name,
    113					  struct firmware_cache *fwc,
    114					  void *dbuf,
    115					  size_t size,
    116					  size_t offset,
    117					  u32 opt_flags)
    118{
    119	struct fw_priv *fw_priv;
    120
    121	/* For a partial read, the buffer must be preallocated. */
    122	if ((opt_flags & FW_OPT_PARTIAL) && !dbuf)
    123		return NULL;
    124
    125	/* Only partial reads are allowed to use an offset. */
    126	if (offset != 0 && !(opt_flags & FW_OPT_PARTIAL))
    127		return NULL;
    128
    129	fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
    130	if (!fw_priv)
    131		return NULL;
    132
    133	fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
    134	if (!fw_priv->fw_name) {
    135		kfree(fw_priv);
    136		return NULL;
    137	}
    138
    139	kref_init(&fw_priv->ref);
    140	fw_priv->fwc = fwc;
    141	fw_priv->data = dbuf;
    142	fw_priv->allocated_size = size;
    143	fw_priv->offset = offset;
    144	fw_priv->opt_flags = opt_flags;
    145	fw_state_init(fw_priv);
    146#ifdef CONFIG_FW_LOADER_USER_HELPER
    147	INIT_LIST_HEAD(&fw_priv->pending_list);
    148#endif
    149
    150	pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);
    151
    152	return fw_priv;
    153}
    154
    155static struct fw_priv *__lookup_fw_priv(const char *fw_name)
    156{
    157	struct fw_priv *tmp;
    158	struct firmware_cache *fwc = &fw_cache;
    159
    160	list_for_each_entry(tmp, &fwc->head, list)
    161		if (!strcmp(tmp->fw_name, fw_name))
    162			return tmp;
    163	return NULL;
    164}
    165
    166/* Returns 1 for batching firmware requests with the same name */
    167int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
    168			 struct fw_priv **fw_priv, void *dbuf, size_t size,
    169			 size_t offset, u32 opt_flags)
    170{
    171	struct fw_priv *tmp;
    172
    173	spin_lock(&fwc->lock);
    174	/*
    175	 * Do not merge requests that are marked to be non-cached or
    176	 * are performing partial reads.
    177	 */
    178	if (!(opt_flags & (FW_OPT_NOCACHE | FW_OPT_PARTIAL))) {
    179		tmp = __lookup_fw_priv(fw_name);
    180		if (tmp) {
    181			kref_get(&tmp->ref);
    182			spin_unlock(&fwc->lock);
    183			*fw_priv = tmp;
    184			pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
    185			return 1;
    186		}
    187	}
    188
    189	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size, offset, opt_flags);
    190	if (tmp) {
    191		INIT_LIST_HEAD(&tmp->list);
    192		if (!(opt_flags & FW_OPT_NOCACHE))
    193			list_add(&tmp->list, &fwc->head);
    194	}
    195	spin_unlock(&fwc->lock);
    196
    197	*fw_priv = tmp;
    198
    199	return tmp ? 0 : -ENOMEM;
    200}
    201
    202static void __free_fw_priv(struct kref *ref)
    203	__releases(&fwc->lock)
    204{
    205	struct fw_priv *fw_priv = to_fw_priv(ref);
    206	struct firmware_cache *fwc = fw_priv->fwc;
    207
    208	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
    209		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
    210		 (unsigned int)fw_priv->size);
    211
    212	list_del(&fw_priv->list);
    213	spin_unlock(&fwc->lock);
    214
    215	if (fw_is_paged_buf(fw_priv))
    216		fw_free_paged_buf(fw_priv);
    217	else if (!fw_priv->allocated_size)
    218		vfree(fw_priv->data);
    219
    220	kfree_const(fw_priv->fw_name);
    221	kfree(fw_priv);
    222}
    223
    224void free_fw_priv(struct fw_priv *fw_priv)
    225{
    226	struct firmware_cache *fwc = fw_priv->fwc;
    227	spin_lock(&fwc->lock);
    228	if (!kref_put(&fw_priv->ref, __free_fw_priv))
    229		spin_unlock(&fwc->lock);
    230}
    231
    232#ifdef CONFIG_FW_LOADER_PAGED_BUF
    233bool fw_is_paged_buf(struct fw_priv *fw_priv)
    234{
    235	return fw_priv->is_paged_buf;
    236}
    237
    238void fw_free_paged_buf(struct fw_priv *fw_priv)
    239{
    240	int i;
    241
    242	if (!fw_priv->pages)
    243		return;
    244
    245	vunmap(fw_priv->data);
    246
    247	for (i = 0; i < fw_priv->nr_pages; i++)
    248		__free_page(fw_priv->pages[i]);
    249	kvfree(fw_priv->pages);
    250	fw_priv->pages = NULL;
    251	fw_priv->page_array_size = 0;
    252	fw_priv->nr_pages = 0;
    253	fw_priv->data = NULL;
    254	fw_priv->size = 0;
    255}
    256
    257int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
    258{
    259	/* If the array of pages is too small, grow it */
    260	if (fw_priv->page_array_size < pages_needed) {
    261		int new_array_size = max(pages_needed,
    262					 fw_priv->page_array_size * 2);
    263		struct page **new_pages;
    264
    265		new_pages = kvmalloc_array(new_array_size, sizeof(void *),
    266					   GFP_KERNEL);
    267		if (!new_pages)
    268			return -ENOMEM;
    269		memcpy(new_pages, fw_priv->pages,
    270		       fw_priv->page_array_size * sizeof(void *));
    271		memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
    272		       (new_array_size - fw_priv->page_array_size));
    273		kvfree(fw_priv->pages);
    274		fw_priv->pages = new_pages;
    275		fw_priv->page_array_size = new_array_size;
    276	}
    277
    278	while (fw_priv->nr_pages < pages_needed) {
    279		fw_priv->pages[fw_priv->nr_pages] =
    280			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
    281
    282		if (!fw_priv->pages[fw_priv->nr_pages])
    283			return -ENOMEM;
    284		fw_priv->nr_pages++;
    285	}
    286
    287	return 0;
    288}
    289
    290int fw_map_paged_buf(struct fw_priv *fw_priv)
    291{
     292	/* a paged buffer should be mapped/unmapped only once */
    293	if (!fw_priv->pages)
    294		return 0;
    295
    296	vunmap(fw_priv->data);
    297	fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
    298			     PAGE_KERNEL_RO);
    299	if (!fw_priv->data)
    300		return -ENOMEM;
    301
    302	return 0;
    303}
    304#endif
    305
    306/*
    307 * ZSTD-compressed firmware support
    308 */
    309#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
    310static int fw_decompress_zstd(struct device *dev, struct fw_priv *fw_priv,
    311			      size_t in_size, const void *in_buffer)
    312{
    313	size_t len, out_size, workspace_size;
    314	void *workspace, *out_buf;
    315	zstd_dctx *ctx;
    316	int err;
    317
    318	if (fw_priv->allocated_size) {
    319		out_size = fw_priv->allocated_size;
    320		out_buf = fw_priv->data;
    321	} else {
    322		zstd_frame_header params;
    323
    324		if (zstd_get_frame_header(&params, in_buffer, in_size) ||
    325		    params.frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
    326			dev_dbg(dev, "%s: invalid zstd header\n", __func__);
    327			return -EINVAL;
    328		}
    329		out_size = params.frameContentSize;
    330		out_buf = vzalloc(out_size);
    331		if (!out_buf)
    332			return -ENOMEM;
    333	}
    334
    335	workspace_size = zstd_dctx_workspace_bound();
    336	workspace = kvzalloc(workspace_size, GFP_KERNEL);
    337	if (!workspace) {
    338		err = -ENOMEM;
    339		goto error;
    340	}
    341
    342	ctx = zstd_init_dctx(workspace, workspace_size);
    343	if (!ctx) {
    344		dev_dbg(dev, "%s: failed to initialize context\n", __func__);
    345		err = -EINVAL;
    346		goto error;
    347	}
    348
    349	len = zstd_decompress_dctx(ctx, out_buf, out_size, in_buffer, in_size);
    350	if (zstd_is_error(len)) {
    351		dev_dbg(dev, "%s: failed to decompress: %d\n", __func__,
    352			zstd_get_error_code(len));
    353		err = -EINVAL;
    354		goto error;
    355	}
    356
    357	if (!fw_priv->allocated_size)
    358		fw_priv->data = out_buf;
    359	fw_priv->size = len;
    360	err = 0;
    361
    362 error:
    363	kvfree(workspace);
    364	if (err && !fw_priv->allocated_size)
    365		vfree(out_buf);
    366	return err;
    367}
    368#endif /* CONFIG_FW_LOADER_COMPRESS_ZSTD */
    369
    370/*
    371 * XZ-compressed firmware support
    372 */
    373#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
    374/* show an error and return the standard error code */
    375static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
    376{
    377	if (xz_ret != XZ_STREAM_END) {
    378		dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret);
    379		return xz_ret == XZ_MEM_ERROR ? -ENOMEM : -EINVAL;
    380	}
    381	return 0;
    382}
    383
    384/* single-shot decompression onto the pre-allocated buffer */
    385static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv,
    386				   size_t in_size, const void *in_buffer)
    387{
    388	struct xz_dec *xz_dec;
    389	struct xz_buf xz_buf;
    390	enum xz_ret xz_ret;
    391
    392	xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1);
    393	if (!xz_dec)
    394		return -ENOMEM;
    395
    396	xz_buf.in_size = in_size;
    397	xz_buf.in = in_buffer;
    398	xz_buf.in_pos = 0;
    399	xz_buf.out_size = fw_priv->allocated_size;
    400	xz_buf.out = fw_priv->data;
    401	xz_buf.out_pos = 0;
    402
    403	xz_ret = xz_dec_run(xz_dec, &xz_buf);
    404	xz_dec_end(xz_dec);
    405
    406	fw_priv->size = xz_buf.out_pos;
    407	return fw_decompress_xz_error(dev, xz_ret);
    408}
    409
     410/* decompress onto a paged buffer and map it */
    411static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
    412				  size_t in_size, const void *in_buffer)
    413{
    414	struct xz_dec *xz_dec;
    415	struct xz_buf xz_buf;
    416	enum xz_ret xz_ret;
    417	struct page *page;
    418	int err = 0;
    419
    420	xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
    421	if (!xz_dec)
    422		return -ENOMEM;
    423
    424	xz_buf.in_size = in_size;
    425	xz_buf.in = in_buffer;
    426	xz_buf.in_pos = 0;
    427
    428	fw_priv->is_paged_buf = true;
    429	fw_priv->size = 0;
    430	do {
    431		if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) {
    432			err = -ENOMEM;
    433			goto out;
    434		}
    435
     436		/* decompress onto the newly allocated page */
    437		page = fw_priv->pages[fw_priv->nr_pages - 1];
    438		xz_buf.out = kmap(page);
    439		xz_buf.out_pos = 0;
    440		xz_buf.out_size = PAGE_SIZE;
    441		xz_ret = xz_dec_run(xz_dec, &xz_buf);
    442		kunmap(page);
    443		fw_priv->size += xz_buf.out_pos;
    444		/* partial decompression means either end or error */
    445		if (xz_buf.out_pos != PAGE_SIZE)
    446			break;
    447	} while (xz_ret == XZ_OK);
    448
    449	err = fw_decompress_xz_error(dev, xz_ret);
    450	if (!err)
    451		err = fw_map_paged_buf(fw_priv);
    452
    453 out:
    454	xz_dec_end(xz_dec);
    455	return err;
    456}
    457
    458static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
    459			    size_t in_size, const void *in_buffer)
    460{
     461	/* if the buffer is pre-allocated, we can decompress in single-shot mode */
    462	if (fw_priv->data)
    463		return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer);
    464	else
    465		return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
    466}
    467#endif /* CONFIG_FW_LOADER_COMPRESS_XZ */
    468
    469/* direct firmware loading support */
    470static char fw_path_para[256];
    471static const char * const fw_path[] = {
    472	fw_path_para,
    473	"/lib/firmware/updates/" UTS_RELEASE,
    474	"/lib/firmware/updates",
    475	"/lib/firmware/" UTS_RELEASE,
    476	"/lib/firmware"
    477};
    478
    479/*
     480 * Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on
     481 * the kernel command line, because firmware_class is generally built
     482 * into the kernel instead of being a module.
    483 */
    484module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
    485MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
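
/*
 * A minimal illustration, assuming a purely hypothetical /vendor/firmware
 * directory: booting with "firmware_class.path=/vendor/firmware" on the
 * kernel command line makes that directory be searched before the default
 * /lib/firmware locations listed above.
 */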
    486
    487static int
    488fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
    489			   const char *suffix,
    490			   int (*decompress)(struct device *dev,
    491					     struct fw_priv *fw_priv,
    492					     size_t in_size,
    493					     const void *in_buffer))
    494{
    495	size_t size;
    496	int i, len;
    497	int rc = -ENOENT;
    498	char *path;
    499	size_t msize = INT_MAX;
    500	void *buffer = NULL;
    501
    502	/* Already populated data member means we're loading into a buffer */
    503	if (!decompress && fw_priv->data) {
    504		buffer = fw_priv->data;
    505		msize = fw_priv->allocated_size;
    506	}
    507
    508	path = __getname();
    509	if (!path)
    510		return -ENOMEM;
    511
    512	wait_for_initramfs();
    513	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
    514		size_t file_size = 0;
    515		size_t *file_size_ptr = NULL;
    516
    517		/* skip the unset customized path */
    518		if (!fw_path[i][0])
    519			continue;
    520
    521		len = snprintf(path, PATH_MAX, "%s/%s%s",
    522			       fw_path[i], fw_priv->fw_name, suffix);
    523		if (len >= PATH_MAX) {
    524			rc = -ENAMETOOLONG;
    525			break;
    526		}
    527
    528		fw_priv->size = 0;
    529
    530		/*
    531		 * The total file size is only examined when doing a partial
    532		 * read; the "full read" case needs to fail if the whole
    533		 * firmware was not completely loaded.
    534		 */
    535		if ((fw_priv->opt_flags & FW_OPT_PARTIAL) && buffer)
    536			file_size_ptr = &file_size;
    537
    538		/* load firmware files from the mount namespace of init */
    539		rc = kernel_read_file_from_path_initns(path, fw_priv->offset,
    540						       &buffer, msize,
    541						       file_size_ptr,
    542						       READING_FIRMWARE);
    543		if (rc < 0) {
    544			if (rc != -ENOENT)
    545				dev_warn(device, "loading %s failed with error %d\n",
    546					 path, rc);
    547			else
    548				dev_dbg(device, "loading %s failed for no such file or directory.\n",
    549					 path);
    550			continue;
    551		}
    552		size = rc;
    553		rc = 0;
    554
    555		dev_dbg(device, "Loading firmware from %s\n", path);
    556		if (decompress) {
    557			dev_dbg(device, "f/w decompressing %s\n",
    558				fw_priv->fw_name);
    559			rc = decompress(device, fw_priv, size, buffer);
    560			/* discard the superfluous original content */
    561			vfree(buffer);
    562			buffer = NULL;
    563			if (rc) {
    564				fw_free_paged_buf(fw_priv);
    565				continue;
    566			}
    567		} else {
    568			dev_dbg(device, "direct-loading %s\n",
    569				fw_priv->fw_name);
    570			if (!fw_priv->data)
    571				fw_priv->data = buffer;
    572			fw_priv->size = size;
    573		}
    574		fw_state_done(fw_priv);
    575		break;
    576	}
    577	__putname(path);
    578
    579	return rc;
    580}
    581
    582/* firmware holds the ownership of pages */
    583static void firmware_free_data(const struct firmware *fw)
    584{
    585	/* Loaded directly? */
    586	if (!fw->priv) {
    587		vfree(fw->data);
    588		return;
    589	}
    590	free_fw_priv(fw->priv);
    591}
    592
     593/* copy the paged buffer info from fw_priv into the firmware struct */
    594static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
    595{
    596	fw->priv = fw_priv;
    597	fw->size = fw_priv->size;
    598	fw->data = fw_priv->data;
    599
    600	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
    601		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
    602		 (unsigned int)fw_priv->size);
    603}
    604
    605#ifdef CONFIG_FW_CACHE
    606static void fw_name_devm_release(struct device *dev, void *res)
    607{
    608	struct fw_name_devm *fwn = res;
    609
    610	if (fwn->magic == (unsigned long)&fw_cache)
    611		pr_debug("%s: fw_name-%s devm-%p released\n",
    612				__func__, fwn->name, res);
    613	kfree_const(fwn->name);
    614}
    615
    616static int fw_devm_match(struct device *dev, void *res,
    617		void *match_data)
    618{
    619	struct fw_name_devm *fwn = res;
    620
    621	return (fwn->magic == (unsigned long)&fw_cache) &&
    622		!strcmp(fwn->name, match_data);
    623}
    624
    625static struct fw_name_devm *fw_find_devm_name(struct device *dev,
    626		const char *name)
    627{
    628	struct fw_name_devm *fwn;
    629
    630	fwn = devres_find(dev, fw_name_devm_release,
    631			  fw_devm_match, (void *)name);
    632	return fwn;
    633}
    634
    635static bool fw_cache_is_setup(struct device *dev, const char *name)
    636{
    637	struct fw_name_devm *fwn;
    638
    639	fwn = fw_find_devm_name(dev, name);
    640	if (fwn)
    641		return true;
    642
    643	return false;
    644}
    645
    646/* add firmware name into devres list */
    647static int fw_add_devm_name(struct device *dev, const char *name)
    648{
    649	struct fw_name_devm *fwn;
    650
    651	if (fw_cache_is_setup(dev, name))
    652		return 0;
    653
    654	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
    655			   GFP_KERNEL);
    656	if (!fwn)
    657		return -ENOMEM;
    658	fwn->name = kstrdup_const(name, GFP_KERNEL);
    659	if (!fwn->name) {
    660		devres_free(fwn);
    661		return -ENOMEM;
    662	}
    663
    664	fwn->magic = (unsigned long)&fw_cache;
    665	devres_add(dev, fwn);
    666
    667	return 0;
    668}
    669#else
    670static bool fw_cache_is_setup(struct device *dev, const char *name)
    671{
    672	return false;
    673}
    674
    675static int fw_add_devm_name(struct device *dev, const char *name)
    676{
    677	return 0;
    678}
    679#endif
    680
    681int assign_fw(struct firmware *fw, struct device *device)
    682{
    683	struct fw_priv *fw_priv = fw->priv;
    684	int ret;
    685
    686	mutex_lock(&fw_lock);
    687	if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
    688		mutex_unlock(&fw_lock);
    689		return -ENOENT;
    690	}
    691
    692	/*
    693	 * add firmware name into devres list so that we can auto cache
    694	 * and uncache firmware for device.
    695	 *
     696	 * the device may have been deleted already, but the problem
     697	 * should be fixed in devres or the driver core.
    698	 */
    699	/* don't cache firmware handled without uevent */
    700	if (device && (fw_priv->opt_flags & FW_OPT_UEVENT) &&
    701	    !(fw_priv->opt_flags & FW_OPT_NOCACHE)) {
    702		ret = fw_add_devm_name(device, fw_priv->fw_name);
    703		if (ret) {
    704			mutex_unlock(&fw_lock);
    705			return ret;
    706		}
    707	}
    708
    709	/*
     710	 * After caching of firmware images has started, let this request
     711	 * piggyback on it.
    712	 */
    713	if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
    714	    fw_priv->fwc->state == FW_LOADER_START_CACHE)
    715		fw_cache_piggyback_on_request(fw_priv);
    716
    717	/* pass the pages buffer to driver at the last minute */
    718	fw_set_page_data(fw_priv, fw);
    719	mutex_unlock(&fw_lock);
    720	return 0;
    721}
    722
    723/* prepare firmware and firmware_buf structs;
     724 * return 0 if a firmware is already assigned, 1 if we need to load one,
    725 * or a negative error code
    726 */
    727static int
    728_request_firmware_prepare(struct firmware **firmware_p, const char *name,
    729			  struct device *device, void *dbuf, size_t size,
    730			  size_t offset, u32 opt_flags)
    731{
    732	struct firmware *firmware;
    733	struct fw_priv *fw_priv;
    734	int ret;
    735
    736	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
    737	if (!firmware) {
    738		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
    739			__func__);
    740		return -ENOMEM;
    741	}
    742
    743	if (firmware_request_builtin_buf(firmware, name, dbuf, size)) {
    744		dev_dbg(device, "using built-in %s\n", name);
    745		return 0; /* assigned */
    746	}
    747
    748	ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
    749				   offset, opt_flags);
    750
    751	/*
    752	 * bind with 'priv' now to avoid warning in failure path
    753	 * of requesting firmware.
    754	 */
    755	firmware->priv = fw_priv;
    756
    757	if (ret > 0) {
    758		ret = fw_state_wait(fw_priv);
    759		if (!ret) {
    760			fw_set_page_data(fw_priv, firmware);
    761			return 0; /* assigned */
    762		}
    763	}
    764
    765	if (ret < 0)
    766		return ret;
    767	return 1; /* need to load */
    768}
    769
    770/*
     771 * Batched requests need only one wake; we need to do this step last due to the
    772 * fallback mechanism. The buf is protected with kref_get(), and it won't be
    773 * released until the last user calls release_firmware().
    774 *
    775 * Failed batched requests are possible as well, in such cases we just share
    776 * the struct fw_priv and won't release it until all requests are woken
    777 * and have gone through this same path.
    778 */
    779static void fw_abort_batch_reqs(struct firmware *fw)
    780{
    781	struct fw_priv *fw_priv;
    782
    783	/* Loaded directly? */
    784	if (!fw || !fw->priv)
    785		return;
    786
    787	fw_priv = fw->priv;
    788	mutex_lock(&fw_lock);
    789	if (!fw_state_is_aborted(fw_priv))
    790		fw_state_aborted(fw_priv);
    791	mutex_unlock(&fw_lock);
    792}
    793
    794/* called from request_firmware() and request_firmware_work_func() */
    795static int
    796_request_firmware(const struct firmware **firmware_p, const char *name,
    797		  struct device *device, void *buf, size_t size,
    798		  size_t offset, u32 opt_flags)
    799{
    800	struct firmware *fw = NULL;
    801	struct cred *kern_cred = NULL;
    802	const struct cred *old_cred;
    803	bool nondirect = false;
    804	int ret;
    805
    806	if (!firmware_p)
    807		return -EINVAL;
    808
    809	if (!name || name[0] == '\0') {
    810		ret = -EINVAL;
    811		goto out;
    812	}
    813
    814	ret = _request_firmware_prepare(&fw, name, device, buf, size,
    815					offset, opt_flags);
    816	if (ret <= 0) /* error or already assigned */
    817		goto out;
    818
    819	/*
    820	 * We are about to try to access the firmware file. Because we may have been
    821	 * called by a driver when serving an unrelated request from userland, we use
    822	 * the kernel credentials to read the file.
    823	 */
    824	kern_cred = prepare_kernel_cred(NULL);
    825	if (!kern_cred) {
    826		ret = -ENOMEM;
    827		goto out;
    828	}
    829	old_cred = override_creds(kern_cred);
    830
    831	ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
    832
    833	/* Only full reads can support decompression, platform, and sysfs. */
    834	if (!(opt_flags & FW_OPT_PARTIAL))
    835		nondirect = true;
    836
    837#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
    838	if (ret == -ENOENT && nondirect)
    839		ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
    840						 fw_decompress_zstd);
    841#endif
    842#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
    843	if (ret == -ENOENT && nondirect)
    844		ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
    845						 fw_decompress_xz);
    846#endif
    847	if (ret == -ENOENT && nondirect)
    848		ret = firmware_fallback_platform(fw->priv);
    849
    850	if (ret) {
    851		if (!(opt_flags & FW_OPT_NO_WARN))
    852			dev_warn(device,
    853				 "Direct firmware load for %s failed with error %d\n",
    854				 name, ret);
    855		if (nondirect)
    856			ret = firmware_fallback_sysfs(fw, name, device,
    857						      opt_flags, ret);
    858	} else
    859		ret = assign_fw(fw, device);
    860
    861	revert_creds(old_cred);
    862	put_cred(kern_cred);
    863
    864 out:
    865	if (ret < 0) {
    866		fw_abort_batch_reqs(fw);
    867		release_firmware(fw);
    868		fw = NULL;
    869	}
    870
    871	*firmware_p = fw;
    872	return ret;
    873}
    874
    875/**
    876 * request_firmware() - send firmware request and wait for it
    877 * @firmware_p: pointer to firmware image
    878 * @name: name of firmware file
    879 * @device: device for which firmware is being loaded
    880 *
    881 *      @firmware_p will be used to return a firmware image by the name
    882 *      of @name for device @device.
    883 *
    884 *      Should be called from user context where sleeping is allowed.
    885 *
    886 *      @name will be used as $FIRMWARE in the uevent environment and
    887 *      should be distinctive enough not to be confused with any other
    888 *      firmware image for this or any other device.
    889 *
    890 *	Caller must hold the reference count of @device.
    891 *
    892 *	The function can be called safely inside device's suspend and
    893 *	resume callback.
    894 **/
    895int
    896request_firmware(const struct firmware **firmware_p, const char *name,
    897		 struct device *device)
    898{
    899	int ret;
    900
    901	/* Need to pin this module until return */
    902	__module_get(THIS_MODULE);
    903	ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
    904				FW_OPT_UEVENT);
    905	module_put(THIS_MODULE);
    906	return ret;
    907}
    908EXPORT_SYMBOL(request_firmware);
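
/*
 * Minimal usage sketch, assuming a hypothetical driver, device and firmware
 * name: a typical probe path requests the image, programs it into the
 * hardware and releases it again.
 */
static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "example/fw.bin", dev);
	if (ret)
		return ret;

	/* ... copy fw->data (fw->size bytes) into the device ... */

	release_firmware(fw);
	return 0;
}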
    909
    910/**
    911 * firmware_request_nowarn() - request for an optional fw module
    912 * @firmware: pointer to firmware image
    913 * @name: name of firmware file
    914 * @device: device for which firmware is being loaded
    915 *
    916 * This function is similar in behaviour to request_firmware(), except it
    917 * doesn't produce warning messages when the file is not found. The sysfs
    918 * fallback mechanism is enabled if direct filesystem lookup fails. However,
    919 * failures to find the firmware file with it are still suppressed. It is
     920 * therefore up to the driver to check the return value of this call and to
     921 * decide when to inform users of errors.
    922 **/
    923int firmware_request_nowarn(const struct firmware **firmware, const char *name,
    924			    struct device *device)
    925{
    926	int ret;
    927
    928	/* Need to pin this module until return */
    929	__module_get(THIS_MODULE);
    930	ret = _request_firmware(firmware, name, device, NULL, 0, 0,
    931				FW_OPT_UEVENT | FW_OPT_NO_WARN);
    932	module_put(THIS_MODULE);
    933	return ret;
    934}
    935EXPORT_SYMBOL_GPL(firmware_request_nowarn);
    936
    937/**
    938 * request_firmware_direct() - load firmware directly without usermode helper
    939 * @firmware_p: pointer to firmware image
    940 * @name: name of firmware file
    941 * @device: device for which firmware is being loaded
    942 *
     943 * This function works pretty much like request_firmware(), but it doesn't
     944 * fall back to the usermode helper even if the firmware couldn't be loaded
     945 * directly from the filesystem.  Hence it's useful for loading optional
     946 * firmware images, which aren't always present, without udev's long timeouts.
    947 **/
    948int request_firmware_direct(const struct firmware **firmware_p,
    949			    const char *name, struct device *device)
    950{
    951	int ret;
    952
    953	__module_get(THIS_MODULE);
    954	ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
    955				FW_OPT_UEVENT | FW_OPT_NO_WARN |
    956				FW_OPT_NOFALLBACK_SYSFS);
    957	module_put(THIS_MODULE);
    958	return ret;
    959}
    960EXPORT_SYMBOL_GPL(request_firmware_direct);
    961
    962/**
    963 * firmware_request_platform() - request firmware with platform-fw fallback
    964 * @firmware: pointer to firmware image
    965 * @name: name of firmware file
    966 * @device: device for which firmware is being loaded
    967 *
     968 * This function is similar in behaviour to request_firmware(), except that if
     969 * direct filesystem lookup fails, it will fall back to looking for a copy of
     970 * the requested firmware embedded in the platform's main (e.g. UEFI) firmware.
    971 **/
    972int firmware_request_platform(const struct firmware **firmware,
    973			      const char *name, struct device *device)
    974{
    975	int ret;
    976
    977	/* Need to pin this module until return */
    978	__module_get(THIS_MODULE);
    979	ret = _request_firmware(firmware, name, device, NULL, 0, 0,
    980				FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM);
    981	module_put(THIS_MODULE);
    982	return ret;
    983}
    984EXPORT_SYMBOL_GPL(firmware_request_platform);
    985
    986/**
    987 * firmware_request_cache() - cache firmware for suspend so resume can use it
    988 * @name: name of firmware file
     989 * @device: device for which firmware should be cached
    990 *
    991 * There are some devices with an optimization that enables the device to not
    992 * require loading firmware on system reboot. This optimization may still
    993 * require the firmware present on resume from suspend. This routine can be
    994 * used to ensure the firmware is present on resume from suspend in these
    995 * situations. This helper is not compatible with drivers which use
    996 * request_firmware_into_buf() or request_firmware_nowait() with no uevent set.
    997 **/
    998int firmware_request_cache(struct device *device, const char *name)
    999{
   1000	int ret;
   1001
   1002	mutex_lock(&fw_lock);
   1003	ret = fw_add_devm_name(device, name);
   1004	mutex_unlock(&fw_lock);
   1005
   1006	return ret;
   1007}
   1008EXPORT_SYMBOL_GPL(firmware_request_cache);
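
/*
 * Minimal sketch of intended usage, assuming a hypothetical driver whose
 * device keeps its firmware across reboots but may need the image again on
 * resume: recording the name from probe is enough for it to be cached
 * before suspend.
 */
static int example_declare_fw_for_resume(struct device *dev)
{
	return firmware_request_cache(dev, "example/fw.bin");
}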
   1009
   1010/**
   1011 * request_firmware_into_buf() - load firmware into a previously allocated buffer
   1012 * @firmware_p: pointer to firmware image
   1013 * @name: name of firmware file
   1014 * @device: device for which firmware is being loaded and DMA region allocated
   1015 * @buf: address of buffer to load firmware into
   1016 * @size: size of buffer
   1017 *
   1018 * This function works pretty much like request_firmware(), but it doesn't
   1019 * allocate a buffer to hold the firmware data. Instead, the firmware
   1020 * is loaded directly into the buffer pointed to by @buf and the @firmware_p
   1021 * data member is pointed at @buf.
   1022 *
   1023 * This function doesn't cache firmware either.
   1024 */
   1025int
   1026request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
   1027			  struct device *device, void *buf, size_t size)
   1028{
   1029	int ret;
   1030
   1031	if (fw_cache_is_setup(device, name))
   1032		return -EOPNOTSUPP;
   1033
   1034	__module_get(THIS_MODULE);
   1035	ret = _request_firmware(firmware_p, name, device, buf, size, 0,
   1036				FW_OPT_UEVENT | FW_OPT_NOCACHE);
   1037	module_put(THIS_MODULE);
   1038	return ret;
   1039}
   1040EXPORT_SYMBOL(request_firmware_into_buf);
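
/*
 * Minimal sketch, assuming a hypothetical driver with a pre-allocated
 * (e.g. DMA-capable) buffer: the image is read directly into @buf and
 * fw->data points at that buffer rather than at a newly allocated one.
 */
static int example_load_fw_into_buf(struct device *dev, void *buf, size_t len)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware_into_buf(&fw, "example/fw.bin", dev, buf, len);
	if (ret)
		return ret;

	/* fw->size is the number of bytes actually loaded into buf */

	release_firmware(fw);
	return 0;
}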
   1041
   1042/**
   1043 * request_partial_firmware_into_buf() - load partial firmware into a previously allocated buffer
   1044 * @firmware_p: pointer to firmware image
   1045 * @name: name of firmware file
   1046 * @device: device for which firmware is being loaded and DMA region allocated
   1047 * @buf: address of buffer to load firmware into
   1048 * @size: size of buffer
   1049 * @offset: offset into file to read
   1050 *
   1051 * This function works pretty much like request_firmware_into_buf except
   1052 * it allows a partial read of the file.
   1053 */
   1054int
   1055request_partial_firmware_into_buf(const struct firmware **firmware_p,
   1056				  const char *name, struct device *device,
   1057				  void *buf, size_t size, size_t offset)
   1058{
   1059	int ret;
   1060
   1061	if (fw_cache_is_setup(device, name))
   1062		return -EOPNOTSUPP;
   1063
   1064	__module_get(THIS_MODULE);
   1065	ret = _request_firmware(firmware_p, name, device, buf, size, offset,
   1066				FW_OPT_UEVENT | FW_OPT_NOCACHE |
   1067				FW_OPT_PARTIAL);
   1068	module_put(THIS_MODULE);
   1069	return ret;
   1070}
   1071EXPORT_SYMBOL(request_partial_firmware_into_buf);
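
/*
 * Minimal sketch, assuming a hypothetical caller that only needs a 4 KiB
 * window starting at byte 4096 of the firmware file: only that range is
 * read into the caller's buffer.
 */
static int example_load_fw_chunk(struct device *dev, void *buf)
{
	const struct firmware *fw;
	int ret;

	ret = request_partial_firmware_into_buf(&fw, "example/fw.bin", dev,
						buf, 4096, 4096);
	if (ret)
		return ret;

	release_firmware(fw);
	return 0;
}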
   1072
   1073/**
   1074 * release_firmware() - release the resource associated with a firmware image
   1075 * @fw: firmware resource to release
   1076 **/
   1077void release_firmware(const struct firmware *fw)
   1078{
   1079	if (fw) {
   1080		if (!firmware_is_builtin(fw))
   1081			firmware_free_data(fw);
   1082		kfree(fw);
   1083	}
   1084}
   1085EXPORT_SYMBOL(release_firmware);
   1086
   1087/* Async support */
   1088struct firmware_work {
   1089	struct work_struct work;
   1090	struct module *module;
   1091	const char *name;
   1092	struct device *device;
   1093	void *context;
   1094	void (*cont)(const struct firmware *fw, void *context);
   1095	u32 opt_flags;
   1096};
   1097
   1098static void request_firmware_work_func(struct work_struct *work)
   1099{
   1100	struct firmware_work *fw_work;
   1101	const struct firmware *fw;
   1102
   1103	fw_work = container_of(work, struct firmware_work, work);
   1104
   1105	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0,
   1106			  fw_work->opt_flags);
   1107	fw_work->cont(fw, fw_work->context);
   1108	put_device(fw_work->device); /* taken in request_firmware_nowait() */
   1109
   1110	module_put(fw_work->module);
   1111	kfree_const(fw_work->name);
   1112	kfree(fw_work);
   1113}
   1114
   1115/**
   1116 * request_firmware_nowait() - asynchronous version of request_firmware
   1117 * @module: module requesting the firmware
    1118 * @uevent: send a uevent to load the firmware image if this flag
    1119 *	is non-zero; otherwise the firmware copy must be done manually.
   1120 * @name: name of firmware file
   1121 * @device: device for which firmware is being loaded
   1122 * @gfp: allocation flags
   1123 * @context: will be passed over to @cont, and
   1124 *	@fw may be %NULL if firmware request fails.
   1125 * @cont: function will be called asynchronously when the firmware
   1126 *	request is over.
   1127 *
   1128 *	Caller must hold the reference count of @device.
   1129 *
   1130 *	Asynchronous variant of request_firmware() for user contexts:
   1131 *		- sleep for as small periods as possible since it may
   1132 *		  increase kernel boot time of built-in device drivers
   1133 *		  requesting firmware in their ->probe() methods, if
   1134 *		  @gfp is GFP_KERNEL.
   1135 *
   1136 *		- can't sleep at all if @gfp is GFP_ATOMIC.
   1137 **/
   1138int
   1139request_firmware_nowait(
   1140	struct module *module, bool uevent,
   1141	const char *name, struct device *device, gfp_t gfp, void *context,
   1142	void (*cont)(const struct firmware *fw, void *context))
   1143{
   1144	struct firmware_work *fw_work;
   1145
   1146	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
   1147	if (!fw_work)
   1148		return -ENOMEM;
   1149
   1150	fw_work->module = module;
   1151	fw_work->name = kstrdup_const(name, gfp);
   1152	if (!fw_work->name) {
   1153		kfree(fw_work);
   1154		return -ENOMEM;
   1155	}
   1156	fw_work->device = device;
   1157	fw_work->context = context;
   1158	fw_work->cont = cont;
   1159	fw_work->opt_flags = FW_OPT_NOWAIT |
   1160		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
   1161
   1162	if (!uevent && fw_cache_is_setup(device, name)) {
   1163		kfree_const(fw_work->name);
   1164		kfree(fw_work);
   1165		return -EOPNOTSUPP;
   1166	}
   1167
   1168	if (!try_module_get(module)) {
   1169		kfree_const(fw_work->name);
   1170		kfree(fw_work);
   1171		return -EFAULT;
   1172	}
   1173
   1174	get_device(fw_work->device);
   1175	INIT_WORK(&fw_work->work, request_firmware_work_func);
   1176	schedule_work(&fw_work->work);
   1177	return 0;
   1178}
   1179EXPORT_SYMBOL(request_firmware_nowait);
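
/*
 * Minimal sketch of asynchronous usage, assuming a hypothetical
 * example_fw_done() callback: the request runs from a workqueue, @context is
 * handed back to the callback, fw is NULL on failure, and a successful image
 * must still be freed with release_firmware().
 */
static void example_fw_done(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_warn(dev, "firmware not available\n");
		return;
	}

	/* ... program fw->data / fw->size into the device ... */

	release_firmware(fw);
}

static int example_load_fw_async(struct device *dev)
{
	return request_firmware_nowait(THIS_MODULE, true /* uevent */,
				       "example/fw.bin", dev, GFP_KERNEL,
				       dev, example_fw_done);
}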
   1180
   1181#ifdef CONFIG_FW_CACHE
   1182static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
   1183
   1184/**
   1185 * cache_firmware() - cache one firmware image in kernel memory space
   1186 * @fw_name: the firmware image name
   1187 *
    1188 * Cache firmware in kernel memory so that drivers can use it when the
    1189 * system isn't ready for them to request the firmware image from userspace.
    1190 * Once it returns successfully, the driver can use request_firmware() or its
    1191 * nowait version to get the cached firmware without interacting
    1192 * with userspace.
   1193 *
   1194 * Return 0 if the firmware image has been cached successfully
   1195 * Return !0 otherwise
   1196 *
   1197 */
   1198static int cache_firmware(const char *fw_name)
   1199{
   1200	int ret;
   1201	const struct firmware *fw;
   1202
   1203	pr_debug("%s: %s\n", __func__, fw_name);
   1204
   1205	ret = request_firmware(&fw, fw_name, NULL);
   1206	if (!ret)
   1207		kfree(fw);
   1208
   1209	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
   1210
   1211	return ret;
   1212}
   1213
   1214static struct fw_priv *lookup_fw_priv(const char *fw_name)
   1215{
   1216	struct fw_priv *tmp;
   1217	struct firmware_cache *fwc = &fw_cache;
   1218
   1219	spin_lock(&fwc->lock);
   1220	tmp = __lookup_fw_priv(fw_name);
   1221	spin_unlock(&fwc->lock);
   1222
   1223	return tmp;
   1224}
   1225
   1226/**
   1227 * uncache_firmware() - remove one cached firmware image
   1228 * @fw_name: the firmware image name
   1229 *
   1230 * Uncache one firmware image which has been cached successfully
   1231 * before.
   1232 *
   1233 * Return 0 if the firmware cache has been removed successfully
   1234 * Return !0 otherwise
   1235 *
   1236 */
   1237static int uncache_firmware(const char *fw_name)
   1238{
   1239	struct fw_priv *fw_priv;
   1240	struct firmware fw;
   1241
   1242	pr_debug("%s: %s\n", __func__, fw_name);
   1243
   1244	if (firmware_request_builtin(&fw, fw_name))
   1245		return 0;
   1246
   1247	fw_priv = lookup_fw_priv(fw_name);
   1248	if (fw_priv) {
   1249		free_fw_priv(fw_priv);
   1250		return 0;
   1251	}
   1252
   1253	return -EINVAL;
   1254}
   1255
   1256static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
   1257{
   1258	struct fw_cache_entry *fce;
   1259
   1260	fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
   1261	if (!fce)
   1262		goto exit;
   1263
   1264	fce->name = kstrdup_const(name, GFP_ATOMIC);
   1265	if (!fce->name) {
   1266		kfree(fce);
   1267		fce = NULL;
   1268		goto exit;
   1269	}
   1270exit:
   1271	return fce;
   1272}
   1273
   1274static int __fw_entry_found(const char *name)
   1275{
   1276	struct firmware_cache *fwc = &fw_cache;
   1277	struct fw_cache_entry *fce;
   1278
   1279	list_for_each_entry(fce, &fwc->fw_names, list) {
   1280		if (!strcmp(fce->name, name))
   1281			return 1;
   1282	}
   1283	return 0;
   1284}
   1285
   1286static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
   1287{
   1288	const char *name = fw_priv->fw_name;
   1289	struct firmware_cache *fwc = fw_priv->fwc;
   1290	struct fw_cache_entry *fce;
   1291
   1292	spin_lock(&fwc->name_lock);
   1293	if (__fw_entry_found(name))
   1294		goto found;
   1295
   1296	fce = alloc_fw_cache_entry(name);
   1297	if (fce) {
   1298		list_add(&fce->list, &fwc->fw_names);
   1299		kref_get(&fw_priv->ref);
   1300		pr_debug("%s: fw: %s\n", __func__, name);
   1301	}
   1302found:
   1303	spin_unlock(&fwc->name_lock);
   1304}
   1305
   1306static void free_fw_cache_entry(struct fw_cache_entry *fce)
   1307{
   1308	kfree_const(fce->name);
   1309	kfree(fce);
   1310}
   1311
   1312static void __async_dev_cache_fw_image(void *fw_entry,
   1313				       async_cookie_t cookie)
   1314{
   1315	struct fw_cache_entry *fce = fw_entry;
   1316	struct firmware_cache *fwc = &fw_cache;
   1317	int ret;
   1318
   1319	ret = cache_firmware(fce->name);
   1320	if (ret) {
   1321		spin_lock(&fwc->name_lock);
   1322		list_del(&fce->list);
   1323		spin_unlock(&fwc->name_lock);
   1324
   1325		free_fw_cache_entry(fce);
   1326	}
   1327}
   1328
   1329/* called with dev->devres_lock held */
   1330static void dev_create_fw_entry(struct device *dev, void *res,
   1331				void *data)
   1332{
   1333	struct fw_name_devm *fwn = res;
   1334	const char *fw_name = fwn->name;
   1335	struct list_head *head = data;
   1336	struct fw_cache_entry *fce;
   1337
   1338	fce = alloc_fw_cache_entry(fw_name);
   1339	if (fce)
   1340		list_add(&fce->list, head);
   1341}
   1342
   1343static int devm_name_match(struct device *dev, void *res,
   1344			   void *match_data)
   1345{
   1346	struct fw_name_devm *fwn = res;
   1347	return (fwn->magic == (unsigned long)match_data);
   1348}
   1349
   1350static void dev_cache_fw_image(struct device *dev, void *data)
   1351{
   1352	LIST_HEAD(todo);
   1353	struct fw_cache_entry *fce;
   1354	struct fw_cache_entry *fce_next;
   1355	struct firmware_cache *fwc = &fw_cache;
   1356
   1357	devres_for_each_res(dev, fw_name_devm_release,
   1358			    devm_name_match, &fw_cache,
   1359			    dev_create_fw_entry, &todo);
   1360
   1361	list_for_each_entry_safe(fce, fce_next, &todo, list) {
   1362		list_del(&fce->list);
   1363
   1364		spin_lock(&fwc->name_lock);
   1365		/* only one cache entry for one firmware */
   1366		if (!__fw_entry_found(fce->name)) {
   1367			list_add(&fce->list, &fwc->fw_names);
   1368		} else {
   1369			free_fw_cache_entry(fce);
   1370			fce = NULL;
   1371		}
   1372		spin_unlock(&fwc->name_lock);
   1373
   1374		if (fce)
   1375			async_schedule_domain(__async_dev_cache_fw_image,
   1376					      (void *)fce,
   1377					      &fw_cache_domain);
   1378	}
   1379}
   1380
   1381static void __device_uncache_fw_images(void)
   1382{
   1383	struct firmware_cache *fwc = &fw_cache;
   1384	struct fw_cache_entry *fce;
   1385
   1386	spin_lock(&fwc->name_lock);
   1387	while (!list_empty(&fwc->fw_names)) {
   1388		fce = list_entry(fwc->fw_names.next,
   1389				struct fw_cache_entry, list);
   1390		list_del(&fce->list);
   1391		spin_unlock(&fwc->name_lock);
   1392
   1393		uncache_firmware(fce->name);
   1394		free_fw_cache_entry(fce);
   1395
   1396		spin_lock(&fwc->name_lock);
   1397	}
   1398	spin_unlock(&fwc->name_lock);
   1399}
   1400
   1401/**
   1402 * device_cache_fw_images() - cache devices' firmware
   1403 *
    1404 * If a device called request_firmware() or its nowait version
    1405 * successfully before, the firmware names are recorded in the
    1406 * device's devres list, so device_cache_fw_images() can call
    1407 * cache_firmware() to cache these firmware images for the device;
    1408 * the device driver can then load its firmware easily at a
    1409 * time when the system is not ready to complete loading firmware.
   1410 */
   1411static void device_cache_fw_images(void)
   1412{
   1413	struct firmware_cache *fwc = &fw_cache;
   1414	DEFINE_WAIT(wait);
   1415
   1416	pr_debug("%s\n", __func__);
   1417
   1418	/* cancel uncache work */
   1419	cancel_delayed_work_sync(&fwc->work);
   1420
   1421	fw_fallback_set_cache_timeout();
   1422
   1423	mutex_lock(&fw_lock);
   1424	fwc->state = FW_LOADER_START_CACHE;
   1425	dpm_for_each_dev(NULL, dev_cache_fw_image);
   1426	mutex_unlock(&fw_lock);
   1427
   1428	/* wait for completion of caching firmware for all devices */
   1429	async_synchronize_full_domain(&fw_cache_domain);
   1430
   1431	fw_fallback_set_default_timeout();
   1432}
   1433
   1434/**
   1435 * device_uncache_fw_images() - uncache devices' firmware
   1436 *
    1437 * Uncache all firmware images which have been cached successfully
    1438 * by device_cache_fw_images() earlier.
   1439 */
   1440static void device_uncache_fw_images(void)
   1441{
   1442	pr_debug("%s\n", __func__);
   1443	__device_uncache_fw_images();
   1444}
   1445
   1446static void device_uncache_fw_images_work(struct work_struct *work)
   1447{
   1448	device_uncache_fw_images();
   1449}
   1450
   1451/**
    1452 * device_uncache_fw_images_delay() - uncache devices' firmware after a delay
    1453 * @delay: number of milliseconds to delay before uncaching device firmware
    1454 *
    1455 * Uncache all devices' firmware which has been cached successfully
   1456 * by device_cache_fw_images after @delay milliseconds.
   1457 */
   1458static void device_uncache_fw_images_delay(unsigned long delay)
   1459{
   1460	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
   1461			   msecs_to_jiffies(delay));
   1462}
   1463
   1464static int fw_pm_notify(struct notifier_block *notify_block,
   1465			unsigned long mode, void *unused)
   1466{
   1467	switch (mode) {
   1468	case PM_HIBERNATION_PREPARE:
   1469	case PM_SUSPEND_PREPARE:
   1470	case PM_RESTORE_PREPARE:
   1471		/*
   1472		 * kill pending fallback requests with a custom fallback
   1473		 * to avoid stalling suspend.
   1474		 */
   1475		kill_pending_fw_fallback_reqs(true);
   1476		device_cache_fw_images();
   1477		break;
   1478
   1479	case PM_POST_SUSPEND:
   1480	case PM_POST_HIBERNATION:
   1481	case PM_POST_RESTORE:
   1482		/*
    1483		 * In case system sleep failed and syscore_suspend was
    1484		 * not called.
   1485		 */
   1486		mutex_lock(&fw_lock);
   1487		fw_cache.state = FW_LOADER_NO_CACHE;
   1488		mutex_unlock(&fw_lock);
   1489
   1490		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
   1491		break;
   1492	}
   1493
   1494	return 0;
   1495}
   1496
   1497/* stop caching firmware once syscore_suspend is reached */
   1498static int fw_suspend(void)
   1499{
   1500	fw_cache.state = FW_LOADER_NO_CACHE;
   1501	return 0;
   1502}
   1503
   1504static struct syscore_ops fw_syscore_ops = {
   1505	.suspend = fw_suspend,
   1506};
   1507
   1508static int __init register_fw_pm_ops(void)
   1509{
   1510	int ret;
   1511
   1512	spin_lock_init(&fw_cache.name_lock);
   1513	INIT_LIST_HEAD(&fw_cache.fw_names);
   1514
   1515	INIT_DELAYED_WORK(&fw_cache.work,
   1516			  device_uncache_fw_images_work);
   1517
   1518	fw_cache.pm_notify.notifier_call = fw_pm_notify;
   1519	ret = register_pm_notifier(&fw_cache.pm_notify);
   1520	if (ret)
   1521		return ret;
   1522
   1523	register_syscore_ops(&fw_syscore_ops);
   1524
   1525	return ret;
   1526}
   1527
   1528static inline void unregister_fw_pm_ops(void)
   1529{
   1530	unregister_syscore_ops(&fw_syscore_ops);
   1531	unregister_pm_notifier(&fw_cache.pm_notify);
   1532}
   1533#else
   1534static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
   1535{
   1536}
   1537static inline int register_fw_pm_ops(void)
   1538{
   1539	return 0;
   1540}
   1541static inline void unregister_fw_pm_ops(void)
   1542{
   1543}
   1544#endif
   1545
   1546static void __init fw_cache_init(void)
   1547{
   1548	spin_lock_init(&fw_cache.lock);
   1549	INIT_LIST_HEAD(&fw_cache.head);
   1550	fw_cache.state = FW_LOADER_NO_CACHE;
   1551}
   1552
   1553static int fw_shutdown_notify(struct notifier_block *unused1,
   1554			      unsigned long unused2, void *unused3)
   1555{
   1556	/*
    1557	 * Kill all pending fallback requests to avoid stalling shutdown
    1558	 * and to avoid a deadlock on the usermode_lock.
   1559	 */
   1560	kill_pending_fw_fallback_reqs(false);
   1561
   1562	return NOTIFY_DONE;
   1563}
   1564
   1565static struct notifier_block fw_shutdown_nb = {
   1566	.notifier_call = fw_shutdown_notify,
   1567};
   1568
   1569static int __init firmware_class_init(void)
   1570{
   1571	int ret;
   1572
   1573	/* No need to unfold these on exit */
   1574	fw_cache_init();
   1575
   1576	ret = register_fw_pm_ops();
   1577	if (ret)
   1578		return ret;
   1579
   1580	ret = register_reboot_notifier(&fw_shutdown_nb);
   1581	if (ret)
   1582		goto out;
   1583
   1584	return register_sysfs_loader();
   1585
   1586out:
   1587	unregister_fw_pm_ops();
   1588	return ret;
   1589}
   1590
   1591static void __exit firmware_class_exit(void)
   1592{
   1593	unregister_fw_pm_ops();
   1594	unregister_reboot_notifier(&fw_shutdown_nb);
   1595	unregister_sysfs_loader();
   1596}
   1597
   1598fs_initcall(firmware_class_init);
   1599module_exit(firmware_class_exit);