cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

swap.c (38844B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * linux/kernel/power/swap.c
      4 *
      5 * This file provides functions for reading the suspend image from
      6 * and writing it to a swap partition.
      7 *
      8 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
      9 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
     10 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
     11 */
     12
     13#define pr_fmt(fmt) "PM: " fmt
     14
     15#include <linux/module.h>
     16#include <linux/file.h>
     17#include <linux/delay.h>
     18#include <linux/bitops.h>
     19#include <linux/device.h>
     20#include <linux/bio.h>
     21#include <linux/blkdev.h>
     22#include <linux/swap.h>
     23#include <linux/swapops.h>
     24#include <linux/pm.h>
     25#include <linux/slab.h>
     26#include <linux/lzo.h>
     27#include <linux/vmalloc.h>
     28#include <linux/cpumask.h>
     29#include <linux/atomic.h>
     30#include <linux/kthread.h>
     31#include <linux/crc32.h>
     32#include <linux/ktime.h>
     33
     34#include "power.h"
     35
     36#define HIBERNATE_SIG	"S1SUSPEND"
     37
     38u32 swsusp_hardware_signature;
     39
     40/*
     41 * When reading an {un,}compressed image, we may restore pages in place,
     42 * in which case some architectures need these pages cleaning before they
     43 * can be executed. We don't know which pages these may be, so clean the lot.
     44 */
     45static bool clean_pages_on_read;
     46static bool clean_pages_on_decompress;
     47
     48/*
     49 *	The swap map is a data structure used for keeping track of each page
     50 *	written to a swap partition.  It consists of many swap_map_page
     51 *	structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
     52 *	These structures are stored on the swap and linked together with the
     53 *	help of the .next_swap member.
     54 *
     55 *	The swap map is created during suspend.  The swap map pages are
     56 *	allocated and populated one at a time, so we only need one memory
     57 *	page to set up the entire structure.
     58 *
     59 *	During resume we pick up all swap_map_page structures into a list.
     60 */
     61
     62#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
     63
     64/*
     65 * Number of free pages that are not high.
     66 */
     67static inline unsigned long low_free_pages(void)
     68{
     69	return nr_free_pages() - nr_free_highpages();
     70}
     71
     72/*
     73 * Number of pages required to be kept free while writing the image. Always
     74 * half of all available low pages before the writing starts.
     75 */
     76static inline unsigned long reqd_free_pages(void)
     77{
     78	return low_free_pages() / 2;
     79}
     80
     81struct swap_map_page {
     82	sector_t entries[MAP_PAGE_ENTRIES];
     83	sector_t next_swap;
     84};
     85
     86struct swap_map_page_list {
     87	struct swap_map_page *map;
     88	struct swap_map_page_list *next;
     89};
     90
     91/*
     92 *	The swap_map_handle structure is used for handling swap in
     93 *	a file-like way
     94 */
     95
     96struct swap_map_handle {
     97	struct swap_map_page *cur;
     98	struct swap_map_page_list *maps;
     99	sector_t cur_swap;
    100	sector_t first_sector;
    101	unsigned int k;
    102	unsigned long reqd_free_pages;
    103	u32 crc32;
    104};
    105
    106struct swsusp_header {
    107	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
    108	              sizeof(u32) - sizeof(u32)];
    109	u32	hw_sig;
    110	u32	crc32;
    111	sector_t image;
    112	unsigned int flags;	/* Flags to pass to the "boot" kernel */
    113	char	orig_sig[10];
    114	char	sig[10];
    115} __packed;
    116
    117static struct swsusp_header *swsusp_header;
    118
    119/*
    120 *	The following functions are used for tracking the allocated
    121 *	swap pages, so that they can be freed in case of an error.
    122 */
    123
    124struct swsusp_extent {
    125	struct rb_node node;
    126	unsigned long start;
    127	unsigned long end;
    128};
    129
    130static struct rb_root swsusp_extents = RB_ROOT;
    131
    132static int swsusp_extents_insert(unsigned long swap_offset)
    133{
    134	struct rb_node **new = &(swsusp_extents.rb_node);
    135	struct rb_node *parent = NULL;
    136	struct swsusp_extent *ext;
    137
    138	/* Figure out where to put the new node */
    139	while (*new) {
    140		ext = rb_entry(*new, struct swsusp_extent, node);
    141		parent = *new;
    142		if (swap_offset < ext->start) {
    143			/* Try to merge */
    144			if (swap_offset == ext->start - 1) {
    145				ext->start--;
    146				return 0;
    147			}
    148			new = &((*new)->rb_left);
    149		} else if (swap_offset > ext->end) {
    150			/* Try to merge */
    151			if (swap_offset == ext->end + 1) {
    152				ext->end++;
    153				return 0;
    154			}
    155			new = &((*new)->rb_right);
    156		} else {
    157			/* It already is in the tree */
    158			return -EINVAL;
    159		}
    160	}
    161	/* Add the new node and rebalance the tree. */
    162	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
    163	if (!ext)
    164		return -ENOMEM;
    165
    166	ext->start = swap_offset;
    167	ext->end = swap_offset;
    168	rb_link_node(&ext->node, parent, new);
    169	rb_insert_color(&ext->node, &swsusp_extents);
    170	return 0;
    171}
    172
    173/*
    174 *	alloc_swapdev_block - allocate a swap page and register that it has
    175 *	been allocated, so that it can be freed in case of an error.
    176 */
    177
    178sector_t alloc_swapdev_block(int swap)
    179{
    180	unsigned long offset;
    181
    182	offset = swp_offset(get_swap_page_of_type(swap));
    183	if (offset) {
    184		if (swsusp_extents_insert(offset))
    185			swap_free(swp_entry(swap, offset));
    186		else
    187			return swapdev_block(swap, offset);
    188	}
    189	return 0;
    190}
    191
    192/*
    193 *	free_all_swap_pages - free swap pages allocated for saving image data.
    194 *	It also frees the extents used to register which swap entries had been
    195 *	allocated.
    196 */
    197
    198void free_all_swap_pages(int swap)
    199{
    200	struct rb_node *node;
    201
    202	while ((node = swsusp_extents.rb_node)) {
    203		struct swsusp_extent *ext;
    204		unsigned long offset;
    205
    206		ext = rb_entry(node, struct swsusp_extent, node);
    207		rb_erase(node, &swsusp_extents);
    208		for (offset = ext->start; offset <= ext->end; offset++)
    209			swap_free(swp_entry(swap, offset));
    210
    211		kfree(ext);
    212	}
    213}
    214
    215int swsusp_swap_in_use(void)
    216{
    217	return (swsusp_extents.rb_node != NULL);
    218}
    219
    220/*
    221 * General things
    222 */
    223
    224static unsigned short root_swap = 0xffff;
    225static struct block_device *hib_resume_bdev;
    226
    227struct hib_bio_batch {
    228	atomic_t		count;
    229	wait_queue_head_t	wait;
    230	blk_status_t		error;
    231	struct blk_plug		plug;
    232};
    233
    234static void hib_init_batch(struct hib_bio_batch *hb)
    235{
    236	atomic_set(&hb->count, 0);
    237	init_waitqueue_head(&hb->wait);
    238	hb->error = BLK_STS_OK;
    239	blk_start_plug(&hb->plug);
    240}
    241
    242static void hib_finish_batch(struct hib_bio_batch *hb)
    243{
    244	blk_finish_plug(&hb->plug);
    245}
    246
    247static void hib_end_io(struct bio *bio)
    248{
    249	struct hib_bio_batch *hb = bio->bi_private;
    250	struct page *page = bio_first_page_all(bio);
    251
    252	if (bio->bi_status) {
    253		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
    254			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
    255			 (unsigned long long)bio->bi_iter.bi_sector);
    256	}
    257
    258	if (bio_data_dir(bio) == WRITE)
    259		put_page(page);
    260	else if (clean_pages_on_read)
    261		flush_icache_range((unsigned long)page_address(page),
    262				   (unsigned long)page_address(page) + PAGE_SIZE);
    263
    264	if (bio->bi_status && !hb->error)
    265		hb->error = bio->bi_status;
    266	if (atomic_dec_and_test(&hb->count))
    267		wake_up(&hb->wait);
    268
    269	bio_put(bio);
    270}
    271
    272static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
    273		struct hib_bio_batch *hb)
    274{
    275	struct page *page = virt_to_page(addr);
    276	struct bio *bio;
    277	int error = 0;
    278
    279	bio = bio_alloc(hib_resume_bdev, 1, op | op_flags,
    280			GFP_NOIO | __GFP_HIGH);
    281	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
    282
    283	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
    284		pr_err("Adding page to bio failed at %llu\n",
    285		       (unsigned long long)bio->bi_iter.bi_sector);
    286		bio_put(bio);
    287		return -EFAULT;
    288	}
    289
    290	if (hb) {
    291		bio->bi_end_io = hib_end_io;
    292		bio->bi_private = hb;
    293		atomic_inc(&hb->count);
    294		submit_bio(bio);
    295	} else {
    296		error = submit_bio_wait(bio);
    297		bio_put(bio);
    298	}
    299
    300	return error;
    301}
    302
    303static int hib_wait_io(struct hib_bio_batch *hb)
    304{
    305	/*
    306	 * We are relying on the behavior of blk_plug that a thread with
    307	 * a plug will flush the plug list before sleeping.
    308	 */
    309	wait_event(hb->wait, atomic_read(&hb->count) == 0);
    310	return blk_status_to_errno(hb->error);
    311}
    312
    313/*
    314 * Saving part
    315 */
    316static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
    317{
    318	int error;
    319
    320	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
    321		      swsusp_header, NULL);
    322	if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
    323	    !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
    324		memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
    325		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
    326		swsusp_header->image = handle->first_sector;
    327		if (swsusp_hardware_signature) {
    328			swsusp_header->hw_sig = swsusp_hardware_signature;
    329			flags |= SF_HW_SIG;
    330		}
    331		swsusp_header->flags = flags;
    332		if (flags & SF_CRC32_MODE)
    333			swsusp_header->crc32 = handle->crc32;
    334		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
    335				      swsusp_resume_block, swsusp_header, NULL);
    336	} else {
    337		pr_err("Swap header not found!\n");
    338		error = -ENODEV;
    339	}
    340	return error;
    341}
    342
    343/**
    344 *	swsusp_swap_check - check if the resume device is a swap device
    345 *	and get its index (if so)
    346 *
    347 *	This is called before saving the image
    348 */
    349static int swsusp_swap_check(void)
    350{
    351	int res;
    352
    353	if (swsusp_resume_device)
    354		res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
    355	else
    356		res = find_first_swap(&swsusp_resume_device);
    357	if (res < 0)
    358		return res;
    359	root_swap = res;
    360
    361	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_WRITE,
    362			NULL);
    363	if (IS_ERR(hib_resume_bdev))
    364		return PTR_ERR(hib_resume_bdev);
    365
    366	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
    367	if (res < 0)
    368		blkdev_put(hib_resume_bdev, FMODE_WRITE);
    369
    370	return res;
    371}
    372
    373/**
    374 *	write_page - Write one page to given swap location.
    375 *	@buf:		Address we're writing.
    376 *	@offset:	Offset of the swap page we're writing to.
    377 *	@hb:		bio completion batch
    378 */
    379
    380static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
    381{
    382	void *src;
    383	int ret;
    384
    385	if (!offset)
    386		return -ENOSPC;
    387
    388	if (hb) {
    389		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
    390		                              __GFP_NORETRY);
    391		if (src) {
    392			copy_page(src, buf);
    393		} else {
    394			ret = hib_wait_io(hb); /* Free pages */
    395			if (ret)
    396				return ret;
    397			src = (void *)__get_free_page(GFP_NOIO |
    398			                              __GFP_NOWARN |
    399			                              __GFP_NORETRY);
    400			if (src) {
    401				copy_page(src, buf);
    402			} else {
    403				WARN_ON_ONCE(1);
    404				hb = NULL;	/* Go synchronous */
    405				src = buf;
    406			}
    407		}
    408	} else {
    409		src = buf;
    410	}
    411	return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
    412}
    413
    414static void release_swap_writer(struct swap_map_handle *handle)
    415{
    416	if (handle->cur)
    417		free_page((unsigned long)handle->cur);
    418	handle->cur = NULL;
    419}
    420
    421static int get_swap_writer(struct swap_map_handle *handle)
    422{
    423	int ret;
    424
    425	ret = swsusp_swap_check();
    426	if (ret) {
    427		if (ret != -ENOSPC)
    428			pr_err("Cannot find swap device, try swapon -a\n");
    429		return ret;
    430	}
    431	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
    432	if (!handle->cur) {
    433		ret = -ENOMEM;
    434		goto err_close;
    435	}
    436	handle->cur_swap = alloc_swapdev_block(root_swap);
    437	if (!handle->cur_swap) {
    438		ret = -ENOSPC;
    439		goto err_rel;
    440	}
    441	handle->k = 0;
    442	handle->reqd_free_pages = reqd_free_pages();
    443	handle->first_sector = handle->cur_swap;
    444	return 0;
    445err_rel:
    446	release_swap_writer(handle);
    447err_close:
    448	swsusp_close(FMODE_WRITE);
    449	return ret;
    450}
    451
    452static int swap_write_page(struct swap_map_handle *handle, void *buf,
    453		struct hib_bio_batch *hb)
    454{
    455	int error = 0;
    456	sector_t offset;
    457
    458	if (!handle->cur)
    459		return -EINVAL;
    460	offset = alloc_swapdev_block(root_swap);
    461	error = write_page(buf, offset, hb);
    462	if (error)
    463		return error;
    464	handle->cur->entries[handle->k++] = offset;
    465	if (handle->k >= MAP_PAGE_ENTRIES) {
    466		offset = alloc_swapdev_block(root_swap);
    467		if (!offset)
    468			return -ENOSPC;
    469		handle->cur->next_swap = offset;
    470		error = write_page(handle->cur, handle->cur_swap, hb);
    471		if (error)
    472			goto out;
    473		clear_page(handle->cur);
    474		handle->cur_swap = offset;
    475		handle->k = 0;
    476
    477		if (hb && low_free_pages() <= handle->reqd_free_pages) {
    478			error = hib_wait_io(hb);
    479			if (error)
    480				goto out;
    481			/*
    482			 * Recalculate the number of required free pages, to
    483			 * make sure we never take more than half.
    484			 */
    485			handle->reqd_free_pages = reqd_free_pages();
    486		}
    487	}
    488 out:
    489	return error;
    490}
    491
    492static int flush_swap_writer(struct swap_map_handle *handle)
    493{
    494	if (handle->cur && handle->cur_swap)
    495		return write_page(handle->cur, handle->cur_swap, NULL);
    496	else
    497		return -EINVAL;
    498}
    499
    500static int swap_writer_finish(struct swap_map_handle *handle,
    501		unsigned int flags, int error)
    502{
    503	if (!error) {
    504		pr_info("S");
    505		error = mark_swapfiles(handle, flags);
    506		pr_cont("|\n");
    507		flush_swap_writer(handle);
    508	}
    509
    510	if (error)
    511		free_all_swap_pages(root_swap);
    512	release_swap_writer(handle);
    513	swsusp_close(FMODE_WRITE);
    514
    515	return error;
    516}
    517
    518/* We need to remember how much compressed data we need to read. */
    519#define LZO_HEADER	sizeof(size_t)
    520
    521/* Number of pages/bytes we'll compress at one time. */
    522#define LZO_UNC_PAGES	32
    523#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)
    524
    525/* Number of pages/bytes we need for compressed data (worst case). */
    526#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
    527			             LZO_HEADER, PAGE_SIZE)
    528#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
    529
    530/* Maximum number of threads for compression/decompression. */
    531#define LZO_THREADS	3
    532
    533/* Minimum/maximum number of pages for read buffering. */
    534#define LZO_MIN_RD_PAGES	1024
    535#define LZO_MAX_RD_PAGES	8192
    536
    537
    538/**
    539 *	save_image - save the suspend image data
    540 */
    541
    542static int save_image(struct swap_map_handle *handle,
    543                      struct snapshot_handle *snapshot,
    544                      unsigned int nr_to_write)
    545{
    546	unsigned int m;
    547	int ret;
    548	int nr_pages;
    549	int err2;
    550	struct hib_bio_batch hb;
    551	ktime_t start;
    552	ktime_t stop;
    553
    554	hib_init_batch(&hb);
    555
    556	pr_info("Saving image data pages (%u pages)...\n",
    557		nr_to_write);
    558	m = nr_to_write / 10;
    559	if (!m)
    560		m = 1;
    561	nr_pages = 0;
    562	start = ktime_get();
    563	while (1) {
    564		ret = snapshot_read_next(snapshot);
    565		if (ret <= 0)
    566			break;
    567		ret = swap_write_page(handle, data_of(*snapshot), &hb);
    568		if (ret)
    569			break;
    570		if (!(nr_pages % m))
    571			pr_info("Image saving progress: %3d%%\n",
    572				nr_pages / m * 10);
    573		nr_pages++;
    574	}
    575	err2 = hib_wait_io(&hb);
    576	hib_finish_batch(&hb);
    577	stop = ktime_get();
    578	if (!ret)
    579		ret = err2;
    580	if (!ret)
    581		pr_info("Image saving done\n");
    582	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
    583	return ret;
    584}
    585
    586/**
    587 * Structure used for CRC32.
    588 */
    589struct crc_data {
    590	struct task_struct *thr;                  /* thread */
    591	atomic_t ready;                           /* ready to start flag */
    592	atomic_t stop;                            /* ready to stop flag */
    593	unsigned run_threads;                     /* nr current threads */
    594	wait_queue_head_t go;                     /* start crc update */
    595	wait_queue_head_t done;                   /* crc update done */
    596	u32 *crc32;                               /* points to handle's crc32 */
    597	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
    598	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
    599};
    600
    601/**
    602 * CRC32 update function that runs in its own thread.
    603 */
    604static int crc32_threadfn(void *data)
    605{
    606	struct crc_data *d = data;
    607	unsigned i;
    608
    609	while (1) {
    610		wait_event(d->go, atomic_read(&d->ready) ||
    611		                  kthread_should_stop());
    612		if (kthread_should_stop()) {
    613			d->thr = NULL;
    614			atomic_set(&d->stop, 1);
    615			wake_up(&d->done);
    616			break;
    617		}
    618		atomic_set(&d->ready, 0);
    619
    620		for (i = 0; i < d->run_threads; i++)
    621			*d->crc32 = crc32_le(*d->crc32,
    622			                     d->unc[i], *d->unc_len[i]);
    623		atomic_set(&d->stop, 1);
    624		wake_up(&d->done);
    625	}
    626	return 0;
    627}
    628/**
    629 * Structure used for LZO data compression.
    630 */
    631struct cmp_data {
    632	struct task_struct *thr;                  /* thread */
    633	atomic_t ready;                           /* ready to start flag */
    634	atomic_t stop;                            /* ready to stop flag */
    635	int ret;                                  /* return code */
    636	wait_queue_head_t go;                     /* start compression */
    637	wait_queue_head_t done;                   /* compression done */
    638	size_t unc_len;                           /* uncompressed length */
    639	size_t cmp_len;                           /* compressed length */
    640	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
    641	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
    642	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
    643};
    644
    645/**
    646 * Compression function that runs in its own thread.
    647 */
    648static int lzo_compress_threadfn(void *data)
    649{
    650	struct cmp_data *d = data;
    651
    652	while (1) {
    653		wait_event(d->go, atomic_read(&d->ready) ||
    654		                  kthread_should_stop());
    655		if (kthread_should_stop()) {
    656			d->thr = NULL;
    657			d->ret = -1;
    658			atomic_set(&d->stop, 1);
    659			wake_up(&d->done);
    660			break;
    661		}
    662		atomic_set(&d->ready, 0);
    663
    664		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
    665		                          d->cmp + LZO_HEADER, &d->cmp_len,
    666		                          d->wrk);
    667		atomic_set(&d->stop, 1);
    668		wake_up(&d->done);
    669	}
    670	return 0;
    671}
    672
    673/**
    674 * save_image_lzo - Save the suspend image data compressed with LZO.
    675 * @handle: Swap map handle to use for saving the image.
    676 * @snapshot: Image to read data from.
    677 * @nr_to_write: Number of pages to save.
    678 */
    679static int save_image_lzo(struct swap_map_handle *handle,
    680                          struct snapshot_handle *snapshot,
    681                          unsigned int nr_to_write)
    682{
    683	unsigned int m;
    684	int ret = 0;
    685	int nr_pages;
    686	int err2;
    687	struct hib_bio_batch hb;
    688	ktime_t start;
    689	ktime_t stop;
    690	size_t off;
    691	unsigned thr, run_threads, nr_threads;
    692	unsigned char *page = NULL;
    693	struct cmp_data *data = NULL;
    694	struct crc_data *crc = NULL;
    695
    696	hib_init_batch(&hb);
    697
    698	/*
    699	 * We'll limit the number of threads for compression to limit memory
    700	 * footprint.
    701	 */
    702	nr_threads = num_online_cpus() - 1;
    703	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
    704
    705	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
    706	if (!page) {
    707		pr_err("Failed to allocate LZO page\n");
    708		ret = -ENOMEM;
    709		goto out_clean;
    710	}
    711
    712	data = vzalloc(array_size(nr_threads, sizeof(*data)));
    713	if (!data) {
    714		pr_err("Failed to allocate LZO data\n");
    715		ret = -ENOMEM;
    716		goto out_clean;
    717	}
    718
    719	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
    720	if (!crc) {
    721		pr_err("Failed to allocate crc\n");
    722		ret = -ENOMEM;
    723		goto out_clean;
    724	}
    725
    726	/*
    727	 * Start the compression threads.
    728	 */
    729	for (thr = 0; thr < nr_threads; thr++) {
    730		init_waitqueue_head(&data[thr].go);
    731		init_waitqueue_head(&data[thr].done);
    732
    733		data[thr].thr = kthread_run(lzo_compress_threadfn,
    734		                            &data[thr],
    735		                            "image_compress/%u", thr);
    736		if (IS_ERR(data[thr].thr)) {
    737			data[thr].thr = NULL;
    738			pr_err("Cannot start compression threads\n");
    739			ret = -ENOMEM;
    740			goto out_clean;
    741		}
    742	}
    743
    744	/*
    745	 * Start the CRC32 thread.
    746	 */
    747	init_waitqueue_head(&crc->go);
    748	init_waitqueue_head(&crc->done);
    749
    750	handle->crc32 = 0;
    751	crc->crc32 = &handle->crc32;
    752	for (thr = 0; thr < nr_threads; thr++) {
    753		crc->unc[thr] = data[thr].unc;
    754		crc->unc_len[thr] = &data[thr].unc_len;
    755	}
    756
    757	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
    758	if (IS_ERR(crc->thr)) {
    759		crc->thr = NULL;
    760		pr_err("Cannot start CRC32 thread\n");
    761		ret = -ENOMEM;
    762		goto out_clean;
    763	}
    764
    765	/*
    766	 * Adjust the number of required free pages after all allocations have
    767	 * been done. We don't want to run out of pages when writing.
    768	 */
    769	handle->reqd_free_pages = reqd_free_pages();
    770
    771	pr_info("Using %u thread(s) for compression\n", nr_threads);
    772	pr_info("Compressing and saving image data (%u pages)...\n",
    773		nr_to_write);
    774	m = nr_to_write / 10;
    775	if (!m)
    776		m = 1;
    777	nr_pages = 0;
    778	start = ktime_get();
    779	for (;;) {
    780		for (thr = 0; thr < nr_threads; thr++) {
    781			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
    782				ret = snapshot_read_next(snapshot);
    783				if (ret < 0)
    784					goto out_finish;
    785
    786				if (!ret)
    787					break;
    788
    789				memcpy(data[thr].unc + off,
    790				       data_of(*snapshot), PAGE_SIZE);
    791
    792				if (!(nr_pages % m))
    793					pr_info("Image saving progress: %3d%%\n",
    794						nr_pages / m * 10);
    795				nr_pages++;
    796			}
    797			if (!off)
    798				break;
    799
    800			data[thr].unc_len = off;
    801
    802			atomic_set(&data[thr].ready, 1);
    803			wake_up(&data[thr].go);
    804		}
    805
    806		if (!thr)
    807			break;
    808
    809		crc->run_threads = thr;
    810		atomic_set(&crc->ready, 1);
    811		wake_up(&crc->go);
    812
    813		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
    814			wait_event(data[thr].done,
    815			           atomic_read(&data[thr].stop));
    816			atomic_set(&data[thr].stop, 0);
    817
    818			ret = data[thr].ret;
    819
    820			if (ret < 0) {
    821				pr_err("LZO compression failed\n");
    822				goto out_finish;
    823			}
    824
    825			if (unlikely(!data[thr].cmp_len ||
    826			             data[thr].cmp_len >
    827			             lzo1x_worst_compress(data[thr].unc_len))) {
    828				pr_err("Invalid LZO compressed length\n");
    829				ret = -1;
    830				goto out_finish;
    831			}
    832
    833			*(size_t *)data[thr].cmp = data[thr].cmp_len;
    834
    835			/*
    836			 * Given we are writing one page at a time to disk, we
    837			 * copy that much from the buffer, although the last
    838			 * bit will likely be smaller than full page. This is
    839			 * OK - we saved the length of the compressed data, so
    840			 * any garbage at the end will be discarded when we
    841			 * read it.
    842			 */
    843			for (off = 0;
    844			     off < LZO_HEADER + data[thr].cmp_len;
    845			     off += PAGE_SIZE) {
    846				memcpy(page, data[thr].cmp + off, PAGE_SIZE);
    847
    848				ret = swap_write_page(handle, page, &hb);
    849				if (ret)
    850					goto out_finish;
    851			}
    852		}
    853
    854		wait_event(crc->done, atomic_read(&crc->stop));
    855		atomic_set(&crc->stop, 0);
    856	}
    857
    858out_finish:
    859	err2 = hib_wait_io(&hb);
    860	stop = ktime_get();
    861	if (!ret)
    862		ret = err2;
    863	if (!ret)
    864		pr_info("Image saving done\n");
    865	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
    866out_clean:
    867	hib_finish_batch(&hb);
    868	if (crc) {
    869		if (crc->thr)
    870			kthread_stop(crc->thr);
    871		kfree(crc);
    872	}
    873	if (data) {
    874		for (thr = 0; thr < nr_threads; thr++)
    875			if (data[thr].thr)
    876				kthread_stop(data[thr].thr);
    877		vfree(data);
    878	}
    879	if (page) free_page((unsigned long)page);
    880
    881	return ret;
    882}
    883
    884/**
    885 *	enough_swap - Make sure we have enough swap to save the image.
    886 *
    887 *	Returns TRUE or FALSE after checking the total amount of swap
    888 *	space available from the resume partition.
    889 */
    890
    891static int enough_swap(unsigned int nr_pages)
    892{
    893	unsigned int free_swap = count_swap_pages(root_swap, 1);
    894	unsigned int required;
    895
    896	pr_debug("Free swap pages: %u\n", free_swap);
    897
    898	required = PAGES_FOR_IO + nr_pages;
    899	return free_swap > required;
    900}
    901
    902/**
    903 *	swsusp_write - Write entire image and metadata.
    904 *	@flags: flags to pass to the "boot" kernel in the image header
    905 *
    906 *	It is important _NOT_ to umount filesystems at this point. We want
    907 *	them synced (in case something goes wrong) but we DO not want to mark
    908 *	filesystem clean: it is not. (And it does not matter, if we resume
    909 *	correctly, we'll mark system clean, anyway.)
    910 */
    911
    912int swsusp_write(unsigned int flags)
    913{
    914	struct swap_map_handle handle;
    915	struct snapshot_handle snapshot;
    916	struct swsusp_info *header;
    917	unsigned long pages;
    918	int error;
    919
    920	pages = snapshot_get_image_size();
    921	error = get_swap_writer(&handle);
    922	if (error) {
    923		pr_err("Cannot get swap writer\n");
    924		return error;
    925	}
    926	if (flags & SF_NOCOMPRESS_MODE) {
    927		if (!enough_swap(pages)) {
    928			pr_err("Not enough free swap\n");
    929			error = -ENOSPC;
    930			goto out_finish;
    931		}
    932	}
    933	memset(&snapshot, 0, sizeof(struct snapshot_handle));
    934	error = snapshot_read_next(&snapshot);
    935	if (error < (int)PAGE_SIZE) {
    936		if (error >= 0)
    937			error = -EFAULT;
    938
    939		goto out_finish;
    940	}
    941	header = (struct swsusp_info *)data_of(snapshot);
    942	error = swap_write_page(&handle, header, NULL);
    943	if (!error) {
    944		error = (flags & SF_NOCOMPRESS_MODE) ?
    945			save_image(&handle, &snapshot, pages - 1) :
    946			save_image_lzo(&handle, &snapshot, pages - 1);
    947	}
    948out_finish:
    949	error = swap_writer_finish(&handle, flags, error);
    950	return error;
    951}
    952
    953/**
    954 *	The following functions allow us to read data using a swap map
    955 *	in a file-like way
    956 */
    957
    958static void release_swap_reader(struct swap_map_handle *handle)
    959{
    960	struct swap_map_page_list *tmp;
    961
    962	while (handle->maps) {
    963		if (handle->maps->map)
    964			free_page((unsigned long)handle->maps->map);
    965		tmp = handle->maps;
    966		handle->maps = handle->maps->next;
    967		kfree(tmp);
    968	}
    969	handle->cur = NULL;
    970}
    971
    972static int get_swap_reader(struct swap_map_handle *handle,
    973		unsigned int *flags_p)
    974{
    975	int error;
    976	struct swap_map_page_list *tmp, *last;
    977	sector_t offset;
    978
    979	*flags_p = swsusp_header->flags;
    980
    981	if (!swsusp_header->image) /* how can this happen? */
    982		return -EINVAL;
    983
    984	handle->cur = NULL;
    985	last = handle->maps = NULL;
    986	offset = swsusp_header->image;
    987	while (offset) {
    988		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
    989		if (!tmp) {
    990			release_swap_reader(handle);
    991			return -ENOMEM;
    992		}
    993		if (!handle->maps)
    994			handle->maps = tmp;
    995		if (last)
    996			last->next = tmp;
    997		last = tmp;
    998
    999		tmp->map = (struct swap_map_page *)
   1000			   __get_free_page(GFP_NOIO | __GFP_HIGH);
   1001		if (!tmp->map) {
   1002			release_swap_reader(handle);
   1003			return -ENOMEM;
   1004		}
   1005
   1006		error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
   1007		if (error) {
   1008			release_swap_reader(handle);
   1009			return error;
   1010		}
   1011		offset = tmp->map->next_swap;
   1012	}
   1013	handle->k = 0;
   1014	handle->cur = handle->maps->map;
   1015	return 0;
   1016}
   1017
   1018static int swap_read_page(struct swap_map_handle *handle, void *buf,
   1019		struct hib_bio_batch *hb)
   1020{
   1021	sector_t offset;
   1022	int error;
   1023	struct swap_map_page_list *tmp;
   1024
   1025	if (!handle->cur)
   1026		return -EINVAL;
   1027	offset = handle->cur->entries[handle->k];
   1028	if (!offset)
   1029		return -EFAULT;
   1030	error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
   1031	if (error)
   1032		return error;
   1033	if (++handle->k >= MAP_PAGE_ENTRIES) {
   1034		handle->k = 0;
   1035		free_page((unsigned long)handle->maps->map);
   1036		tmp = handle->maps;
   1037		handle->maps = handle->maps->next;
   1038		kfree(tmp);
   1039		if (!handle->maps)
   1040			release_swap_reader(handle);
   1041		else
   1042			handle->cur = handle->maps->map;
   1043	}
   1044	return error;
   1045}
   1046
   1047static int swap_reader_finish(struct swap_map_handle *handle)
   1048{
   1049	release_swap_reader(handle);
   1050
   1051	return 0;
   1052}
   1053
   1054/**
   1055 *	load_image - load the image using the swap map handle
   1056 *	@handle and the snapshot handle @snapshot
   1057 *	(assume there are @nr_to_read pages to load)
   1058 */
   1059
   1060static int load_image(struct swap_map_handle *handle,
   1061                      struct snapshot_handle *snapshot,
   1062                      unsigned int nr_to_read)
   1063{
   1064	unsigned int m;
   1065	int ret = 0;
   1066	ktime_t start;
   1067	ktime_t stop;
   1068	struct hib_bio_batch hb;
   1069	int err2;
   1070	unsigned nr_pages;
   1071
   1072	hib_init_batch(&hb);
   1073
   1074	clean_pages_on_read = true;
   1075	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
   1076	m = nr_to_read / 10;
   1077	if (!m)
   1078		m = 1;
   1079	nr_pages = 0;
   1080	start = ktime_get();
   1081	for ( ; ; ) {
   1082		ret = snapshot_write_next(snapshot);
   1083		if (ret <= 0)
   1084			break;
   1085		ret = swap_read_page(handle, data_of(*snapshot), &hb);
   1086		if (ret)
   1087			break;
   1088		if (snapshot->sync_read)
   1089			ret = hib_wait_io(&hb);
   1090		if (ret)
   1091			break;
   1092		if (!(nr_pages % m))
   1093			pr_info("Image loading progress: %3d%%\n",
   1094				nr_pages / m * 10);
   1095		nr_pages++;
   1096	}
   1097	err2 = hib_wait_io(&hb);
   1098	hib_finish_batch(&hb);
   1099	stop = ktime_get();
   1100	if (!ret)
   1101		ret = err2;
   1102	if (!ret) {
   1103		pr_info("Image loading done\n");
   1104		snapshot_write_finalize(snapshot);
   1105		if (!snapshot_image_loaded(snapshot))
   1106			ret = -ENODATA;
   1107	}
   1108	swsusp_show_speed(start, stop, nr_to_read, "Read");
   1109	return ret;
   1110}
   1111
   1112/**
   1113 * Structure used for LZO data decompression.
   1114 */
   1115struct dec_data {
   1116	struct task_struct *thr;                  /* thread */
   1117	atomic_t ready;                           /* ready to start flag */
   1118	atomic_t stop;                            /* ready to stop flag */
   1119	int ret;                                  /* return code */
   1120	wait_queue_head_t go;                     /* start decompression */
   1121	wait_queue_head_t done;                   /* decompression done */
   1122	size_t unc_len;                           /* uncompressed length */
   1123	size_t cmp_len;                           /* compressed length */
   1124	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
   1125	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
   1126};
   1127
   1128/**
   1129 * Decompression function that runs in its own thread.
   1130 */
   1131static int lzo_decompress_threadfn(void *data)
   1132{
   1133	struct dec_data *d = data;
   1134
   1135	while (1) {
   1136		wait_event(d->go, atomic_read(&d->ready) ||
   1137		                  kthread_should_stop());
   1138		if (kthread_should_stop()) {
   1139			d->thr = NULL;
   1140			d->ret = -1;
   1141			atomic_set(&d->stop, 1);
   1142			wake_up(&d->done);
   1143			break;
   1144		}
   1145		atomic_set(&d->ready, 0);
   1146
   1147		d->unc_len = LZO_UNC_SIZE;
   1148		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
   1149		                               d->unc, &d->unc_len);
   1150		if (clean_pages_on_decompress)
   1151			flush_icache_range((unsigned long)d->unc,
   1152					   (unsigned long)d->unc + d->unc_len);
   1153
   1154		atomic_set(&d->stop, 1);
   1155		wake_up(&d->done);
   1156	}
   1157	return 0;
   1158}
   1159
   1160/**
   1161 * load_image_lzo - Load compressed image data and decompress them with LZO.
   1162 * @handle: Swap map handle to use for loading data.
   1163 * @snapshot: Image to copy uncompressed data into.
   1164 * @nr_to_read: Number of pages to load.
   1165 */
   1166static int load_image_lzo(struct swap_map_handle *handle,
   1167                          struct snapshot_handle *snapshot,
   1168                          unsigned int nr_to_read)
   1169{
   1170	unsigned int m;
   1171	int ret = 0;
   1172	int eof = 0;
   1173	struct hib_bio_batch hb;
   1174	ktime_t start;
   1175	ktime_t stop;
   1176	unsigned nr_pages;
   1177	size_t off;
   1178	unsigned i, thr, run_threads, nr_threads;
   1179	unsigned ring = 0, pg = 0, ring_size = 0,
   1180	         have = 0, want, need, asked = 0;
   1181	unsigned long read_pages = 0;
   1182	unsigned char **page = NULL;
   1183	struct dec_data *data = NULL;
   1184	struct crc_data *crc = NULL;
   1185
   1186	hib_init_batch(&hb);
   1187
   1188	/*
   1189	 * We'll limit the number of threads for decompression to limit memory
   1190	 * footprint.
   1191	 */
   1192	nr_threads = num_online_cpus() - 1;
   1193	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
   1194
   1195	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
   1196	if (!page) {
   1197		pr_err("Failed to allocate LZO page\n");
   1198		ret = -ENOMEM;
   1199		goto out_clean;
   1200	}
   1201
   1202	data = vzalloc(array_size(nr_threads, sizeof(*data)));
   1203	if (!data) {
   1204		pr_err("Failed to allocate LZO data\n");
   1205		ret = -ENOMEM;
   1206		goto out_clean;
   1207	}
   1208
   1209	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
   1210	if (!crc) {
   1211		pr_err("Failed to allocate crc\n");
   1212		ret = -ENOMEM;
   1213		goto out_clean;
   1214	}
   1215
   1216	clean_pages_on_decompress = true;
   1217
   1218	/*
   1219	 * Start the decompression threads.
   1220	 */
   1221	for (thr = 0; thr < nr_threads; thr++) {
   1222		init_waitqueue_head(&data[thr].go);
   1223		init_waitqueue_head(&data[thr].done);
   1224
   1225		data[thr].thr = kthread_run(lzo_decompress_threadfn,
   1226		                            &data[thr],
   1227		                            "image_decompress/%u", thr);
   1228		if (IS_ERR(data[thr].thr)) {
   1229			data[thr].thr = NULL;
   1230			pr_err("Cannot start decompression threads\n");
   1231			ret = -ENOMEM;
   1232			goto out_clean;
   1233		}
   1234	}
   1235
   1236	/*
   1237	 * Start the CRC32 thread.
   1238	 */
   1239	init_waitqueue_head(&crc->go);
   1240	init_waitqueue_head(&crc->done);
   1241
   1242	handle->crc32 = 0;
   1243	crc->crc32 = &handle->crc32;
   1244	for (thr = 0; thr < nr_threads; thr++) {
   1245		crc->unc[thr] = data[thr].unc;
   1246		crc->unc_len[thr] = &data[thr].unc_len;
   1247	}
   1248
   1249	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
   1250	if (IS_ERR(crc->thr)) {
   1251		crc->thr = NULL;
   1252		pr_err("Cannot start CRC32 thread\n");
   1253		ret = -ENOMEM;
   1254		goto out_clean;
   1255	}
   1256
   1257	/*
   1258	 * Set the number of pages for read buffering.
   1259	 * This is complete guesswork, because we'll only know the real
   1260	 * picture once prepare_image() is called, which is much later on
   1261	 * during the image load phase. We'll assume the worst case and
   1262	 * say that none of the image pages are from high memory.
   1263	 */
   1264	if (low_free_pages() > snapshot_get_image_size())
   1265		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
   1266	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
   1267
   1268	for (i = 0; i < read_pages; i++) {
   1269		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
   1270						  GFP_NOIO | __GFP_HIGH :
   1271						  GFP_NOIO | __GFP_NOWARN |
   1272						  __GFP_NORETRY);
   1273
   1274		if (!page[i]) {
   1275			if (i < LZO_CMP_PAGES) {
   1276				ring_size = i;
   1277				pr_err("Failed to allocate LZO pages\n");
   1278				ret = -ENOMEM;
   1279				goto out_clean;
   1280			} else {
   1281				break;
   1282			}
   1283		}
   1284	}
   1285	want = ring_size = i;
   1286
   1287	pr_info("Using %u thread(s) for decompression\n", nr_threads);
   1288	pr_info("Loading and decompressing image data (%u pages)...\n",
   1289		nr_to_read);
   1290	m = nr_to_read / 10;
   1291	if (!m)
   1292		m = 1;
   1293	nr_pages = 0;
   1294	start = ktime_get();
   1295
   1296	ret = snapshot_write_next(snapshot);
   1297	if (ret <= 0)
   1298		goto out_finish;
   1299
   1300	for(;;) {
   1301		for (i = 0; !eof && i < want; i++) {
   1302			ret = swap_read_page(handle, page[ring], &hb);
   1303			if (ret) {
   1304				/*
   1305				 * On real read error, finish. On end of data,
   1306				 * set EOF flag and just exit the read loop.
   1307				 */
   1308				if (handle->cur &&
   1309				    handle->cur->entries[handle->k]) {
   1310					goto out_finish;
   1311				} else {
   1312					eof = 1;
   1313					break;
   1314				}
   1315			}
   1316			if (++ring >= ring_size)
   1317				ring = 0;
   1318		}
   1319		asked += i;
   1320		want -= i;
   1321
   1322		/*
   1323		 * We are out of data, wait for some more.
   1324		 */
   1325		if (!have) {
   1326			if (!asked)
   1327				break;
   1328
   1329			ret = hib_wait_io(&hb);
   1330			if (ret)
   1331				goto out_finish;
   1332			have += asked;
   1333			asked = 0;
   1334			if (eof)
   1335				eof = 2;
   1336		}
   1337
   1338		if (crc->run_threads) {
   1339			wait_event(crc->done, atomic_read(&crc->stop));
   1340			atomic_set(&crc->stop, 0);
   1341			crc->run_threads = 0;
   1342		}
   1343
   1344		for (thr = 0; have && thr < nr_threads; thr++) {
   1345			data[thr].cmp_len = *(size_t *)page[pg];
   1346			if (unlikely(!data[thr].cmp_len ||
   1347			             data[thr].cmp_len >
   1348			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
   1349				pr_err("Invalid LZO compressed length\n");
   1350				ret = -1;
   1351				goto out_finish;
   1352			}
   1353
   1354			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
   1355			                    PAGE_SIZE);
   1356			if (need > have) {
   1357				if (eof > 1) {
   1358					ret = -1;
   1359					goto out_finish;
   1360				}
   1361				break;
   1362			}
   1363
   1364			for (off = 0;
   1365			     off < LZO_HEADER + data[thr].cmp_len;
   1366			     off += PAGE_SIZE) {
   1367				memcpy(data[thr].cmp + off,
   1368				       page[pg], PAGE_SIZE);
   1369				have--;
   1370				want++;
   1371				if (++pg >= ring_size)
   1372					pg = 0;
   1373			}
   1374
   1375			atomic_set(&data[thr].ready, 1);
   1376			wake_up(&data[thr].go);
   1377		}
   1378
   1379		/*
   1380		 * Wait for more data while we are decompressing.
   1381		 */
   1382		if (have < LZO_CMP_PAGES && asked) {
   1383			ret = hib_wait_io(&hb);
   1384			if (ret)
   1385				goto out_finish;
   1386			have += asked;
   1387			asked = 0;
   1388			if (eof)
   1389				eof = 2;
   1390		}
   1391
   1392		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
   1393			wait_event(data[thr].done,
   1394			           atomic_read(&data[thr].stop));
   1395			atomic_set(&data[thr].stop, 0);
   1396
   1397			ret = data[thr].ret;
   1398
   1399			if (ret < 0) {
   1400				pr_err("LZO decompression failed\n");
   1401				goto out_finish;
   1402			}
   1403
   1404			if (unlikely(!data[thr].unc_len ||
   1405			             data[thr].unc_len > LZO_UNC_SIZE ||
   1406			             data[thr].unc_len & (PAGE_SIZE - 1))) {
   1407				pr_err("Invalid LZO uncompressed length\n");
   1408				ret = -1;
   1409				goto out_finish;
   1410			}
   1411
   1412			for (off = 0;
   1413			     off < data[thr].unc_len; off += PAGE_SIZE) {
   1414				memcpy(data_of(*snapshot),
   1415				       data[thr].unc + off, PAGE_SIZE);
   1416
   1417				if (!(nr_pages % m))
   1418					pr_info("Image loading progress: %3d%%\n",
   1419						nr_pages / m * 10);
   1420				nr_pages++;
   1421
   1422				ret = snapshot_write_next(snapshot);
   1423				if (ret <= 0) {
   1424					crc->run_threads = thr + 1;
   1425					atomic_set(&crc->ready, 1);
   1426					wake_up(&crc->go);
   1427					goto out_finish;
   1428				}
   1429			}
   1430		}
   1431
   1432		crc->run_threads = thr;
   1433		atomic_set(&crc->ready, 1);
   1434		wake_up(&crc->go);
   1435	}
   1436
   1437out_finish:
   1438	if (crc->run_threads) {
   1439		wait_event(crc->done, atomic_read(&crc->stop));
   1440		atomic_set(&crc->stop, 0);
   1441	}
   1442	stop = ktime_get();
   1443	if (!ret) {
   1444		pr_info("Image loading done\n");
   1445		snapshot_write_finalize(snapshot);
   1446		if (!snapshot_image_loaded(snapshot))
   1447			ret = -ENODATA;
   1448		if (!ret) {
   1449			if (swsusp_header->flags & SF_CRC32_MODE) {
   1450				if(handle->crc32 != swsusp_header->crc32) {
   1451					pr_err("Invalid image CRC32!\n");
   1452					ret = -ENODATA;
   1453				}
   1454			}
   1455		}
   1456	}
   1457	swsusp_show_speed(start, stop, nr_to_read, "Read");
   1458out_clean:
   1459	hib_finish_batch(&hb);
   1460	for (i = 0; i < ring_size; i++)
   1461		free_page((unsigned long)page[i]);
   1462	if (crc) {
   1463		if (crc->thr)
   1464			kthread_stop(crc->thr);
   1465		kfree(crc);
   1466	}
   1467	if (data) {
   1468		for (thr = 0; thr < nr_threads; thr++)
   1469			if (data[thr].thr)
   1470				kthread_stop(data[thr].thr);
   1471		vfree(data);
   1472	}
   1473	vfree(page);
   1474
   1475	return ret;
   1476}
   1477
   1478/**
   1479 *	swsusp_read - read the hibernation image.
   1480 *	@flags_p: flags passed by the "frozen" kernel in the image header should
   1481 *		  be written into this memory location
   1482 */
   1483
   1484int swsusp_read(unsigned int *flags_p)
   1485{
   1486	int error;
   1487	struct swap_map_handle handle;
   1488	struct snapshot_handle snapshot;
   1489	struct swsusp_info *header;
   1490
   1491	memset(&snapshot, 0, sizeof(struct snapshot_handle));
   1492	error = snapshot_write_next(&snapshot);
   1493	if (error < (int)PAGE_SIZE)
   1494		return error < 0 ? error : -EFAULT;
   1495	header = (struct swsusp_info *)data_of(snapshot);
   1496	error = get_swap_reader(&handle, flags_p);
   1497	if (error)
   1498		goto end;
   1499	if (!error)
   1500		error = swap_read_page(&handle, header, NULL);
   1501	if (!error) {
   1502		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
   1503			load_image(&handle, &snapshot, header->pages - 1) :
   1504			load_image_lzo(&handle, &snapshot, header->pages - 1);
   1505	}
   1506	swap_reader_finish(&handle);
   1507end:
   1508	if (!error)
   1509		pr_debug("Image successfully loaded\n");
   1510	else
   1511		pr_debug("Error %d resuming\n", error);
   1512	return error;
   1513}
   1514
   1515/**
   1516 *      swsusp_check - Check for swsusp signature in the resume device
   1517 */
   1518
   1519int swsusp_check(void)
   1520{
   1521	int error;
   1522	void *holder;
   1523
   1524	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
   1525					    FMODE_READ | FMODE_EXCL, &holder);
   1526	if (!IS_ERR(hib_resume_bdev)) {
   1527		set_blocksize(hib_resume_bdev, PAGE_SIZE);
   1528		clear_page(swsusp_header);
   1529		error = hib_submit_io(REQ_OP_READ, 0,
   1530					swsusp_resume_block,
   1531					swsusp_header, NULL);
   1532		if (error)
   1533			goto put;
   1534
   1535		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
   1536			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
   1537			/* Reset swap signature now */
   1538			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
   1539						swsusp_resume_block,
   1540						swsusp_header, NULL);
   1541		} else {
   1542			error = -EINVAL;
   1543		}
   1544		if (!error && swsusp_header->flags & SF_HW_SIG &&
   1545		    swsusp_header->hw_sig != swsusp_hardware_signature) {
   1546			pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
   1547				swsusp_header->hw_sig, swsusp_hardware_signature);
   1548			error = -EINVAL;
   1549		}
   1550
   1551put:
   1552		if (error)
   1553			blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL);
   1554		else
   1555			pr_debug("Image signature found, resuming\n");
   1556	} else {
   1557		error = PTR_ERR(hib_resume_bdev);
   1558	}
   1559
   1560	if (error)
   1561		pr_debug("Image not found (code %d)\n", error);
   1562
   1563	return error;
   1564}
   1565
   1566/**
   1567 *	swsusp_close - close swap device.
   1568 */
   1569
   1570void swsusp_close(fmode_t mode)
   1571{
   1572	if (IS_ERR(hib_resume_bdev)) {
   1573		pr_debug("Image device not initialised\n");
   1574		return;
   1575	}
   1576
   1577	blkdev_put(hib_resume_bdev, mode);
   1578}
   1579
   1580/**
   1581 *      swsusp_unmark - Unmark swsusp signature in the resume device
   1582 */
   1583
   1584#ifdef CONFIG_SUSPEND
   1585int swsusp_unmark(void)
   1586{
   1587	int error;
   1588
   1589	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
   1590		      swsusp_header, NULL);
   1591	if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) {
   1592		memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10);
   1593		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
   1594					swsusp_resume_block,
   1595					swsusp_header, NULL);
   1596	} else {
   1597		pr_err("Cannot find swsusp signature!\n");
   1598		error = -ENODEV;
   1599	}
   1600
   1601	/*
   1602	 * We just returned from suspend, we don't need the image any more.
   1603	 */
   1604	free_all_swap_pages(root_swap);
   1605
   1606	return error;
   1607}
   1608#endif
   1609
   1610static int __init swsusp_header_init(void)
   1611{
   1612	swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
   1613	if (!swsusp_header)
   1614		panic("Could not allocate memory for swsusp_header\n");
   1615	return 0;
   1616}
   1617
   1618core_initcall(swsusp_header_init);