cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

zone.c (36255B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Provide a pstore intermediate backend, organized into kernel memory
      4 * allocated zones that are then mapped and flushed into a single
      5 * contiguous region on a storage backend of some kind (block, mtd, etc).
      6 */
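
/*
 * Storage layout produced by psz_alloc_zones() below (a sketch derived
 * from the allocation order in this file):
 *
 *	0                                                    total_size
 *	+--------+-----------+----------------+---------------------+
 *	|  pmsg  |  console  |  ftrace (xN)   |   kmsg dump (xM)    |
 *	+--------+-----------+----------------+---------------------+
 *
 * Each zone begins with a struct psz_buffer header.
 */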
      7
      8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      9
     10#include <linux/kernel.h>
     11#include <linux/module.h>
     12#include <linux/slab.h>
     13#include <linux/mount.h>
     14#include <linux/printk.h>
     15#include <linux/fs.h>
     16#include <linux/pstore_zone.h>
     17#include <linux/kdev_t.h>
     18#include <linux/device.h>
     19#include <linux/namei.h>
     20#include <linux/fcntl.h>
     21#include <linux/uio.h>
     22#include <linux/writeback.h>
     23#include "internal.h"
     24
     25/**
     26 * struct psz_buffer - header of zone to flush to storage
     27 *
     28 * @sig: signature to indicate header (PSZ_SIG xor PSZONE-type value)
     29 * @datalen: length of data in @data
      30 * @start: offset into @data where the stored bytes begin
     31 * @data: zone data.
     32 */
     33struct psz_buffer {
     34#define PSZ_SIG (0x43474244) /* DBGC */
     35	uint32_t sig;
     36	atomic_t datalen;
     37	atomic_t start;
     38	uint8_t data[];
     39};
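
/*
 * Illustrative sketch (psz_sig_matches() is hypothetical, not part of
 * this file): recovery can validate a header by checking that @sig
 * matches the zone's front-end type, since psz_init_zone() below sets
 * sig = type ^ PSZ_SIG:
 *
 *	static bool psz_sig_matches(const struct psz_buffer *buf,
 *				    enum pstore_type_id type)
 *	{
 *		return buf->sig == (type ^ PSZ_SIG);
 *	}
 */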
     40
     41/**
     42 * struct psz_kmsg_header - kmsg dump-specific header to flush to storage
     43 *
     44 * @magic: magic num for kmsg dump header
     45 * @time: kmsg dump trigger time
      46 * @compressed: whether compressed
     47 * @counter: kmsg dump counter
     48 * @reason: the kmsg dump reason (e.g. oops, panic, etc)
     49 * @data: pointer to log data
     50 *
     51 * This is a sub-header for a kmsg dump, trailing after &psz_buffer.
     52 */
     53struct psz_kmsg_header {
     54#define PSTORE_KMSG_HEADER_MAGIC 0x4dfc3ae5 /* Just a random number */
     55	uint32_t magic;
     56	struct timespec64 time;
     57	bool compressed;
     58	uint32_t counter;
     59	enum kmsg_dump_reason reason;
     60	uint8_t data[];
     61};
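
/*
 * On-storage layout of a kmsg dump zone (a sketch derived from the two
 * structures above):
 *
 *	zone->off
 *	v
 *	+-------------------+------------------------+--------------
 *	| struct psz_buffer | struct psz_kmsg_header | log text ...
 *	+-------------------+------------------------+--------------
 *
 * so the log text of a dump starts at
 * zone->off + sizeof(struct psz_buffer) + sizeof(struct psz_kmsg_header).
 */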
     62
     63/**
     64 * struct pstore_zone - single stored buffer
     65 *
     66 * @off: zone offset of storage
     67 * @type: front-end type for this zone
     68 * @name: front-end name for this zone
     69 * @buffer: pointer to data buffer managed by this zone
     70 * @oldbuf: pointer to old data buffer
     71 * @buffer_size: bytes in @buffer->data
     72 * @should_recover: whether this zone should recover from storage
      73 * @dirty: whether the data in @buffer is dirty
     74 *
     75 * zone structure in memory.
     76 */
     77struct pstore_zone {
     78	loff_t off;
     79	const char *name;
     80	enum pstore_type_id type;
     81
     82	struct psz_buffer *buffer;
     83	struct psz_buffer *oldbuf;
     84	size_t buffer_size;
     85	bool should_recover;
     86	atomic_t dirty;
     87};
     88
     89/**
     90 * struct psz_context - all about running state of pstore/zone
     91 *
     92 * @kpszs: kmsg dump storage zones
     93 * @ppsz: pmsg storage zone
     94 * @cpsz: console storage zone
     95 * @fpszs: ftrace storage zones
     96 * @kmsg_max_cnt: max count of @kpszs
      97 * @kmsg_read_cnt: counter of kmsg dumps read
      98 * @kmsg_write_cnt: counter of kmsg dumps written
      99 * @pmsg_read_cnt: counter of pmsg zone reads
     100 * @console_read_cnt: counter of console zone reads
     101 * @ftrace_max_cnt: max count of @fpszs
     102 * @ftrace_read_cnt: counter of ftrace zones read
    103 * @oops_counter: counter of oops dumps
    104 * @panic_counter: counter of panic dumps
    105 * @recovered: whether finished recovering data from storage
    106 * @on_panic: whether panic is happening
    107 * @pstore_zone_info_lock: lock to @pstore_zone_info
    108 * @pstore_zone_info: information from backend
    109 * @pstore: structure for pstore
    110 */
    111struct psz_context {
    112	struct pstore_zone **kpszs;
    113	struct pstore_zone *ppsz;
    114	struct pstore_zone *cpsz;
    115	struct pstore_zone **fpszs;
    116	unsigned int kmsg_max_cnt;
    117	unsigned int kmsg_read_cnt;
    118	unsigned int kmsg_write_cnt;
    119	unsigned int pmsg_read_cnt;
    120	unsigned int console_read_cnt;
    121	unsigned int ftrace_max_cnt;
    122	unsigned int ftrace_read_cnt;
     123	/*
     124	 * These counters should be recalculated during recovery.
     125	 * They record oops/panic counts across crashes rather than boots.
     126	 */
    127	unsigned int oops_counter;
    128	unsigned int panic_counter;
    129	atomic_t recovered;
    130	atomic_t on_panic;
    131
    132	/*
    133	 * pstore_zone_info_lock protects this entire structure during calls
    134	 * to register_pstore_zone()/unregister_pstore_zone().
    135	 */
    136	struct mutex pstore_zone_info_lock;
    137	struct pstore_zone_info *pstore_zone_info;
    138	struct pstore_info pstore;
    139};
    140static struct psz_context pstore_zone_cxt;
    141
    142static void psz_flush_all_dirty_zones(struct work_struct *);
    143static DECLARE_DELAYED_WORK(psz_cleaner, psz_flush_all_dirty_zones);
    144
    145/**
    146 * enum psz_flush_mode - flush mode for psz_zone_write()
    147 *
    148 * @FLUSH_NONE: do not flush to storage but update data on memory
    149 * @FLUSH_PART: just flush part of data including meta data to storage
    150 * @FLUSH_META: just flush meta data of zone to storage
    151 * @FLUSH_ALL: flush all of zone
    152 */
    153enum psz_flush_mode {
    154	FLUSH_NONE = 0,
    155	FLUSH_PART,
    156	FLUSH_META,
    157	FLUSH_ALL,
    158};
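
/*
 * Illustrative use of the flush modes (a sketch; see psz_record_write()
 * and psz_kmsg_write_record() below for the real call sites):
 *
 *	psz_zone_write(zone, FLUSH_PART, buf, len, off); // data + metadata
 *	psz_zone_write(zone, FLUSH_META, NULL, 0, 0);    // header only
 *	psz_zone_write(zone, FLUSH_ALL, buf, len, 0);    // whole zone
 */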
    159
    160static inline int buffer_datalen(struct pstore_zone *zone)
    161{
    162	return atomic_read(&zone->buffer->datalen);
    163}
    164
    165static inline int buffer_start(struct pstore_zone *zone)
    166{
    167	return atomic_read(&zone->buffer->start);
    168}
    169
    170static inline bool is_on_panic(void)
    171{
    172	return atomic_read(&pstore_zone_cxt.on_panic);
    173}
    174
    175static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf,
    176		size_t len, unsigned long off)
    177{
    178	if (!buf || !zone || !zone->buffer)
    179		return -EINVAL;
    180	if (off > zone->buffer_size)
    181		return -EINVAL;
    182	len = min_t(size_t, len, zone->buffer_size - off);
    183	memcpy(buf, zone->buffer->data + off, len);
    184	return len;
    185}
    186
    187static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf,
    188		size_t len, unsigned long off)
    189{
    190	if (!buf || !zone || !zone->oldbuf)
    191		return -EINVAL;
    192	if (off > zone->buffer_size)
    193		return -EINVAL;
    194	len = min_t(size_t, len, zone->buffer_size - off);
    195	memcpy(buf, zone->oldbuf->data + off, len);
    196	return 0;
    197}
    198
    199static int psz_zone_write(struct pstore_zone *zone,
    200		enum psz_flush_mode flush_mode, const char *buf,
    201		size_t len, unsigned long off)
    202{
    203	struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
    204	ssize_t wcnt = 0;
    205	ssize_t (*writeop)(const char *buf, size_t bytes, loff_t pos);
    206	size_t wlen;
    207
    208	if (off > zone->buffer_size)
    209		return -EINVAL;
    210
    211	wlen = min_t(size_t, len, zone->buffer_size - off);
    212	if (buf && wlen) {
    213		memcpy(zone->buffer->data + off, buf, wlen);
    214		atomic_set(&zone->buffer->datalen, wlen + off);
    215	}
    216
     217	/* avoid damaging old records */
    218	if (!is_on_panic() && !atomic_read(&pstore_zone_cxt.recovered))
    219		goto dirty;
    220
    221	writeop = is_on_panic() ? info->panic_write : info->write;
    222	if (!writeop)
    223		goto dirty;
    224
    225	switch (flush_mode) {
    226	case FLUSH_NONE:
    227		if (unlikely(buf && wlen))
    228			goto dirty;
    229		return 0;
    230	case FLUSH_PART:
    231		wcnt = writeop((const char *)zone->buffer->data + off, wlen,
    232				zone->off + sizeof(*zone->buffer) + off);
    233		if (wcnt != wlen)
    234			goto dirty;
    235		fallthrough;
    236	case FLUSH_META:
    237		wlen = sizeof(struct psz_buffer);
    238		wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
    239		if (wcnt != wlen)
    240			goto dirty;
    241		break;
    242	case FLUSH_ALL:
    243		wlen = zone->buffer_size + sizeof(*zone->buffer);
    244		wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
    245		if (wcnt != wlen)
    246			goto dirty;
    247		break;
    248	}
    249
    250	return 0;
    251dirty:
    252	/* no need to mark dirty if going to try next zone */
    253	if (wcnt == -ENOMSG)
    254		return -ENOMSG;
    255	atomic_set(&zone->dirty, true);
    256	/* flush dirty zones nicely */
    257	if (wcnt == -EBUSY && !is_on_panic())
    258		schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(500));
    259	return -EBUSY;
    260}
    261
    262static int psz_flush_dirty_zone(struct pstore_zone *zone)
    263{
    264	int ret;
    265
    266	if (unlikely(!zone))
    267		return -EINVAL;
    268
    269	if (unlikely(!atomic_read(&pstore_zone_cxt.recovered)))
    270		return -EBUSY;
    271
    272	if (!atomic_xchg(&zone->dirty, false))
    273		return 0;
    274
    275	ret = psz_zone_write(zone, FLUSH_ALL, NULL, 0, 0);
    276	if (ret)
    277		atomic_set(&zone->dirty, true);
    278	return ret;
    279}
    280
    281static int psz_flush_dirty_zones(struct pstore_zone **zones, unsigned int cnt)
    282{
    283	int i, ret;
    284	struct pstore_zone *zone;
    285
    286	if (!zones)
    287		return -EINVAL;
    288
    289	for (i = 0; i < cnt; i++) {
    290		zone = zones[i];
    291		if (!zone)
    292			return -EINVAL;
    293		ret = psz_flush_dirty_zone(zone);
    294		if (ret)
    295			return ret;
    296	}
    297	return 0;
    298}
    299
    300static int psz_move_zone(struct pstore_zone *old, struct pstore_zone *new)
    301{
    302	const char *data = (const char *)old->buffer->data;
    303	int ret;
    304
    305	ret = psz_zone_write(new, FLUSH_ALL, data, buffer_datalen(old), 0);
    306	if (ret) {
    307		atomic_set(&new->buffer->datalen, 0);
    308		atomic_set(&new->dirty, false);
    309		return ret;
    310	}
    311	atomic_set(&old->buffer->datalen, 0);
    312	return 0;
    313}
    314
    315static void psz_flush_all_dirty_zones(struct work_struct *work)
    316{
    317	struct psz_context *cxt = &pstore_zone_cxt;
    318	int ret = 0;
    319
    320	if (cxt->ppsz)
    321		ret |= psz_flush_dirty_zone(cxt->ppsz);
    322	if (cxt->cpsz)
    323		ret |= psz_flush_dirty_zone(cxt->cpsz);
    324	if (cxt->kpszs)
    325		ret |= psz_flush_dirty_zones(cxt->kpszs, cxt->kmsg_max_cnt);
    326	if (cxt->fpszs)
    327		ret |= psz_flush_dirty_zones(cxt->fpszs, cxt->ftrace_max_cnt);
    328	if (ret && cxt->pstore_zone_info)
    329		schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(1000));
    330}
    331
    332static int psz_kmsg_recover_data(struct psz_context *cxt)
    333{
    334	struct pstore_zone_info *info = cxt->pstore_zone_info;
    335	struct pstore_zone *zone = NULL;
    336	struct psz_buffer *buf;
    337	unsigned long i;
    338	ssize_t rcnt;
    339
    340	if (!info->read)
    341		return -EINVAL;
    342
    343	for (i = 0; i < cxt->kmsg_max_cnt; i++) {
    344		zone = cxt->kpszs[i];
    345		if (unlikely(!zone))
    346			return -EINVAL;
    347		if (atomic_read(&zone->dirty)) {
    348			unsigned int wcnt = cxt->kmsg_write_cnt;
    349			struct pstore_zone *new = cxt->kpszs[wcnt];
    350			int ret;
    351
    352			ret = psz_move_zone(zone, new);
    353			if (ret) {
    354				pr_err("move zone from %lu to %d failed\n",
    355						i, wcnt);
    356				return ret;
    357			}
    358			cxt->kmsg_write_cnt = (wcnt + 1) % cxt->kmsg_max_cnt;
    359		}
    360		if (!zone->should_recover)
    361			continue;
    362		buf = zone->buffer;
    363		rcnt = info->read((char *)buf, zone->buffer_size + sizeof(*buf),
    364				zone->off);
    365		if (rcnt != zone->buffer_size + sizeof(*buf))
    366			return (int)rcnt < 0 ? (int)rcnt : -EIO;
    367	}
    368	return 0;
    369}
    370
    371static int psz_kmsg_recover_meta(struct psz_context *cxt)
    372{
    373	struct pstore_zone_info *info = cxt->pstore_zone_info;
    374	struct pstore_zone *zone;
    375	size_t rcnt, len;
    376	struct psz_buffer *buf;
    377	struct psz_kmsg_header *hdr;
    378	struct timespec64 time = { };
    379	unsigned long i;
     380	/*
     381	 * Recovery may run during panic, when we cannot allocate memory
     382	 * with kmalloc, so use a local array instead.
     383	 */
    384	char buffer_header[sizeof(*buf) + sizeof(*hdr)] = {0};
    385
    386	if (!info->read)
    387		return -EINVAL;
    388
    389	len = sizeof(*buf) + sizeof(*hdr);
    390	buf = (struct psz_buffer *)buffer_header;
    391	for (i = 0; i < cxt->kmsg_max_cnt; i++) {
    392		zone = cxt->kpszs[i];
    393		if (unlikely(!zone))
    394			return -EINVAL;
    395
    396		rcnt = info->read((char *)buf, len, zone->off);
    397		if (rcnt == -ENOMSG) {
    398			pr_debug("%s with id %lu may be broken, skip\n",
    399					zone->name, i);
    400			continue;
    401		} else if (rcnt != len) {
    402			pr_err("read %s with id %lu failed\n", zone->name, i);
    403			return (int)rcnt < 0 ? (int)rcnt : -EIO;
    404		}
    405
    406		if (buf->sig != zone->buffer->sig) {
    407			pr_debug("no valid data in kmsg dump zone %lu\n", i);
    408			continue;
    409		}
    410
    411		if (zone->buffer_size < atomic_read(&buf->datalen)) {
    412			pr_info("found overtop zone: %s: id %lu, off %lld, size %zu\n",
    413					zone->name, i, zone->off,
    414					zone->buffer_size);
    415			continue;
    416		}
    417
    418		hdr = (struct psz_kmsg_header *)buf->data;
    419		if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC) {
    420			pr_info("found invalid zone: %s: id %lu, off %lld, size %zu\n",
    421					zone->name, i, zone->off,
    422					zone->buffer_size);
    423			continue;
    424		}
    425
     426		/*
     427		 * We found the newest zone; the next one must be the oldest or
     428		 * an unused zone, because zones are written one by one in a circle.
     429		 */
    430		if (hdr->time.tv_sec >= time.tv_sec) {
    431			time.tv_sec = hdr->time.tv_sec;
    432			cxt->kmsg_write_cnt = (i + 1) % cxt->kmsg_max_cnt;
    433		}
    434
    435		if (hdr->reason == KMSG_DUMP_OOPS)
    436			cxt->oops_counter =
    437				max(cxt->oops_counter, hdr->counter);
    438		else if (hdr->reason == KMSG_DUMP_PANIC)
    439			cxt->panic_counter =
    440				max(cxt->panic_counter, hdr->counter);
    441
    442		if (!atomic_read(&buf->datalen)) {
    443			pr_debug("found erased zone: %s: id %lu, off %lld, size %zu, datalen %d\n",
    444					zone->name, i, zone->off,
    445					zone->buffer_size,
    446					atomic_read(&buf->datalen));
    447			continue;
    448		}
    449
    450		if (!is_on_panic())
    451			zone->should_recover = true;
    452		pr_debug("found nice zone: %s: id %lu, off %lld, size %zu, datalen %d\n",
    453				zone->name, i, zone->off,
    454				zone->buffer_size, atomic_read(&buf->datalen));
    455	}
    456
    457	return 0;
    458}
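
/*
 * Worked example (illustrative): with kmsg_max_cnt == 4 and valid dumps
 * in zones 0..2 with timestamps t0 <= t1 <= t2, the loop above leaves
 * kmsg_write_cnt == (2 + 1) % 4 == 3, so the next dump goes to the
 * oldest or unused slot.
 */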
    459
    460static int psz_kmsg_recover(struct psz_context *cxt)
    461{
    462	int ret;
    463
    464	if (!cxt->kpszs)
    465		return 0;
    466
    467	ret = psz_kmsg_recover_meta(cxt);
    468	if (ret)
    469		goto recover_fail;
    470
    471	ret = psz_kmsg_recover_data(cxt);
    472	if (ret)
    473		goto recover_fail;
    474
    475	return 0;
    476recover_fail:
    477	pr_debug("psz_recover_kmsg failed\n");
    478	return ret;
    479}
    480
    481static int psz_recover_zone(struct psz_context *cxt, struct pstore_zone *zone)
    482{
    483	struct pstore_zone_info *info = cxt->pstore_zone_info;
    484	struct psz_buffer *oldbuf, tmpbuf;
    485	int ret = 0;
    486	char *buf;
    487	ssize_t rcnt, len, start, off;
    488
    489	if (!zone || zone->oldbuf)
    490		return 0;
    491
    492	if (is_on_panic()) {
     493		/* save as much data as possible */
    494		psz_flush_dirty_zone(zone);
    495		return 0;
    496	}
    497
    498	if (unlikely(!info->read))
    499		return -EINVAL;
    500
    501	len = sizeof(struct psz_buffer);
    502	rcnt = info->read((char *)&tmpbuf, len, zone->off);
    503	if (rcnt != len) {
    504		pr_debug("read zone %s failed\n", zone->name);
    505		return (int)rcnt < 0 ? (int)rcnt : -EIO;
    506	}
    507
    508	if (tmpbuf.sig != zone->buffer->sig) {
    509		pr_debug("no valid data in zone %s\n", zone->name);
    510		return 0;
    511	}
    512
    513	if (zone->buffer_size < atomic_read(&tmpbuf.datalen) ||
    514		zone->buffer_size < atomic_read(&tmpbuf.start)) {
    515		pr_info("found overtop zone: %s: off %lld, size %zu\n",
    516				zone->name, zone->off, zone->buffer_size);
    517		/* just keep going */
    518		return 0;
    519	}
    520
    521	if (!atomic_read(&tmpbuf.datalen)) {
    522		pr_debug("found erased zone: %s: off %lld, size %zu, datalen %d\n",
    523				zone->name, zone->off, zone->buffer_size,
    524				atomic_read(&tmpbuf.datalen));
    525		return 0;
    526	}
    527
    528	pr_debug("found nice zone: %s: off %lld, size %zu, datalen %d\n",
    529			zone->name, zone->off, zone->buffer_size,
    530			atomic_read(&tmpbuf.datalen));
    531
    532	len = atomic_read(&tmpbuf.datalen) + sizeof(*oldbuf);
    533	oldbuf = kzalloc(len, GFP_KERNEL);
    534	if (!oldbuf)
    535		return -ENOMEM;
    536
    537	memcpy(oldbuf, &tmpbuf, sizeof(*oldbuf));
    538	buf = (char *)oldbuf + sizeof(*oldbuf);
    539	len = atomic_read(&oldbuf->datalen);
    540	start = atomic_read(&oldbuf->start);
    541	off = zone->off + sizeof(*oldbuf);
    542
    543	/* get part of data */
    544	rcnt = info->read(buf, len - start, off + start);
    545	if (rcnt != len - start) {
    546		pr_err("read zone %s failed\n", zone->name);
    547		ret = (int)rcnt < 0 ? (int)rcnt : -EIO;
    548		goto free_oldbuf;
    549	}
    550
    551	/* get the rest of data */
    552	rcnt = info->read(buf + len - start, start, off);
    553	if (rcnt != start) {
    554		pr_err("read zone %s failed\n", zone->name);
    555		ret = (int)rcnt < 0 ? (int)rcnt : -EIO;
    556		goto free_oldbuf;
    557	}
    558
    559	zone->oldbuf = oldbuf;
    560	psz_flush_dirty_zone(zone);
    561	return 0;
    562
    563free_oldbuf:
    564	kfree(oldbuf);
    565	return ret;
    566}
    567
    568static int psz_recover_zones(struct psz_context *cxt,
    569		struct pstore_zone **zones, unsigned int cnt)
    570{
    571	int ret;
    572	unsigned int i;
    573	struct pstore_zone *zone;
    574
    575	if (!zones)
    576		return 0;
    577
    578	for (i = 0; i < cnt; i++) {
    579		zone = zones[i];
    580		if (unlikely(!zone))
    581			continue;
    582		ret = psz_recover_zone(cxt, zone);
    583		if (ret)
    584			goto recover_fail;
    585	}
    586
    587	return 0;
    588recover_fail:
    589	pr_debug("recover %s[%u] failed\n", zone->name, i);
    590	return ret;
    591}
    592
    593/**
    594 * psz_recovery() - recover data from storage
    595 * @cxt: the context of pstore/zone
    596 *
    597 * recovery means reading data back from storage after rebooting
    598 *
    599 * Return: 0 on success, others on failure.
    600 */
    601static inline int psz_recovery(struct psz_context *cxt)
    602{
    603	int ret;
    604
    605	if (atomic_read(&cxt->recovered))
    606		return 0;
    607
    608	ret = psz_kmsg_recover(cxt);
    609	if (ret)
    610		goto out;
    611
    612	ret = psz_recover_zone(cxt, cxt->ppsz);
    613	if (ret)
    614		goto out;
    615
    616	ret = psz_recover_zone(cxt, cxt->cpsz);
    617	if (ret)
    618		goto out;
    619
    620	ret = psz_recover_zones(cxt, cxt->fpszs, cxt->ftrace_max_cnt);
    621
    622out:
    623	if (unlikely(ret))
    624		pr_err("recover failed\n");
    625	else {
    626		pr_debug("recover end!\n");
    627		atomic_set(&cxt->recovered, 1);
    628	}
    629	return ret;
    630}
    631
    632static int psz_pstore_open(struct pstore_info *psi)
    633{
    634	struct psz_context *cxt = psi->data;
    635
    636	cxt->kmsg_read_cnt = 0;
    637	cxt->pmsg_read_cnt = 0;
    638	cxt->console_read_cnt = 0;
    639	cxt->ftrace_read_cnt = 0;
    640	return 0;
    641}
    642
    643static inline bool psz_old_ok(struct pstore_zone *zone)
    644{
    645	if (zone && zone->oldbuf && atomic_read(&zone->oldbuf->datalen))
    646		return true;
    647	return false;
    648}
    649
    650static inline bool psz_ok(struct pstore_zone *zone)
    651{
    652	if (zone && zone->buffer && buffer_datalen(zone))
    653		return true;
    654	return false;
    655}
    656
    657static inline int psz_kmsg_erase(struct psz_context *cxt,
    658		struct pstore_zone *zone, struct pstore_record *record)
    659{
    660	struct psz_buffer *buffer = zone->buffer;
    661	struct psz_kmsg_header *hdr =
    662		(struct psz_kmsg_header *)buffer->data;
    663	size_t size;
    664
    665	if (unlikely(!psz_ok(zone)))
    666		return 0;
    667
    668	/* this zone is already updated, no need to erase */
    669	if (record->count != hdr->counter)
    670		return 0;
    671
    672	size = buffer_datalen(zone) + sizeof(*zone->buffer);
    673	atomic_set(&zone->buffer->datalen, 0);
    674	if (cxt->pstore_zone_info->erase)
    675		return cxt->pstore_zone_info->erase(size, zone->off);
    676	else
    677		return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
    678}
    679
    680static inline int psz_record_erase(struct psz_context *cxt,
    681		struct pstore_zone *zone)
    682{
    683	if (unlikely(!psz_old_ok(zone)))
    684		return 0;
    685
    686	kfree(zone->oldbuf);
    687	zone->oldbuf = NULL;
     688	/*
     689	 * If there is new data in the zone buffer, the old data is already
     690	 * invalid, so there is no need to flush zeroes (erase) to the
     691	 * block device.
     692	 */
    693	if (!buffer_datalen(zone))
    694		return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
    695	psz_flush_dirty_zone(zone);
    696	return 0;
    697}
    698
    699static int psz_pstore_erase(struct pstore_record *record)
    700{
    701	struct psz_context *cxt = record->psi->data;
    702
    703	switch (record->type) {
    704	case PSTORE_TYPE_DMESG:
    705		if (record->id >= cxt->kmsg_max_cnt)
    706			return -EINVAL;
    707		return psz_kmsg_erase(cxt, cxt->kpszs[record->id], record);
    708	case PSTORE_TYPE_PMSG:
    709		return psz_record_erase(cxt, cxt->ppsz);
    710	case PSTORE_TYPE_CONSOLE:
    711		return psz_record_erase(cxt, cxt->cpsz);
    712	case PSTORE_TYPE_FTRACE:
    713		if (record->id >= cxt->ftrace_max_cnt)
    714			return -EINVAL;
    715		return psz_record_erase(cxt, cxt->fpszs[record->id]);
    716	default: return -EINVAL;
    717	}
    718}
    719
    720static void psz_write_kmsg_hdr(struct pstore_zone *zone,
    721		struct pstore_record *record)
    722{
    723	struct psz_context *cxt = record->psi->data;
    724	struct psz_buffer *buffer = zone->buffer;
    725	struct psz_kmsg_header *hdr =
    726		(struct psz_kmsg_header *)buffer->data;
    727
    728	hdr->magic = PSTORE_KMSG_HEADER_MAGIC;
    729	hdr->compressed = record->compressed;
    730	hdr->time.tv_sec = record->time.tv_sec;
    731	hdr->time.tv_nsec = record->time.tv_nsec;
    732	hdr->reason = record->reason;
    733	if (hdr->reason == KMSG_DUMP_OOPS)
    734		hdr->counter = ++cxt->oops_counter;
    735	else if (hdr->reason == KMSG_DUMP_PANIC)
    736		hdr->counter = ++cxt->panic_counter;
    737	else
    738		hdr->counter = 0;
    739}
    740
     741/*
     742 * In case a zone is broken, which may happen on an MTD device, try each
     743 * zone in turn, starting at cxt->kmsg_write_cnt.
     744 */
    745static inline int notrace psz_kmsg_write_record(struct psz_context *cxt,
    746		struct pstore_record *record)
    747{
    748	size_t size, hlen;
    749	struct pstore_zone *zone;
    750	unsigned int i;
    751
    752	for (i = 0; i < cxt->kmsg_max_cnt; i++) {
    753		unsigned int zonenum, len;
    754		int ret;
    755
    756		zonenum = (cxt->kmsg_write_cnt + i) % cxt->kmsg_max_cnt;
    757		zone = cxt->kpszs[zonenum];
    758		if (unlikely(!zone))
    759			return -ENOSPC;
    760
    761		/* avoid destroying old data, allocate a new one */
    762		len = zone->buffer_size + sizeof(*zone->buffer);
    763		zone->oldbuf = zone->buffer;
    764		zone->buffer = kzalloc(len, GFP_KERNEL);
    765		if (!zone->buffer) {
    766			zone->buffer = zone->oldbuf;
    767			return -ENOMEM;
    768		}
    769		zone->buffer->sig = zone->oldbuf->sig;
    770
    771		pr_debug("write %s to zone id %d\n", zone->name, zonenum);
    772		psz_write_kmsg_hdr(zone, record);
    773		hlen = sizeof(struct psz_kmsg_header);
    774		size = min_t(size_t, record->size, zone->buffer_size - hlen);
    775		ret = psz_zone_write(zone, FLUSH_ALL, record->buf, size, hlen);
    776		if (likely(!ret || ret != -ENOMSG)) {
    777			cxt->kmsg_write_cnt = zonenum + 1;
    778			cxt->kmsg_write_cnt %= cxt->kmsg_max_cnt;
    779			/* no need to try next zone, free last zone buffer */
    780			kfree(zone->oldbuf);
    781			zone->oldbuf = NULL;
    782			return ret;
    783		}
    784
    785		pr_debug("zone %u may be broken, try next dmesg zone\n",
    786				zonenum);
    787		kfree(zone->buffer);
    788		zone->buffer = zone->oldbuf;
    789		zone->oldbuf = NULL;
    790	}
    791
    792	return -EBUSY;
    793}
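
/*
 * Illustrative index math: with kmsg_max_cnt == 3 and kmsg_write_cnt == 2,
 * the loop above probes zones 2, 0, 1 in that order before giving up
 * with -EBUSY.
 */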
    794
    795static int notrace psz_kmsg_write(struct psz_context *cxt,
    796		struct pstore_record *record)
    797{
    798	int ret;
    799
    800	/*
    801	 * Explicitly only take the first part of any new crash.
    802	 * If our buffer is larger than kmsg_bytes, this can never happen,
    803	 * and if our buffer is smaller than kmsg_bytes, we don't want the
    804	 * report split across multiple records.
    805	 */
    806	if (record->part != 1)
    807		return -ENOSPC;
    808
    809	if (!cxt->kpszs)
    810		return -ENOSPC;
    811
    812	ret = psz_kmsg_write_record(cxt, record);
    813	if (!ret && is_on_panic()) {
    814		/* ensure all data are flushed to storage when panic */
    815		pr_debug("try to flush other dirty zones\n");
    816		psz_flush_all_dirty_zones(NULL);
    817	}
    818
     819	/* always return 0 as the record has been handled in the buffer */
    820	return 0;
    821}
    822
    823static int notrace psz_record_write(struct pstore_zone *zone,
    824		struct pstore_record *record)
    825{
    826	size_t start, rem;
    827	bool is_full_data = false;
    828	char *buf;
    829	int cnt;
    830
    831	if (!zone || !record)
    832		return -ENOSPC;
    833
    834	if (atomic_read(&zone->buffer->datalen) >= zone->buffer_size)
    835		is_full_data = true;
    836
    837	cnt = record->size;
    838	buf = record->buf;
    839	if (unlikely(cnt > zone->buffer_size)) {
    840		buf += cnt - zone->buffer_size;
    841		cnt = zone->buffer_size;
    842	}
    843
    844	start = buffer_start(zone);
    845	rem = zone->buffer_size - start;
    846	if (unlikely(rem < cnt)) {
    847		psz_zone_write(zone, FLUSH_PART, buf, rem, start);
    848		buf += rem;
    849		cnt -= rem;
    850		start = 0;
    851		is_full_data = true;
    852	}
    853
    854	atomic_set(&zone->buffer->start, cnt + start);
    855	psz_zone_write(zone, FLUSH_PART, buf, cnt, start);
    856
     857	/*
     858	 * psz_zone_write() sets datalen to start + cnt, which is correct
     859	 * while the actual data length is less than the buffer size.
     860	 * Once the data length exceeds the buffer size, pmsg wraps around
     861	 * to the beginning of the zone, which would leave buffer->datalen
     862	 * wrong. So reset datalen to the buffer size whenever the actual
     863	 * data length exceeds it.
     864	 */
    865	if (is_full_data) {
    866		atomic_set(&zone->buffer->datalen, zone->buffer_size);
    867		psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
    868	}
    869	return 0;
    870}
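
/*
 * Worked example (illustrative): with buffer_size == 8, start == 6 and a
 * 5-byte write, rem == 2, so the first two bytes land at offsets 6-7 and
 * the remaining three wrap to offsets 0-2; start becomes 3 and, since the
 * zone wrapped, datalen is pinned to buffer_size by the FLUSH_META above.
 */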
    871
    872static int notrace psz_pstore_write(struct pstore_record *record)
    873{
    874	struct psz_context *cxt = record->psi->data;
    875
    876	if (record->type == PSTORE_TYPE_DMESG &&
    877			record->reason == KMSG_DUMP_PANIC)
    878		atomic_set(&cxt->on_panic, 1);
    879
     880	/*
     881	 * While on panic, write nothing except panic records. This fixes the
     882	 * case where panic_write prints a log that wakes the console backend.
     883	 */
    884	if (is_on_panic() && record->type != PSTORE_TYPE_DMESG)
    885		return -EBUSY;
    886
    887	switch (record->type) {
    888	case PSTORE_TYPE_DMESG:
    889		return psz_kmsg_write(cxt, record);
    890	case PSTORE_TYPE_CONSOLE:
    891		return psz_record_write(cxt->cpsz, record);
    892	case PSTORE_TYPE_PMSG:
    893		return psz_record_write(cxt->ppsz, record);
    894	case PSTORE_TYPE_FTRACE: {
    895		int zonenum = smp_processor_id();
    896
    897		if (!cxt->fpszs)
    898			return -ENOSPC;
    899		return psz_record_write(cxt->fpszs[zonenum], record);
    900	}
    901	default:
    902		return -EINVAL;
    903	}
    904}
    905
    906static struct pstore_zone *psz_read_next_zone(struct psz_context *cxt)
    907{
    908	struct pstore_zone *zone = NULL;
    909
    910	while (cxt->kmsg_read_cnt < cxt->kmsg_max_cnt) {
    911		zone = cxt->kpszs[cxt->kmsg_read_cnt++];
    912		if (psz_ok(zone))
    913			return zone;
    914	}
    915
    916	if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt)
     917		/*
     918		 * No need to call psz_old_ok() here; psz_ftrace_read() does it
     919		 * when combining zones, and it must traverse all zones anyway
     920		 * in case some of them hold no data.
     921		 */
    922		return cxt->fpszs[cxt->ftrace_read_cnt++];
    923
    924	if (cxt->pmsg_read_cnt == 0) {
    925		cxt->pmsg_read_cnt++;
    926		zone = cxt->ppsz;
    927		if (psz_old_ok(zone))
    928			return zone;
    929	}
    930
    931	if (cxt->console_read_cnt == 0) {
    932		cxt->console_read_cnt++;
    933		zone = cxt->cpsz;
    934		if (psz_old_ok(zone))
    935			return zone;
    936	}
    937
    938	return NULL;
    939}
    940
    941static int psz_kmsg_read_hdr(struct pstore_zone *zone,
    942		struct pstore_record *record)
    943{
    944	struct psz_buffer *buffer = zone->buffer;
    945	struct psz_kmsg_header *hdr =
    946		(struct psz_kmsg_header *)buffer->data;
    947
    948	if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC)
    949		return -EINVAL;
    950	record->compressed = hdr->compressed;
    951	record->time.tv_sec = hdr->time.tv_sec;
    952	record->time.tv_nsec = hdr->time.tv_nsec;
    953	record->reason = hdr->reason;
    954	record->count = hdr->counter;
    955	return 0;
    956}
    957
    958static ssize_t psz_kmsg_read(struct pstore_zone *zone,
    959		struct pstore_record *record)
    960{
    961	ssize_t size, hlen = 0;
    962
    963	size = buffer_datalen(zone);
    964	/* Clear and skip this kmsg dump record if it has no valid header */
    965	if (psz_kmsg_read_hdr(zone, record)) {
    966		atomic_set(&zone->buffer->datalen, 0);
    967		atomic_set(&zone->dirty, 0);
    968		return -ENOMSG;
    969	}
    970	size -= sizeof(struct psz_kmsg_header);
    971
    972	if (!record->compressed) {
    973		char *buf = kasprintf(GFP_KERNEL, "%s: Total %d times\n",
    974				      kmsg_dump_reason_str(record->reason),
    975				      record->count);
     976		hlen = buf ? strlen(buf) : 0;
    977		record->buf = krealloc(buf, hlen + size, GFP_KERNEL);
    978		if (!record->buf) {
    979			kfree(buf);
    980			return -ENOMEM;
    981		}
    982	} else {
    983		record->buf = kmalloc(size, GFP_KERNEL);
    984		if (!record->buf)
    985			return -ENOMEM;
    986	}
    987
    988	size = psz_zone_read_buffer(zone, record->buf + hlen, size,
    989			sizeof(struct psz_kmsg_header));
    990	if (unlikely(size < 0)) {
    991		kfree(record->buf);
    992		return -ENOMSG;
    993	}
    994
    995	return size + hlen;
    996}
    997
    998/* try to combine all ftrace zones */
    999static ssize_t psz_ftrace_read(struct pstore_zone *zone,
   1000		struct pstore_record *record)
   1001{
   1002	struct psz_context *cxt;
   1003	struct psz_buffer *buf;
   1004	int ret;
   1005
   1006	if (!zone || !record)
   1007		return -ENOSPC;
   1008
   1009	if (!psz_old_ok(zone))
   1010		goto out;
   1011
   1012	buf = (struct psz_buffer *)zone->oldbuf;
   1013	if (!buf)
   1014		return -ENOMSG;
   1015
   1016	ret = pstore_ftrace_combine_log(&record->buf, &record->size,
   1017			(char *)buf->data, atomic_read(&buf->datalen));
   1018	if (unlikely(ret))
   1019		return ret;
   1020
   1021out:
   1022	cxt = record->psi->data;
   1023	if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt)
   1024		/* then, read next ftrace zone */
   1025		return -ENOMSG;
   1026	record->id = 0;
   1027	return record->size ? record->size : -ENOMSG;
   1028}
   1029
   1030static ssize_t psz_record_read(struct pstore_zone *zone,
   1031		struct pstore_record *record)
   1032{
   1033	size_t len;
   1034	struct psz_buffer *buf;
   1035
   1036	if (!zone || !record)
   1037		return -ENOSPC;
   1038
   1039	buf = (struct psz_buffer *)zone->oldbuf;
   1040	if (!buf)
   1041		return -ENOMSG;
   1042
   1043	len = atomic_read(&buf->datalen);
   1044	record->buf = kmalloc(len, GFP_KERNEL);
   1045	if (!record->buf)
   1046		return -ENOMEM;
   1047
   1048	if (unlikely(psz_zone_read_oldbuf(zone, record->buf, len, 0))) {
   1049		kfree(record->buf);
   1050		return -ENOMSG;
   1051	}
   1052
   1053	return len;
   1054}
   1055
   1056static ssize_t psz_pstore_read(struct pstore_record *record)
   1057{
   1058	struct psz_context *cxt = record->psi->data;
   1059	ssize_t (*readop)(struct pstore_zone *zone,
   1060			struct pstore_record *record);
   1061	struct pstore_zone *zone;
   1062	ssize_t ret;
   1063
   1064	/* before read, we must recover from storage */
   1065	ret = psz_recovery(cxt);
   1066	if (ret)
   1067		return ret;
   1068
   1069next_zone:
   1070	zone = psz_read_next_zone(cxt);
   1071	if (!zone)
   1072		return 0;
   1073
   1074	record->type = zone->type;
   1075	switch (record->type) {
   1076	case PSTORE_TYPE_DMESG:
   1077		readop = psz_kmsg_read;
   1078		record->id = cxt->kmsg_read_cnt - 1;
   1079		break;
   1080	case PSTORE_TYPE_FTRACE:
   1081		readop = psz_ftrace_read;
   1082		break;
   1083	case PSTORE_TYPE_CONSOLE:
   1084	case PSTORE_TYPE_PMSG:
   1085		readop = psz_record_read;
   1086		break;
   1087	default:
   1088		goto next_zone;
   1089	}
   1090
   1091	ret = readop(zone, record);
   1092	if (ret == -ENOMSG)
   1093		goto next_zone;
   1094	return ret;
   1095}
   1096
   1097static struct psz_context pstore_zone_cxt = {
   1098	.pstore_zone_info_lock =
   1099		__MUTEX_INITIALIZER(pstore_zone_cxt.pstore_zone_info_lock),
   1100	.recovered = ATOMIC_INIT(0),
   1101	.on_panic = ATOMIC_INIT(0),
   1102	.pstore = {
   1103		.owner = THIS_MODULE,
   1104		.open = psz_pstore_open,
   1105		.read = psz_pstore_read,
   1106		.write = psz_pstore_write,
   1107		.erase = psz_pstore_erase,
   1108	},
   1109};
   1110
   1111static void psz_free_zone(struct pstore_zone **pszone)
   1112{
   1113	struct pstore_zone *zone = *pszone;
   1114
   1115	if (!zone)
   1116		return;
   1117
   1118	kfree(zone->buffer);
   1119	kfree(zone);
   1120	*pszone = NULL;
   1121}
   1122
   1123static void psz_free_zones(struct pstore_zone ***pszones, unsigned int *cnt)
   1124{
   1125	struct pstore_zone **zones = *pszones;
   1126
   1127	if (!zones)
   1128		return;
   1129
   1130	while (*cnt > 0) {
   1131		(*cnt)--;
   1132		psz_free_zone(&(zones[*cnt]));
   1133	}
   1134	kfree(zones);
   1135	*pszones = NULL;
   1136}
   1137
   1138static void psz_free_all_zones(struct psz_context *cxt)
   1139{
   1140	if (cxt->kpszs)
   1141		psz_free_zones(&cxt->kpszs, &cxt->kmsg_max_cnt);
   1142	if (cxt->ppsz)
   1143		psz_free_zone(&cxt->ppsz);
   1144	if (cxt->cpsz)
   1145		psz_free_zone(&cxt->cpsz);
   1146	if (cxt->fpszs)
   1147		psz_free_zones(&cxt->fpszs, &cxt->ftrace_max_cnt);
   1148}
   1149
   1150static struct pstore_zone *psz_init_zone(enum pstore_type_id type,
   1151		loff_t *off, size_t size)
   1152{
   1153	struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
   1154	struct pstore_zone *zone;
   1155	const char *name = pstore_type_to_name(type);
   1156
   1157	if (!size)
   1158		return NULL;
   1159
   1160	if (*off + size > info->total_size) {
   1161		pr_err("no room for %s (0x%zx@0x%llx over 0x%lx)\n",
   1162			name, size, *off, info->total_size);
   1163		return ERR_PTR(-ENOMEM);
   1164	}
   1165
   1166	zone = kzalloc(sizeof(struct pstore_zone), GFP_KERNEL);
   1167	if (!zone)
   1168		return ERR_PTR(-ENOMEM);
   1169
   1170	zone->buffer = kmalloc(size, GFP_KERNEL);
   1171	if (!zone->buffer) {
   1172		kfree(zone);
   1173		return ERR_PTR(-ENOMEM);
   1174	}
   1175	memset(zone->buffer, 0xFF, size);
   1176	zone->off = *off;
   1177	zone->name = name;
   1178	zone->type = type;
   1179	zone->buffer_size = size - sizeof(struct psz_buffer);
   1180	zone->buffer->sig = type ^ PSZ_SIG;
   1181	zone->oldbuf = NULL;
   1182	atomic_set(&zone->dirty, 0);
   1183	atomic_set(&zone->buffer->datalen, 0);
   1184	atomic_set(&zone->buffer->start, 0);
   1185
   1186	*off += size;
   1187
   1188	pr_debug("pszone %s: off 0x%llx, %zu header, %zu data\n", zone->name,
   1189			zone->off, sizeof(*zone->buffer), zone->buffer_size);
   1190	return zone;
   1191}
   1192
   1193static struct pstore_zone **psz_init_zones(enum pstore_type_id type,
   1194	loff_t *off, size_t total_size, ssize_t record_size,
   1195	unsigned int *cnt)
   1196{
   1197	struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
   1198	struct pstore_zone **zones, *zone;
   1199	const char *name = pstore_type_to_name(type);
   1200	int c, i;
   1201
   1202	*cnt = 0;
   1203	if (!total_size || !record_size)
   1204		return NULL;
   1205
   1206	if (*off + total_size > info->total_size) {
   1207		pr_err("no room for zones %s (0x%zx@0x%llx over 0x%lx)\n",
   1208			name, total_size, *off, info->total_size);
   1209		return ERR_PTR(-ENOMEM);
   1210	}
   1211
   1212	c = total_size / record_size;
   1213	zones = kcalloc(c, sizeof(*zones), GFP_KERNEL);
   1214	if (!zones) {
   1215		pr_err("allocate for zones %s failed\n", name);
   1216		return ERR_PTR(-ENOMEM);
   1217	}
   1218	memset(zones, 0, c * sizeof(*zones));
   1219
   1220	for (i = 0; i < c; i++) {
   1221		zone = psz_init_zone(type, off, record_size);
   1222		if (!zone || IS_ERR(zone)) {
   1223			pr_err("initialize zones %s failed\n", name);
   1224			psz_free_zones(&zones, &i);
   1225			return (void *)zone;
   1226		}
   1227		zones[i] = zone;
   1228	}
   1229
   1230	*cnt = c;
   1231	return zones;
   1232}
   1233
   1234static int psz_alloc_zones(struct psz_context *cxt)
   1235{
   1236	struct pstore_zone_info *info = cxt->pstore_zone_info;
   1237	loff_t off = 0;
   1238	int err;
   1239	size_t off_size = 0;
   1240
   1241	off_size += info->pmsg_size;
   1242	cxt->ppsz = psz_init_zone(PSTORE_TYPE_PMSG, &off, info->pmsg_size);
   1243	if (IS_ERR(cxt->ppsz)) {
   1244		err = PTR_ERR(cxt->ppsz);
   1245		cxt->ppsz = NULL;
   1246		goto free_out;
   1247	}
   1248
   1249	off_size += info->console_size;
   1250	cxt->cpsz = psz_init_zone(PSTORE_TYPE_CONSOLE, &off,
   1251			info->console_size);
   1252	if (IS_ERR(cxt->cpsz)) {
   1253		err = PTR_ERR(cxt->cpsz);
   1254		cxt->cpsz = NULL;
   1255		goto free_out;
   1256	}
   1257
   1258	off_size += info->ftrace_size;
   1259	cxt->fpszs = psz_init_zones(PSTORE_TYPE_FTRACE, &off,
   1260			info->ftrace_size,
   1261			info->ftrace_size / nr_cpu_ids,
   1262			&cxt->ftrace_max_cnt);
   1263	if (IS_ERR(cxt->fpszs)) {
   1264		err = PTR_ERR(cxt->fpszs);
   1265		cxt->fpszs = NULL;
   1266		goto free_out;
   1267	}
   1268
   1269	cxt->kpszs = psz_init_zones(PSTORE_TYPE_DMESG, &off,
   1270			info->total_size - off_size,
   1271			info->kmsg_size, &cxt->kmsg_max_cnt);
   1272	if (IS_ERR(cxt->kpszs)) {
   1273		err = PTR_ERR(cxt->kpszs);
   1274		cxt->kpszs = NULL;
   1275		goto free_out;
   1276	}
   1277
   1278	return 0;
   1279free_out:
   1280	psz_free_all_zones(cxt);
   1281	return err;
   1282}
   1283
   1284/**
    1285 * register_pstore_zone() - register with pstore/zone
   1286 *
   1287 * @info: back-end driver information. See &struct pstore_zone_info.
   1288 *
    1289 * Only one back-end can be registered at a time.
   1290 *
   1291 * Return: 0 on success, others on failure.
   1292 */
   1293int register_pstore_zone(struct pstore_zone_info *info)
   1294{
   1295	int err = -EINVAL;
   1296	struct psz_context *cxt = &pstore_zone_cxt;
   1297
   1298	if (info->total_size < 4096) {
   1299		pr_warn("total_size must be >= 4096\n");
   1300		return -EINVAL;
   1301	}
   1302	if (info->total_size > SZ_128M) {
   1303		pr_warn("capping size to 128MiB\n");
   1304		info->total_size = SZ_128M;
   1305	}
   1306
   1307	if (!info->kmsg_size && !info->pmsg_size && !info->console_size &&
   1308	    !info->ftrace_size) {
   1309		pr_warn("at least one record size must be non-zero\n");
   1310		return -EINVAL;
   1311	}
   1312
   1313	if (!info->name || !info->name[0])
   1314		return -EINVAL;
   1315
   1316#define check_size(name, size) {					\
   1317		if (info->name > 0 && info->name < (size)) {		\
   1318			pr_err(#name " must be over %d\n", (size));	\
   1319			return -EINVAL;					\
   1320		}							\
   1321		if (info->name & (size - 1)) {				\
   1322			pr_err(#name " must be a multiple of %d\n",	\
   1323					(size));			\
   1324			return -EINVAL;					\
   1325		}							\
   1326	}
   1327
   1328	check_size(total_size, 4096);
   1329	check_size(kmsg_size, SECTOR_SIZE);
   1330	check_size(pmsg_size, SECTOR_SIZE);
   1331	check_size(console_size, SECTOR_SIZE);
   1332	check_size(ftrace_size, SECTOR_SIZE);
   1333
   1334#undef check_size
   1335
    1336	/*
    1337	 * The @read and @write callbacks must be provided.
    1338	 * Without @read, mounting pstore may fail.
    1339	 * Without @write, pstore cannot remove record files.
    1340	 */
   1341	if (!info->read || !info->write) {
   1342		pr_err("no valid general read/write interface\n");
   1343		return -EINVAL;
   1344	}
   1345
   1346	mutex_lock(&cxt->pstore_zone_info_lock);
   1347	if (cxt->pstore_zone_info) {
   1348		pr_warn("'%s' already loaded: ignoring '%s'\n",
   1349				cxt->pstore_zone_info->name, info->name);
   1350		mutex_unlock(&cxt->pstore_zone_info_lock);
   1351		return -EBUSY;
   1352	}
   1353	cxt->pstore_zone_info = info;
   1354
   1355	pr_debug("register %s with properties:\n", info->name);
   1356	pr_debug("\ttotal size : %ld Bytes\n", info->total_size);
   1357	pr_debug("\tkmsg size : %ld Bytes\n", info->kmsg_size);
   1358	pr_debug("\tpmsg size : %ld Bytes\n", info->pmsg_size);
   1359	pr_debug("\tconsole size : %ld Bytes\n", info->console_size);
   1360	pr_debug("\tftrace size : %ld Bytes\n", info->ftrace_size);
   1361
   1362	err = psz_alloc_zones(cxt);
   1363	if (err) {
   1364		pr_err("alloc zones failed\n");
   1365		goto fail_out;
   1366	}
   1367
   1368	if (info->kmsg_size) {
   1369		cxt->pstore.bufsize = cxt->kpszs[0]->buffer_size -
   1370			sizeof(struct psz_kmsg_header);
   1371		cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
   1372		if (!cxt->pstore.buf) {
   1373			err = -ENOMEM;
   1374			goto fail_free;
   1375		}
   1376	}
   1377	cxt->pstore.data = cxt;
   1378
   1379	pr_info("registered %s as backend for", info->name);
   1380	cxt->pstore.max_reason = info->max_reason;
   1381	cxt->pstore.name = info->name;
   1382	if (info->kmsg_size) {
   1383		cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
   1384		pr_cont(" kmsg(%s",
   1385			kmsg_dump_reason_str(cxt->pstore.max_reason));
   1386		if (cxt->pstore_zone_info->panic_write)
   1387			pr_cont(",panic_write");
   1388		pr_cont(")");
   1389	}
   1390	if (info->pmsg_size) {
   1391		cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
   1392		pr_cont(" pmsg");
   1393	}
   1394	if (info->console_size) {
   1395		cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
   1396		pr_cont(" console");
   1397	}
   1398	if (info->ftrace_size) {
   1399		cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
   1400		pr_cont(" ftrace");
   1401	}
   1402	pr_cont("\n");
   1403
   1404	err = pstore_register(&cxt->pstore);
   1405	if (err) {
   1406		pr_err("registering with pstore failed\n");
   1407		goto fail_free;
   1408	}
   1409	mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);
   1410
   1411	return 0;
   1412
   1413fail_free:
   1414	kfree(cxt->pstore.buf);
   1415	cxt->pstore.buf = NULL;
   1416	cxt->pstore.bufsize = 0;
   1417	psz_free_all_zones(cxt);
   1418fail_out:
   1419	pstore_zone_cxt.pstore_zone_info = NULL;
   1420	mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);
   1421	return err;
   1422}
   1423EXPORT_SYMBOL_GPL(register_pstore_zone);
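
/*
 * Usage sketch (hypothetical back-end; my_read/my_write, "myzone" and the
 * sizes are illustrative assumptions, not part of this file). The callbacks
 * copy @bytes at byte offset @pos of the backing storage:
 *
 *	static ssize_t my_read(char *buf, size_t bytes, loff_t pos);
 *	static ssize_t my_write(const char *buf, size_t bytes, loff_t pos);
 *
 *	static struct pstore_zone_info my_info = {
 *		.owner      = THIS_MODULE,
 *		.name       = "myzone",
 *		.total_size = SZ_1M,    // >= 4096, multiple of 4096
 *		.kmsg_size  = SZ_64K,   // multiple of SECTOR_SIZE
 *		.pmsg_size  = SZ_64K,
 *		.read       = my_read,
 *		.write      = my_write,
 *	};
 *
 *	err = register_pstore_zone(&my_info);
 */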
   1424
   1425/**
    1426 * unregister_pstore_zone() - unregister from pstore/zone
   1427 *
   1428 * @info: back-end driver information. See struct pstore_zone_info.
   1429 */
   1430void unregister_pstore_zone(struct pstore_zone_info *info)
   1431{
   1432	struct psz_context *cxt = &pstore_zone_cxt;
   1433
   1434	mutex_lock(&cxt->pstore_zone_info_lock);
   1435	if (!cxt->pstore_zone_info) {
   1436		mutex_unlock(&cxt->pstore_zone_info_lock);
   1437		return;
   1438	}
   1439
   1440	/* Stop incoming writes from pstore. */
   1441	pstore_unregister(&cxt->pstore);
   1442
   1443	/* Flush any pending writes. */
   1444	psz_flush_all_dirty_zones(NULL);
   1445	flush_delayed_work(&psz_cleaner);
   1446
   1447	/* Clean up allocations. */
   1448	kfree(cxt->pstore.buf);
   1449	cxt->pstore.buf = NULL;
   1450	cxt->pstore.bufsize = 0;
   1451	cxt->pstore_zone_info = NULL;
   1452
   1453	psz_free_all_zones(cxt);
   1454
   1455	/* Clear counters and zone state. */
   1456	cxt->oops_counter = 0;
   1457	cxt->panic_counter = 0;
   1458	atomic_set(&cxt->recovered, 0);
   1459	atomic_set(&cxt->on_panic, 0);
   1460
   1461	mutex_unlock(&cxt->pstore_zone_info_lock);
   1462}
   1463EXPORT_SYMBOL_GPL(unregister_pstore_zone);
   1464
   1465MODULE_LICENSE("GPL");
   1466MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
   1467MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
   1468MODULE_DESCRIPTION("Storage Manager for pstore/blk");