cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lpddr_cmds.c (20223B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * LPDDR flash memory device operations. This module provides read, write,
 * erase, lock/unlock support for LPDDR flash memories
 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
 * Many thanks to Roman Borisov for initial enabling
 *
 * TODO:
 * Implement VPP management
 * Implement XIP support
 * Implement OTP support
 */
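/*
 * Usage sketch (illustrative addition, not part of the original driver):
 * a qinfo/map probe driver is expected to fill in a struct map_info, with
 * fldrv_priv pointing at its lpddr_private data, and then hand the device
 * to the MTD core roughly like this. The variable names are assumptions;
 * lpddr_cmdset() and mtd_device_register() are the real entry points.
 *
 *	struct mtd_info *mtd = lpddr_cmdset(map);
 *
 *	if (!mtd)
 *		return -ENODEV;
 *	return mtd_device_register(mtd, NULL, 0);
 */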
#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
#include <linux/slab.h>
#include <linux/module.h>

static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
					size_t *retlen, u_char *buf);
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
				size_t len, size_t *retlen, const u_char *buf);
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen);
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys);
static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
static void put_chip(struct map_info *map, struct flchip *chip);

struct mtd_info *lpddr_cmdset(struct map_info *map)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	struct flchip_shared *shared;
	struct flchip *chip;
	struct mtd_info *mtd;
	int numchips;
	int i, j;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_read = lpddr_read;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->flags &= ~MTD_BIT_WRITEABLE;
	mtd->_erase = lpddr_erase;
	mtd->_write = lpddr_write_buffers;
	mtd->_writev = lpddr_writev;
	mtd->_lock = lpddr_lock;
	mtd->_unlock = lpddr_unlock;
	if (map_is_linear(map)) {
		mtd->_point = lpddr_point;
		mtd->_unpoint = lpddr_unpoint;
	}
	mtd->size = 1 << lpddr->qinfo->DevSizeShift;
	mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
	mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;

	shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
						GFP_KERNEL);
	if (!shared) {
		kfree(mtd);
		return NULL;
	}

	chip = &lpddr->chips[0];
	numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
	for (i = 0; i < numchips; i++) {
		shared[i].writing = shared[i].erasing = NULL;
		mutex_init(&shared[i].lock);
		for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
			*chip = lpddr->chips[i];
			chip->start += j << lpddr->chipshift;
			chip->oldstate = chip->state = FL_READY;
			chip->priv = &shared[i];
			/* those should be reset too since
			   they create memory references. */
			init_waitqueue_head(&chip->wq);
			mutex_init(&chip->mutex);
			chip++;
		}
	}

	return mtd;
}
EXPORT_SYMBOL(lpddr_cmdset);

static void print_drs_error(unsigned int dsr)
{
	int prog_status = (dsr & DSR_RPS) >> 8;

	if (!(dsr & DSR_AVAILABLE))
		pr_notice("DSR.15: (0) Device not Available\n");
	if ((prog_status & 0x03) == 0x03)
		pr_notice("DSR.9,8: (11) Attempt to program invalid half with 41h command\n");
	else if (prog_status & 0x02)
		pr_notice("DSR.9,8: (10) Object Mode Program attempt in region with Control Mode data\n");
	else if (prog_status & 0x01)
		pr_notice("DSR.9,8: (01) Program attempt in region with Object Mode data\n");
	if (!(dsr & DSR_READY_STATUS))
		pr_notice("DSR.7: (0) Device is Busy\n");
	if (dsr & DSR_ESS)
		pr_notice("DSR.6: (1) Erase Suspended\n");
	if (dsr & DSR_ERASE_STATUS)
		pr_notice("DSR.5: (1) Erase/Blank check error\n");
	if (dsr & DSR_PROGRAM_STATUS)
		pr_notice("DSR.4: (1) Program Error\n");
	if (dsr & DSR_VPPS)
		pr_notice("DSR.3: (1) Vpp low detect, operation aborted\n");
	if (dsr & DSR_PSS)
		pr_notice("DSR.2: (1) Program suspended\n");
	if (dsr & DSR_DPS)
		pr_notice("DSR.1: (1) Aborted Erase/Program attempt on locked block\n");
}

static int wait_for_ready(struct map_info *map, struct flchip *chip,
		unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d\n",
							map->name, chip_state);
			ret = -ETIME;
			break;
		}

		/* OK. Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended) {
			/* Suspend has occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		/* Clear DSR */
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING "%s: Bad status on wait: 0x%x\n",
				map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}

static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have a possibility of contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;

		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own the chip if it is already in
			 * FL_SYNCING state. Put the contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we have suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops. Something went wrong. */
			/* Resume and pretend we weren't here. */
			put_chip(map, chip);
			printk(KERN_ERR "%s: suspend operation failed. "
					"State may be wrong\n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;
		/* Erase suspend */
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;

		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;

				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch (chip->oldstate) {
	case FL_ERASING:
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

static int do_write_buffer(struct map_info *map, struct flchip *chip,
			unsigned long adr, const struct kvec **pvec,
			unsigned long *pvec_seek, int len)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	map_word datum;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long prog_buf_ofs;

	wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
	/* Write data */
	/* Get the program buffer offset from PFOW register data first */
	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;

		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && (len < map_bankwidth(map)))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
				vec->iov_base + vec_seek, word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, prog_buf_ofs);
			prog_buf_ofs += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
	chip->state = FL_WRITING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
	if (ret) {
		printk(KERN_WARNING "%s: Buffer program error %d at %lx\n",
			map->name, ret, adr);
		goto out;
	}

 out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
	chip->state = FL_ERASING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
	if (ret) {
		printk(KERN_WARNING "%s: Erase block error %d at %llx\n",
			map->name, ret, adr);
		goto out;
	}
 out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	map_copy_from(map, buf, adr, len);
	*retlen = len;

	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	if (!map->virt)
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);
	*mtdbuf = (void *)map->virt + chip->start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		mutex_unlock(&chip->mutex);
		if (ret)
			break;

		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}

static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift, err = 0;
	unsigned long ofs;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &lpddr->chips[chipnum];
		if (chipnum >= lpddr->numchips)
			break;

		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_WARNING "%s: Warning: unpoint called on "
					"non-pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}

static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return lpddr_writev(mtd, &vec, 1, to, retlen);
}

static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	if (!len)
		return 0;

	chipnum = to >> lpddr->chipshift;

	ofs = to;
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &lpddr->chips[chipnum],
					  ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		/* Be nice and reschedule with the chip in a usable
		 * state for other processes */
		cond_resched();

	} while (len);

	return 0;
}

static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int size = 1 << lpddr->qinfo->UniformBlockSizeShift;

	ofs = instr->addr;
	len = instr->len;

	while (len > 0) {
		ret = do_erase_oneblock(mtd, ofs);
		if (ret)
			return ret;
		ofs += size;
		len -= size;
	}

	return 0;
}

#define DO_XXLOCK_LOCK		1
#define DO_XXLOCK_UNLOCK	2
static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
	int ret = 0;
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (thunk == DO_XXLOCK_LOCK) {
		send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_UNLOCK) {
		send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	ret = wait_for_ready(map, chip, 1);
	if (ret) {
		printk(KERN_ERR "%s: block lock/unlock error status %d\n",
				map->name, ret);
		goto out;
	}
out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}

static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
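
/*
 * Example (illustrative addition, not part of the original file): once the
 * map driver has registered the mtd_info returned by lpddr_cmdset(), other
 * kernel code reaches the handlers above through the generic MTD API. The
 * device name "lpddr flash" below is hypothetical; the real name is chosen
 * by the map driver.
 *
 *	size_t retlen;
 *	u_char buf[64];
 *	struct mtd_info *mtd = get_mtd_device_nm("lpddr flash");
 *
 *	if (!IS_ERR(mtd)) {
 *		if (!mtd_read(mtd, 0, sizeof(buf), &retlen, buf))
 *			pr_info("read %zu bytes\n", retlen);
 *		put_mtd_device(mtd);
 *	}
 */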