cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

smem.c (31588B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (c) 2015, Sony Mobile Communications AB.
      4 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
      5 */
      6
      7#include <linux/hwspinlock.h>
      8#include <linux/io.h>
      9#include <linux/module.h>
     10#include <linux/of.h>
     11#include <linux/of_address.h>
     12#include <linux/of_reserved_mem.h>
     13#include <linux/platform_device.h>
     14#include <linux/sizes.h>
     15#include <linux/slab.h>
     16#include <linux/soc/qcom/smem.h>
     17
     18/*
      19 * The Qualcomm shared memory system is an allocate-only heap structure that
      20 * consists of one or more memory areas that can be accessed by the processors
     21 * in the SoC.
     22 *
      23 * All systems contain a global heap, accessible by all processors in the SoC,
     24 * with a table of contents data structure (@smem_header) at the beginning of
     25 * the main shared memory block.
     26 *
      27 * The global header contains metadata for allocations as well as a fixed list
     28 * of 512 entries (@smem_global_entry) that can be initialized to reference
     29 * parts of the shared memory space.
     30 *
     31 *
     32 * In addition to this global heap a set of "private" heaps can be set up at
     33 * boot time with access restrictions so that only certain processor pairs can
     34 * access the data.
     35 *
      36 * These partitions are referenced from an optional partition table
      37 * (@smem_ptable), which is found 4kB from the end of the main smem region. The
      38 * partition table entries (@smem_ptable_entry) list the involved processors
     39 * (or hosts) and their location in the main shared memory region.
     40 *
     41 * Each partition starts with a header (@smem_partition_header) that identifies
     42 * the partition and holds properties for the two internal memory regions. The
     43 * two regions are cached and non-cached memory respectively. Each region
      44 * contains a linked list of allocation headers (@smem_private_entry) followed by
     45 * their data.
     46 *
     47 * Items in the non-cached region are allocated from the start of the partition
     48 * while items in the cached region are allocated from the end. The free area
     49 * is hence the region between the cached and non-cached offsets. The header of
     50 * cached items comes after the data.
     51 *
     52 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
     53 * for the global heap. A new global partition is created from the global heap
     54 * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
     55 * set by the bootloader.
     56 *
      57 * To synchronize allocations in the shared memory heaps, a remote spinlock must
     58 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
     59 * platforms.
     60 *
     61 */
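
        /*
         * Illustrative sketch (not part of the driver) of the layout described
         * above, assuming a mapped struct smem_partition_header *phdr (the
         * struct is defined further down in this file): the free area is the
         * gap between the two free offsets.
         *
         *	size_t free_bytes = le32_to_cpu(phdr->offset_free_cached) -
         *			    le32_to_cpu(phdr->offset_free_uncached);
         *
         * Uncached entries grow upward starting at sizeof(*phdr); cached
         * entries grow downward from phdr->size, with each cached header
         * placed after its data.
         */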
     62
     63/*
     64 * The version member of the smem header contains an array of versions for the
     65 * various software components in the SoC. We verify that the boot loader
      66 * version is valid as a sanity check.
     67 */
     68#define SMEM_MASTER_SBL_VERSION_INDEX	7
     69#define SMEM_GLOBAL_HEAP_VERSION	11
     70#define SMEM_GLOBAL_PART_VERSION	12
     71
     72/*
     73 * The first 8 items are only to be allocated by the boot loader while
     74 * initializing the heap.
     75 */
     76#define SMEM_ITEM_LAST_FIXED	8
     77
     78/* Highest accepted item number, for both global and private heaps */
     79#define SMEM_ITEM_COUNT		512
     80
     81/* Processor/host identifier for the application processor */
     82#define SMEM_HOST_APPS		0
     83
     84/* Processor/host identifier for the global partition */
     85#define SMEM_GLOBAL_HOST	0xfffe
     86
     87/* Max number of processors/hosts in a system */
     88#define SMEM_HOST_COUNT		15
     89
     90/**
      91 * struct smem_proc_comm - proc_comm communication struct (legacy)
      92 * @command:	current command to be executed
      93 * @status:	status of the currently requested command
      94 * @params:	parameters to the command
      95 */
     96struct smem_proc_comm {
     97	__le32 command;
     98	__le32 status;
     99	__le32 params[2];
    100};
    101
    102/**
    103 * struct smem_global_entry - entry to reference smem items on the heap
    104 * @allocated:	boolean to indicate if this entry is used
    105 * @offset:	offset to the allocated space
      106 * @size:	size of the allocated space, 8-byte aligned
    107 * @aux_base:	base address for the memory region used by this unit, or 0 for
    108 *		the default region. bits 0,1 are reserved
    109 */
    110struct smem_global_entry {
    111	__le32 allocated;
    112	__le32 offset;
    113	__le32 size;
    114	__le32 aux_base; /* bits 1:0 reserved */
    115};
    116#define AUX_BASE_MASK		0xfffffffc
    117
    118/**
    119 * struct smem_header - header found in beginning of primary smem region
    120 * @proc_comm:		proc_comm communication interface (legacy)
    121 * @version:		array of versions for the various subsystems
    122 * @initialized:	boolean to indicate that smem is initialized
    123 * @free_offset:	index of the first unallocated byte in smem
    124 * @available:		number of bytes available for allocation
    125 * @reserved:		reserved field, must be 0
    126 * @toc:		array of references to items
    127 */
    128struct smem_header {
    129	struct smem_proc_comm proc_comm[4];
    130	__le32 version[32];
    131	__le32 initialized;
    132	__le32 free_offset;
    133	__le32 available;
    134	__le32 reserved;
    135	struct smem_global_entry toc[SMEM_ITEM_COUNT];
    136};
    137
    138/**
    139 * struct smem_ptable_entry - one entry in the @smem_ptable list
    140 * @offset:	offset, within the main shared memory region, of the partition
    141 * @size:	size of the partition
    142 * @flags:	flags for the partition (currently unused)
    143 * @host0:	first processor/host with access to this partition
    144 * @host1:	second processor/host with access to this partition
    145 * @cacheline:	alignment for "cached" entries
    146 * @reserved:	reserved entries for later use
    147 */
    148struct smem_ptable_entry {
    149	__le32 offset;
    150	__le32 size;
    151	__le32 flags;
    152	__le16 host0;
    153	__le16 host1;
    154	__le32 cacheline;
    155	__le32 reserved[7];
    156};
    157
    158/**
    159 * struct smem_ptable - partition table for the private partitions
    160 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
    161 * @version:	version of the partition table
    162 * @num_entries: number of partitions in the table
      163 * @reserved:	reserved entries for later use
    164 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
    165 */
    166struct smem_ptable {
    167	u8 magic[4];
    168	__le32 version;
    169	__le32 num_entries;
    170	__le32 reserved[5];
    171	struct smem_ptable_entry entry[];
    172};
    173
    174static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
    175
    176/**
    177 * struct smem_partition_header - header of the partitions
    178 * @magic:	magic number, must be SMEM_PART_MAGIC
    179 * @host0:	first processor/host with access to this partition
    180 * @host1:	second processor/host with access to this partition
    181 * @size:	size of the partition
    182 * @offset_free_uncached: offset to the first free byte of uncached memory in
    183 *		this partition
    184 * @offset_free_cached: offset to the first free byte of cached memory in this
    185 *		partition
      186 * @reserved:	reserved entries for later use
    187 */
    188struct smem_partition_header {
    189	u8 magic[4];
    190	__le16 host0;
    191	__le16 host1;
    192	__le32 size;
    193	__le32 offset_free_uncached;
    194	__le32 offset_free_cached;
    195	__le32 reserved[3];
    196};
    197
    198/**
    199 * struct smem_partition - describes smem partition
    200 * @virt_base:	starting virtual address of partition
    201 * @phys_base:	starting physical address of partition
    202 * @cacheline:	alignment for "cached" entries
    203 * @size:	size of partition
    204 */
    205struct smem_partition {
    206	void __iomem *virt_base;
    207	phys_addr_t phys_base;
    208	size_t cacheline;
    209	size_t size;
    210};
    211
     212static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */
    213
    214/**
    215 * struct smem_private_entry - header of each item in the private partition
    216 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
    217 * @item:	identifying number of the smem item
    218 * @size:	size of the data, including padding bytes
    219 * @padding_data: number of bytes of padding of data
    220 * @padding_hdr: number of bytes of padding between the header and the data
      221 * @reserved:	reserved entry for later use
    222 */
    223struct smem_private_entry {
    224	u16 canary; /* bytes are the same so no swapping needed */
    225	__le16 item;
    226	__le32 size; /* includes padding bytes */
    227	__le16 padding_data;
    228	__le16 padding_hdr;
    229	__le32 reserved;
    230};
    231#define SMEM_PRIVATE_CANARY	0xa5a5
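
        /*
         * A sketch of the address math this layout implies, mirroring the
         * *_entry_to_item() helpers below; e is a struct smem_private_entry
         * pointer into a mapped partition:
         *
         *	void *uncached_data = (void *)e + sizeof(*e) +
         *			      le16_to_cpu(e->padding_hdr);
         *	void *cached_data   = (void *)e - le32_to_cpu(e->size);
         *
         * For cached entries the header is stored after its data, hence the
         * subtraction.
         */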
    232
    233/**
    234 * struct smem_info - smem region info located after the table of contents
    235 * @magic:	magic number, must be SMEM_INFO_MAGIC
    236 * @size:	size of the smem region
    237 * @base_addr:	base address of the smem region
      238 * @reserved:	reserved entry for later use
    239 * @num_items:	highest accepted item number
    240 */
    241struct smem_info {
    242	u8 magic[4];
    243	__le32 size;
    244	__le32 base_addr;
    245	__le32 reserved;
    246	__le16 num_items;
    247};
    248
     249static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* "SIII" */
    250
    251/**
    252 * struct smem_region - representation of a chunk of memory used for smem
    253 * @aux_base:	identifier of aux_mem base
    254 * @virt_base:	virtual base address of memory with this aux_mem identifier
    255 * @size:	size of the memory region
    256 */
    257struct smem_region {
    258	phys_addr_t aux_base;
    259	void __iomem *virt_base;
    260	size_t size;
    261};
    262
    263/**
    264 * struct qcom_smem - device data for the smem device
    265 * @dev:	device pointer
    266 * @hwlock:	reference to a hwspinlock
    267 * @ptable: virtual base of partition table
      268 * @global_partition: describes the global partition when in use
    269 * @partitions: list of partitions of current processor/host
    270 * @item_count: max accepted item number
    271 * @socinfo:	platform device pointer
    272 * @num_regions: number of @regions
    273 * @regions:	list of the memory regions defining the shared memory
    274 */
    275struct qcom_smem {
    276	struct device *dev;
    277
    278	struct hwspinlock *hwlock;
    279
    280	u32 item_count;
    281	struct platform_device *socinfo;
    282	struct smem_ptable *ptable;
    283	struct smem_partition global_partition;
    284	struct smem_partition partitions[SMEM_HOST_COUNT];
    285
    286	unsigned num_regions;
    287	struct smem_region regions[];
    288};
    289
    290static void *
    291phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
    292{
    293	void *p = phdr;
    294
    295	return p + le32_to_cpu(phdr->offset_free_uncached);
    296}
    297
    298static struct smem_private_entry *
    299phdr_to_first_cached_entry(struct smem_partition_header *phdr,
    300					size_t cacheline)
    301{
    302	void *p = phdr;
    303	struct smem_private_entry *e;
    304
    305	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
    306}
    307
    308static void *
    309phdr_to_last_cached_entry(struct smem_partition_header *phdr)
    310{
    311	void *p = phdr;
    312
    313	return p + le32_to_cpu(phdr->offset_free_cached);
    314}
    315
    316static struct smem_private_entry *
    317phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
    318{
    319	void *p = phdr;
    320
    321	return p + sizeof(*phdr);
    322}
    323
    324static struct smem_private_entry *
    325uncached_entry_next(struct smem_private_entry *e)
    326{
    327	void *p = e;
    328
    329	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
    330	       le32_to_cpu(e->size);
    331}
    332
    333static struct smem_private_entry *
    334cached_entry_next(struct smem_private_entry *e, size_t cacheline)
    335{
    336	void *p = e;
    337
    338	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
    339}
    340
    341static void *uncached_entry_to_item(struct smem_private_entry *e)
    342{
    343	void *p = e;
    344
    345	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
    346}
    347
    348static void *cached_entry_to_item(struct smem_private_entry *e)
    349{
    350	void *p = e;
    351
    352	return p - le32_to_cpu(e->size);
    353}
    354
    355/* Pointer to the one and only smem handle */
    356static struct qcom_smem *__smem;
    357
    358/* Timeout (ms) for the trylock of remote spinlocks */
    359#define HWSPINLOCK_TIMEOUT	1000
    360
    361static int qcom_smem_alloc_private(struct qcom_smem *smem,
    362				   struct smem_partition *part,
    363				   unsigned item,
    364				   size_t size)
    365{
    366	struct smem_private_entry *hdr, *end;
    367	struct smem_partition_header *phdr;
    368	size_t alloc_size;
    369	void *cached;
    370	void *p_end;
    371
    372	phdr = (struct smem_partition_header __force *)part->virt_base;
    373	p_end = (void *)phdr + part->size;
    374
    375	hdr = phdr_to_first_uncached_entry(phdr);
    376	end = phdr_to_last_uncached_entry(phdr);
    377	cached = phdr_to_last_cached_entry(phdr);
    378
    379	if (WARN_ON((void *)end > p_end || cached > p_end))
    380		return -EINVAL;
    381
    382	while (hdr < end) {
    383		if (hdr->canary != SMEM_PRIVATE_CANARY)
    384			goto bad_canary;
    385		if (le16_to_cpu(hdr->item) == item)
    386			return -EEXIST;
    387
    388		hdr = uncached_entry_next(hdr);
    389	}
    390
    391	if (WARN_ON((void *)hdr > p_end))
    392		return -EINVAL;
    393
    394	/* Check that we don't grow into the cached region */
    395	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
    396	if ((void *)hdr + alloc_size > cached) {
    397		dev_err(smem->dev, "Out of memory\n");
    398		return -ENOSPC;
    399	}
    400
    401	hdr->canary = SMEM_PRIVATE_CANARY;
    402	hdr->item = cpu_to_le16(item);
    403	hdr->size = cpu_to_le32(ALIGN(size, 8));
    404	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
    405	hdr->padding_hdr = 0;
    406
    407	/*
    408	 * Ensure the header is written before we advance the free offset, so
     409	 * that remote processors that do not take the remote spinlock still
     410	 * get a consistent view of the linked list.
    411	 */
    412	wmb();
    413	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
    414
    415	return 0;
    416bad_canary:
    417	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
    418		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
    419
    420	return -EINVAL;
    421}
    422
    423static int qcom_smem_alloc_global(struct qcom_smem *smem,
    424				  unsigned item,
    425				  size_t size)
    426{
    427	struct smem_global_entry *entry;
    428	struct smem_header *header;
    429
    430	header = smem->regions[0].virt_base;
    431	entry = &header->toc[item];
    432	if (entry->allocated)
    433		return -EEXIST;
    434
    435	size = ALIGN(size, 8);
    436	if (WARN_ON(size > le32_to_cpu(header->available)))
    437		return -ENOMEM;
    438
    439	entry->offset = header->free_offset;
    440	entry->size = cpu_to_le32(size);
    441
    442	/*
    443	 * Ensure the header is consistent before we mark the item allocated,
    444	 * so that remote processors will get a consistent view of the item
    445	 * even though they do not take the spinlock on read.
    446	 */
    447	wmb();
    448	entry->allocated = cpu_to_le32(1);
    449
    450	le32_add_cpu(&header->free_offset, size);
    451	le32_add_cpu(&header->available, -size);
    452
    453	return 0;
    454}
    455
    456/**
    457 * qcom_smem_alloc() - allocate space for a smem item
    458 * @host:	remote processor id, or -1
    459 * @item:	smem item handle
    460 * @size:	number of bytes to be allocated
    461 *
    462 * Allocate space for a given smem item of size @size, given that the item is
    463 * not yet allocated.
    464 */
    465int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
    466{
    467	struct smem_partition *part;
    468	unsigned long flags;
    469	int ret;
    470
    471	if (!__smem)
    472		return -EPROBE_DEFER;
    473
    474	if (item < SMEM_ITEM_LAST_FIXED) {
    475		dev_err(__smem->dev,
    476			"Rejecting allocation of static entry %d\n", item);
    477		return -EINVAL;
    478	}
    479
    480	if (WARN_ON(item >= __smem->item_count))
    481		return -EINVAL;
    482
    483	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
    484					  HWSPINLOCK_TIMEOUT,
    485					  &flags);
    486	if (ret)
    487		return ret;
    488
    489	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
    490		part = &__smem->partitions[host];
    491		ret = qcom_smem_alloc_private(__smem, part, item, size);
    492	} else if (__smem->global_partition.virt_base) {
    493		part = &__smem->global_partition;
    494		ret = qcom_smem_alloc_private(__smem, part, item, size);
    495	} else {
    496		ret = qcom_smem_alloc_global(__smem, item, size);
    497	}
    498
    499	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
    500
    501	return ret;
    502}
    503EXPORT_SYMBOL(qcom_smem_alloc);
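
        /*
         * Hypothetical client usage (a sketch, not part of this driver);
         * remote_host, MY_SMEM_ITEM and the size are made-up examples:
         *
         *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM, 64);
         *	if (ret < 0 && ret != -EEXIST)
         *		return ret;
         *
         * -EEXIST is typically not fatal, since either side may already have
         * allocated the item; -EPROBE_DEFER is returned until smem has probed.
         */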
    504
    505static void *qcom_smem_get_global(struct qcom_smem *smem,
    506				  unsigned item,
    507				  size_t *size)
    508{
    509	struct smem_header *header;
    510	struct smem_region *region;
    511	struct smem_global_entry *entry;
    512	u64 entry_offset;
    513	u32 e_size;
    514	u32 aux_base;
    515	unsigned i;
    516
    517	header = smem->regions[0].virt_base;
    518	entry = &header->toc[item];
    519	if (!entry->allocated)
    520		return ERR_PTR(-ENXIO);
    521
    522	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
    523
    524	for (i = 0; i < smem->num_regions; i++) {
    525		region = &smem->regions[i];
    526
    527		if ((u32)region->aux_base == aux_base || !aux_base) {
    528			e_size = le32_to_cpu(entry->size);
    529			entry_offset = le32_to_cpu(entry->offset);
    530
    531			if (WARN_ON(e_size + entry_offset > region->size))
    532				return ERR_PTR(-EINVAL);
    533
    534			if (size != NULL)
    535				*size = e_size;
    536
    537			return region->virt_base + entry_offset;
    538		}
    539	}
    540
    541	return ERR_PTR(-ENOENT);
    542}
    543
    544static void *qcom_smem_get_private(struct qcom_smem *smem,
    545				   struct smem_partition *part,
    546				   unsigned item,
    547				   size_t *size)
    548{
    549	struct smem_private_entry *e, *end;
    550	struct smem_partition_header *phdr;
    551	void *item_ptr, *p_end;
    552	u32 padding_data;
    553	u32 e_size;
    554
    555	phdr = (struct smem_partition_header __force *)part->virt_base;
    556	p_end = (void *)phdr + part->size;
    557
    558	e = phdr_to_first_uncached_entry(phdr);
    559	end = phdr_to_last_uncached_entry(phdr);
    560
    561	while (e < end) {
    562		if (e->canary != SMEM_PRIVATE_CANARY)
    563			goto invalid_canary;
    564
    565		if (le16_to_cpu(e->item) == item) {
    566			if (size != NULL) {
    567				e_size = le32_to_cpu(e->size);
    568				padding_data = le16_to_cpu(e->padding_data);
    569
    570				if (WARN_ON(e_size > part->size || padding_data > e_size))
    571					return ERR_PTR(-EINVAL);
    572
    573				*size = e_size - padding_data;
    574			}
    575
    576			item_ptr = uncached_entry_to_item(e);
    577			if (WARN_ON(item_ptr > p_end))
    578				return ERR_PTR(-EINVAL);
    579
    580			return item_ptr;
    581		}
    582
    583		e = uncached_entry_next(e);
    584	}
    585
    586	if (WARN_ON((void *)e > p_end))
    587		return ERR_PTR(-EINVAL);
    588
    589	/* Item was not found in the uncached list, search the cached list */
    590
    591	e = phdr_to_first_cached_entry(phdr, part->cacheline);
    592	end = phdr_to_last_cached_entry(phdr);
    593
    594	if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
    595		return ERR_PTR(-EINVAL);
    596
    597	while (e > end) {
    598		if (e->canary != SMEM_PRIVATE_CANARY)
    599			goto invalid_canary;
    600
    601		if (le16_to_cpu(e->item) == item) {
    602			if (size != NULL) {
    603				e_size = le32_to_cpu(e->size);
    604				padding_data = le16_to_cpu(e->padding_data);
    605
    606				if (WARN_ON(e_size > part->size || padding_data > e_size))
    607					return ERR_PTR(-EINVAL);
    608
    609				*size = e_size - padding_data;
    610			}
    611
    612			item_ptr = cached_entry_to_item(e);
    613			if (WARN_ON(item_ptr < (void *)phdr))
    614				return ERR_PTR(-EINVAL);
    615
    616			return item_ptr;
    617		}
    618
    619		e = cached_entry_next(e, part->cacheline);
    620	}
    621
    622	if (WARN_ON((void *)e < (void *)phdr))
    623		return ERR_PTR(-EINVAL);
    624
    625	return ERR_PTR(-ENOENT);
    626
    627invalid_canary:
    628	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
    629			le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
    630
    631	return ERR_PTR(-EINVAL);
    632}
    633
    634/**
      635 * qcom_smem_get() - resolve the pointer and size of a smem item
    636 * @host:	the remote processor, or -1
    637 * @item:	smem item handle
    638 * @size:	pointer to be filled out with size of the item
    639 *
      640 * Looks up the smem item and returns a pointer to it. The size of the
      641 * smem item is returned in @size.
    642 */
    643void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
    644{
    645	struct smem_partition *part;
    646	unsigned long flags;
    647	int ret;
    648	void *ptr = ERR_PTR(-EPROBE_DEFER);
    649
    650	if (!__smem)
    651		return ptr;
    652
    653	if (WARN_ON(item >= __smem->item_count))
    654		return ERR_PTR(-EINVAL);
    655
    656	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
    657					  HWSPINLOCK_TIMEOUT,
    658					  &flags);
    659	if (ret)
    660		return ERR_PTR(ret);
    661
    662	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
    663		part = &__smem->partitions[host];
    664		ptr = qcom_smem_get_private(__smem, part, item, size);
    665	} else if (__smem->global_partition.virt_base) {
    666		part = &__smem->global_partition;
    667		ptr = qcom_smem_get_private(__smem, part, item, size);
    668	} else {
    669		ptr = qcom_smem_get_global(__smem, item, size);
    670	}
    671
    672	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
    673
    674	return ptr;
    675
    676}
    677EXPORT_SYMBOL(qcom_smem_get);
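
        /*
         * Hypothetical follow-up to the qcom_smem_alloc() sketch above,
         * retrieving the item and its usable size (MY_SMEM_ITEM is again a
         * made-up example):
         *
         *	size_t size;
         *	void *ptr;
         *
         *	ptr = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
         *	if (IS_ERR(ptr))
         *		return PTR_ERR(ptr);
         *
         * The returned pointer references shared memory that the remote side
         * may modify at any time.
         */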
    678
    679/**
    680 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
    681 * @host:	the remote processor identifying a partition, or -1
    682 *
      683 * To be used by smem clients as a quick way to determine if any new
      684 * allocations have been made.
    685 */
    686int qcom_smem_get_free_space(unsigned host)
    687{
    688	struct smem_partition *part;
    689	struct smem_partition_header *phdr;
    690	struct smem_header *header;
    691	unsigned ret;
    692
    693	if (!__smem)
    694		return -EPROBE_DEFER;
    695
    696	if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
    697		part = &__smem->partitions[host];
    698		phdr = part->virt_base;
    699		ret = le32_to_cpu(phdr->offset_free_cached) -
    700		      le32_to_cpu(phdr->offset_free_uncached);
    701
     702		if (ret > part->size)
    703			return -EINVAL;
    704	} else if (__smem->global_partition.virt_base) {
    705		part = &__smem->global_partition;
    706		phdr = part->virt_base;
    707		ret = le32_to_cpu(phdr->offset_free_cached) -
    708		      le32_to_cpu(phdr->offset_free_uncached);
    709
     710		if (ret > part->size)
    711			return -EINVAL;
    712	} else {
    713		header = __smem->regions[0].virt_base;
    714		ret = le32_to_cpu(header->available);
    715
    716		if (ret > __smem->regions[0].size)
    717			return -EINVAL;
    718	}
    719
    720	return ret;
    721}
    722EXPORT_SYMBOL(qcom_smem_get_free_space);
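
        /*
         * Sketch of the polling pattern suggested above (hypothetical client
         * code): cache the previous reading and treat any change as a hint
         * that new items may have been allocated:
         *
         *	int free = qcom_smem_get_free_space(remote_host);
         *
         *	if (free >= 0 && free != last_free) {
         *		last_free = free;
         *		my_rescan_items();	(illustrative helper)
         *	}
         */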
    723
    724static bool addr_in_range(void __iomem *base, size_t size, void *addr)
    725{
    726	return base && (addr >= base && addr < base + size);
    727}
    728
    729/**
    730 * qcom_smem_virt_to_phys() - return the physical address associated
      731 * with an smem item pointer (previously returned by qcom_smem_get())
    732 * @p:	the virtual address to convert
    733 *
    734 * Returns 0 if the pointer provided is not within any smem region.
    735 */
    736phys_addr_t qcom_smem_virt_to_phys(void *p)
    737{
    738	struct smem_partition *part;
    739	struct smem_region *area;
    740	u64 offset;
    741	u32 i;
    742
    743	for (i = 0; i < SMEM_HOST_COUNT; i++) {
    744		part = &__smem->partitions[i];
    745
    746		if (addr_in_range(part->virt_base, part->size, p)) {
    747			offset = p - part->virt_base;
    748
    749			return (phys_addr_t)part->phys_base + offset;
    750		}
    751	}
    752
    753	part = &__smem->global_partition;
    754
    755	if (addr_in_range(part->virt_base, part->size, p)) {
    756		offset = p - part->virt_base;
    757
    758		return (phys_addr_t)part->phys_base + offset;
    759	}
    760
    761	for (i = 0; i < __smem->num_regions; i++) {
    762		area = &__smem->regions[i];
    763
    764		if (addr_in_range(area->virt_base, area->size, p)) {
    765			offset = p - area->virt_base;
    766
    767			return (phys_addr_t)area->aux_base + offset;
    768		}
    769	}
    770
    771	return 0;
    772}
    773EXPORT_SYMBOL(qcom_smem_virt_to_phys);
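
        /*
         * Hypothetical usage sketch: translating a pointer obtained from
         * qcom_smem_get() into a physical address, e.g. for sharing with a
         * remote processor; a zero return means the pointer was not inside
         * any smem region:
         *
         *	phys_addr_t phys = qcom_smem_virt_to_phys(ptr);
         *
         *	if (!phys)
         *		return -EINVAL;
         */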
    774
    775static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
    776{
    777	struct smem_header *header;
    778	__le32 *versions;
    779
    780	header = smem->regions[0].virt_base;
    781	versions = header->version;
    782
    783	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
    784}
    785
    786static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
    787{
    788	struct smem_ptable *ptable;
    789	u32 version;
    790
    791	ptable = smem->ptable;
    792	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
    793		return ERR_PTR(-ENOENT);
    794
    795	version = le32_to_cpu(ptable->version);
    796	if (version != 1) {
    797		dev_err(smem->dev,
    798			"Unsupported partition header version %d\n", version);
    799		return ERR_PTR(-EINVAL);
    800	}
    801	return ptable;
    802}
    803
    804static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
    805{
    806	struct smem_ptable *ptable;
    807	struct smem_info *info;
    808
    809	ptable = qcom_smem_get_ptable(smem);
    810	if (IS_ERR_OR_NULL(ptable))
    811		return SMEM_ITEM_COUNT;
    812
    813	info = (struct smem_info *)&ptable->entry[ptable->num_entries];
    814	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
    815		return SMEM_ITEM_COUNT;
    816
    817	return le16_to_cpu(info->num_items);
    818}
    819
    820/*
    821 * Validate the partition header for a partition whose partition
    822 * table entry is supplied.  Returns a pointer to its header if
    823 * valid, or a null pointer otherwise.
    824 */
    825static struct smem_partition_header *
    826qcom_smem_partition_header(struct qcom_smem *smem,
    827		struct smem_ptable_entry *entry, u16 host0, u16 host1)
    828{
    829	struct smem_partition_header *header;
    830	u32 phys_addr;
    831	u32 size;
    832
    833	phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);
    834	header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size));
    835
    836	if (!header)
    837		return NULL;
    838
    839	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
    840		dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
    841		return NULL;
    842	}
    843
    844	if (host0 != le16_to_cpu(header->host0)) {
    845		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
    846				host0, le16_to_cpu(header->host0));
    847		return NULL;
    848	}
    849	if (host1 != le16_to_cpu(header->host1)) {
    850		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
    851				host1, le16_to_cpu(header->host1));
    852		return NULL;
    853	}
    854
    855	size = le32_to_cpu(header->size);
    856	if (size != le32_to_cpu(entry->size)) {
    857		dev_err(smem->dev, "bad partition size (%u != %u)\n",
    858			size, le32_to_cpu(entry->size));
    859		return NULL;
    860	}
    861
    862	if (le32_to_cpu(header->offset_free_uncached) > size) {
    863		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
    864			le32_to_cpu(header->offset_free_uncached), size);
    865		return NULL;
    866	}
    867
    868	return header;
    869}
    870
    871static int qcom_smem_set_global_partition(struct qcom_smem *smem)
    872{
    873	struct smem_partition_header *header;
    874	struct smem_ptable_entry *entry;
    875	struct smem_ptable *ptable;
    876	bool found = false;
    877	int i;
    878
    879	if (smem->global_partition.virt_base) {
    880		dev_err(smem->dev, "Already found the global partition\n");
    881		return -EINVAL;
    882	}
    883
    884	ptable = qcom_smem_get_ptable(smem);
    885	if (IS_ERR(ptable))
    886		return PTR_ERR(ptable);
    887
    888	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
    889		entry = &ptable->entry[i];
    890		if (!le32_to_cpu(entry->offset))
    891			continue;
    892		if (!le32_to_cpu(entry->size))
    893			continue;
    894
    895		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
    896			continue;
    897
    898		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
    899			found = true;
    900			break;
    901		}
    902	}
    903
    904	if (!found) {
    905		dev_err(smem->dev, "Missing entry for global partition\n");
    906		return -EINVAL;
    907	}
    908
    909	header = qcom_smem_partition_header(smem, entry,
    910				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
    911	if (!header)
    912		return -EINVAL;
    913
    914	smem->global_partition.virt_base = (void __iomem *)header;
    915	smem->global_partition.phys_base = smem->regions[0].aux_base +
    916								le32_to_cpu(entry->offset);
    917	smem->global_partition.size = le32_to_cpu(entry->size);
    918	smem->global_partition.cacheline = le32_to_cpu(entry->cacheline);
    919
    920	return 0;
    921}
    922
    923static int
    924qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
    925{
    926	struct smem_partition_header *header;
    927	struct smem_ptable_entry *entry;
    928	struct smem_ptable *ptable;
    929	u16 remote_host;
    930	u16 host0, host1;
    931	int i;
    932
    933	ptable = qcom_smem_get_ptable(smem);
    934	if (IS_ERR(ptable))
    935		return PTR_ERR(ptable);
    936
    937	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
    938		entry = &ptable->entry[i];
    939		if (!le32_to_cpu(entry->offset))
    940			continue;
    941		if (!le32_to_cpu(entry->size))
    942			continue;
    943
    944		host0 = le16_to_cpu(entry->host0);
    945		host1 = le16_to_cpu(entry->host1);
    946		if (host0 == local_host)
    947			remote_host = host1;
    948		else if (host1 == local_host)
    949			remote_host = host0;
    950		else
    951			continue;
    952
    953		if (remote_host >= SMEM_HOST_COUNT) {
    954			dev_err(smem->dev, "bad host %u\n", remote_host);
    955			return -EINVAL;
    956		}
    957
    958		if (smem->partitions[remote_host].virt_base) {
    959			dev_err(smem->dev, "duplicate host %u\n", remote_host);
    960			return -EINVAL;
    961		}
    962
    963		header = qcom_smem_partition_header(smem, entry, host0, host1);
    964		if (!header)
    965			return -EINVAL;
    966
    967		smem->partitions[remote_host].virt_base = (void __iomem *)header;
    968		smem->partitions[remote_host].phys_base = smem->regions[0].aux_base +
    969										le32_to_cpu(entry->offset);
    970		smem->partitions[remote_host].size = le32_to_cpu(entry->size);
    971		smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline);
    972	}
    973
    974	return 0;
    975}
    976
    977static int qcom_smem_map_toc(struct qcom_smem *smem, struct smem_region *region)
    978{
    979	u32 ptable_start;
    980
     981	/* map the first 4K for the smem header */
    982	region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K);
    983	ptable_start = region->aux_base + region->size - SZ_4K;
     984	/* map the last 4K for the toc */
    985	smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K);
    986
    987	if (!region->virt_base || !smem->ptable)
    988		return -ENOMEM;
    989
    990	return 0;
    991}
    992
    993static int qcom_smem_map_global(struct qcom_smem *smem, u32 size)
    994{
    995	u32 phys_addr;
    996
    997	phys_addr = smem->regions[0].aux_base;
    998
    999	smem->regions[0].size = size;
   1000	smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size);
   1001
   1002	if (!smem->regions[0].virt_base)
   1003		return -ENOMEM;
   1004
   1005	return 0;
   1006}
   1007
   1008static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
   1009				 struct smem_region *region)
   1010{
   1011	struct device *dev = smem->dev;
   1012	struct device_node *np;
   1013	struct resource r;
   1014	int ret;
   1015
   1016	np = of_parse_phandle(dev->of_node, name, 0);
   1017	if (!np) {
   1018		dev_err(dev, "No %s specified\n", name);
   1019		return -EINVAL;
   1020	}
   1021
   1022	ret = of_address_to_resource(np, 0, &r);
   1023	of_node_put(np);
   1024	if (ret)
   1025		return ret;
   1026
   1027	region->aux_base = r.start;
   1028	region->size = resource_size(&r);
   1029
   1030	return 0;
   1031}
   1032
   1033static int qcom_smem_probe(struct platform_device *pdev)
   1034{
   1035	struct smem_header *header;
   1036	struct reserved_mem *rmem;
   1037	struct qcom_smem *smem;
   1038	unsigned long flags;
   1039	size_t array_size;
   1040	int num_regions;
   1041	int hwlock_id;
   1042	u32 version;
   1043	u32 size;
   1044	int ret;
   1045	int i;
   1046
   1047	num_regions = 1;
   1048	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
   1049		num_regions++;
   1050
   1051	array_size = num_regions * sizeof(struct smem_region);
   1052	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
   1053	if (!smem)
   1054		return -ENOMEM;
   1055
   1056	smem->dev = &pdev->dev;
   1057	smem->num_regions = num_regions;
   1058
   1059	rmem = of_reserved_mem_lookup(pdev->dev.of_node);
   1060	if (rmem) {
   1061		smem->regions[0].aux_base = rmem->base;
   1062		smem->regions[0].size = rmem->size;
   1063	} else {
   1064		/*
   1065		 * Fall back to the memory-region reference, if we're not a
   1066		 * reserved-memory node.
   1067		 */
   1068		ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
   1069		if (ret)
   1070			return ret;
   1071	}
   1072
   1073	if (num_regions > 1) {
   1074		ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
   1075		if (ret)
   1076			return ret;
   1077	}
   1078
   1079
   1080	ret = qcom_smem_map_toc(smem, &smem->regions[0]);
   1081	if (ret)
   1082		return ret;
   1083
   1084	for (i = 1; i < num_regions; i++) {
   1085		smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
   1086							     smem->regions[i].aux_base,
   1087							     smem->regions[i].size);
   1088		if (!smem->regions[i].virt_base) {
   1089			dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
   1090			return -ENOMEM;
   1091		}
   1092	}
   1093
   1094	header = smem->regions[0].virt_base;
   1095	if (le32_to_cpu(header->initialized) != 1 ||
   1096	    le32_to_cpu(header->reserved)) {
   1097		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
   1098		return -EINVAL;
   1099	}
   1100
   1101	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
   1102	if (hwlock_id < 0) {
   1103		if (hwlock_id != -EPROBE_DEFER)
   1104			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
   1105		return hwlock_id;
   1106	}
   1107
   1108	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
   1109	if (!smem->hwlock)
   1110		return -ENXIO;
   1111
   1112	ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags);
   1113	if (ret)
   1114		return ret;
   1115	size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset);
   1116	hwspin_unlock_irqrestore(smem->hwlock, &flags);
   1117
   1118	version = qcom_smem_get_sbl_version(smem);
   1119	/*
    1120	 * smem header mapping is required only in the heap version scheme, so
    1121	 * unmap it here. It will be remapped in qcom_smem_map_global() when the
    1122	 * whole partition is mapped again.
   1123	 */
   1124	devm_iounmap(smem->dev, smem->regions[0].virt_base);
   1125	switch (version >> 16) {
   1126	case SMEM_GLOBAL_PART_VERSION:
   1127		ret = qcom_smem_set_global_partition(smem);
   1128		if (ret < 0)
   1129			return ret;
   1130		smem->item_count = qcom_smem_get_item_count(smem);
   1131		break;
   1132	case SMEM_GLOBAL_HEAP_VERSION:
    1133		ret = qcom_smem_map_global(smem, size);
        		if (ret)
        			return ret;
   1134		smem->item_count = SMEM_ITEM_COUNT;
   1135		break;
   1136	default:
   1137		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
   1138		return -EINVAL;
   1139	}
   1140
   1141	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
   1142	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
   1143	if (ret < 0 && ret != -ENOENT)
   1144		return ret;
   1145
   1146	__smem = smem;
   1147
   1148	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
   1149						      PLATFORM_DEVID_NONE, NULL,
   1150						      0);
   1151	if (IS_ERR(smem->socinfo))
   1152		dev_dbg(&pdev->dev, "failed to register socinfo device\n");
   1153
   1154	return 0;
   1155}
   1156
   1157static int qcom_smem_remove(struct platform_device *pdev)
   1158{
   1159	platform_device_unregister(__smem->socinfo);
   1160
   1161	hwspin_lock_free(__smem->hwlock);
   1162	__smem = NULL;
   1163
   1164	return 0;
   1165}
   1166
   1167static const struct of_device_id qcom_smem_of_match[] = {
   1168	{ .compatible = "qcom,smem" },
   1169	{}
   1170};
   1171MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
   1172
   1173static struct platform_driver qcom_smem_driver = {
   1174	.probe = qcom_smem_probe,
   1175	.remove = qcom_smem_remove,
   1176	.driver  = {
   1177		.name = "qcom-smem",
   1178		.of_match_table = qcom_smem_of_match,
   1179		.suppress_bind_attrs = true,
   1180	},
   1181};
   1182
   1183static int __init qcom_smem_init(void)
   1184{
   1185	return platform_driver_register(&qcom_smem_driver);
   1186}
   1187arch_initcall(qcom_smem_init);
   1188
   1189static void __exit qcom_smem_exit(void)
   1190{
   1191	platform_driver_unregister(&qcom_smem_driver);
   1192}
    1193module_exit(qcom_smem_exit);
   1194
   1195MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
   1196MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
   1197MODULE_LICENSE("GPL v2");