cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ipa_cmd.c (21584B)


// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"

/**
 * DOC:  IPA Immediate Commands
 *
 * The AP command TX endpoint is used to issue immediate commands to the IPA.
 * An immediate command is generally used to request the IPA do something
 * other than data transfer to another endpoint.
 *
 * Immediate commands are represented by GSI transactions just like other
 * transfer requests, and use a single GSI TRE.  Each immediate command
 * has a well-defined format, having a payload of a known length.  This
 * allows the transfer element's length field to be used to hold an
 * immediate command's opcode.  The payload for a command resides in AP
 * memory and is described by a single scatterlist entry in its transaction.
 * Commands do not require a transaction completion callback, and are
 * (currently) always issued using gsi_trans_commit_wait().
 */
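
/* The functions below follow a common pattern: a caller allocates a command
 * transaction, adds one or more immediate commands to it, then commits it
 * synchronously.  A minimal, illustrative sketch of that flow (error
 * handling and values are placeholders; see ipa_table.c and ipa_mem.c for
 * real users):
 *
 *	struct gsi_trans *trans;
 *
 *	trans = ipa_cmd_trans_alloc(ipa, 1);
 *	if (!trans)
 *		return -EBUSY;
 *	ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
 *	gsi_trans_commit_wait(trans);
 */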

/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
	pipeline_clear_hps		= 0x0,
	pipeline_clear_src_grp		= 0x1,
	pipeline_clear_full		= 0x2,
};

/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

struct ipa_cmd_hw_ip_fltrt_init {
	__le64 hash_rules_addr;
	__le64 flags;
	__le64 nhash_rules_addr;
};

/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK			GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK			GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK			GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK			GENMASK_ULL(55, 40)
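
/* Taken together, the masks above lay out the 64-bit "flags" word of
 * ipa_cmd_hw_ip_fltrt_init as follows (bits 63:56 are unused):
 *
 *	bits 11:0	hashed table size
 *	bits 27:12	hashed table offset in IPA-local memory
 *	bits 39:28	non-hashed table size
 *	bits 55:40	non-hashed table offset in IPA-local memory
 */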

/* IPA_CMD_HDR_INIT_LOCAL */

struct ipa_cmd_hw_hdr_init_local {
	__le64 hdr_table_addr;
	__le32 flags;
	__le32 reserved;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK		GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK		GENMASK(27, 12)

/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_register_write {
	__le16 flags;		/* Unused/reserved prior to IPA v4.0 */
	__le16 offset;
	__le32 value;
	__le32 value_mask;
	__le32 clear_options;	/* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK		GENMASK(14, 11)
/* The next field is not present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK		GENMASK(15, 15)

/* The next field and its values are not present for IPA v4.0+ */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK		GENMASK(1, 0)

/* IPA_CMD_IP_PACKET_INIT */

struct ipa_cmd_ip_packet_init {
	u8 dest_endpoint;
	u8 reserved[7];
};

/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK		GENMASK(4, 0)

/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_hw_dma_mem_mem {
	__le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
	__le16 size;
	__le16 local_addr;
	__le16 flags;
	__le64 system_addr;
};

/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ			GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK		GENMASK(0, 0)
/* The next two fields are not present for IPA v4.0+ */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK		GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK	GENMASK(3, 2)

/* IPA_CMD_IP_PACKET_TAG_STATUS */

struct ipa_cmd_ip_packet_tag_status {
	__le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK			GENMASK_ULL(63, 16)

/* Immediate command payload */
union ipa_cmd_payload {
	struct ipa_cmd_hw_ip_fltrt_init table_init;
	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
	struct ipa_cmd_register_write register_write;
	struct ipa_cmd_ip_packet_init ip_packet_init;
	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};
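
/* Every command payload is allocated as this union, so each entry in the
 * command DMA pool (see ipa_cmd_pool_init() below) is sized for the largest
 * member (table_init, with its three __le64 fields), whichever command is
 * actually issued.
 */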

static void ipa_cmd_validate_build(void)
{
	/* The sizes of the filter and route tables need to fit into fields
	 * in the ipa_cmd_hw_ip_fltrt_init structure.  Although hashed tables
	 * might not be used, non-hashed and hashed tables have the same
	 * maximum size.  IPv4 and IPv6 filter tables have the same number
	 * of entries, as do IPv4 and IPv6 route tables.
	 */
#define TABLE_SIZE	(TABLE_COUNT_MAX * sizeof(__le64))
#define TABLE_COUNT_MAX	max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE

	/* Hashed and non-hashed fields are assumed to be the same size */
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));

	/* Valid endpoint numbers must fit in the IP packet init command */
	BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
		     IPA_ENDPOINT_MAX - 1);
}
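
/* As a worked example of the size checks above: the size fields are 12 bits
 * wide, so field_max() yields 4095.  A table is an array of 8-byte (__le64)
 * entries, so the build breaks if the larger of the filter and route entry
 * counts exceeds 4095 / 8 = 511 entries.
 */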

/* Validate a memory region holding a table */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
{
	u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
	const char *table = route ? "route" : "filter";
	struct device *dev = &ipa->pdev->dev;

	/* Size must fit in the immediate command field that holds it */
	if (mem->size > size_max) {
		dev_err(dev, "%s table region size too large\n", table);
		dev_err(dev, "    (0x%04x > 0x%04x)\n",
			mem->size, size_max);

		return false;
	}

	/* Offset must fit in the immediate command field that holds it */
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "%s table region offset too large\n", table);
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, mem->offset, offset_max);

		return false;
	}

	/* Entire memory range must fit within IPA-local memory */
	if (mem->offset > ipa->mem_size ||
	    mem->size > ipa->mem_size - mem->offset) {
		dev_err(dev, "%s table region out of range\n", table);
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			mem->offset, mem->size, ipa->mem_size);

		return false;
	}

	return true;
}

/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	const struct ipa_mem *mem;
	u32 offset_max;
	u32 size_max;
	u32 offset;
	u32 size;

	/* In ipa_cmd_hdr_init_local_add() we record the offset and size of
	 * the header table memory area in an immediate command.  Make sure
	 * the offset and size fit in the fields that need to hold them, and
	 * that the entire range is within the overall IPA memory range.
	 */
	offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);

	/* The header memory area contains both the modem and AP header
	 * regions.  The modem portion defines the address of the region.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;

	/* Make sure the offset fits in the IPA command */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "header table region offset too large\n");
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, offset, offset_max);

		return false;
	}

	/* Add the size of the AP portion (if defined) to the combined size */
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	/* Make sure the combined size fits in the IPA command */
	if (size > size_max) {
		dev_err(dev, "header table region size too large\n");
		dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);

		return false;
	}

	/* Make sure the entire combined area fits in IPA memory */
	if (size > ipa->mem_size || offset > ipa->mem_size - size) {
		dev_err(dev, "header table region out of range\n");
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			offset, size, ipa->mem_size);

		return false;
	}

	return true;
}

/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
						const char *name, u32 offset)
{
	struct ipa_cmd_register_write *payload;
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;
	u32 bit_count;

	/* The maximum offset in a register_write immediate command depends
	 * on the version of IPA.  A 16 bit offset is always supported,
	 * but starting with IPA v4.0 some additional high-order bits are
	 * allowed.
	 */
	bit_count = BITS_PER_BYTE * sizeof(payload->offset);
	if (ipa->version >= IPA_VERSION_4_0)
		bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
	BUILD_BUG_ON(bit_count > 32);
	offset_max = ~0U >> (32 - bit_count);

	/* Make sure the offset can be represented by the field(s)
	 * that holds it.  Also make sure the offset is not outside
	 * the overall IPA memory range.
	 */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
			name, ipa->mem_offset, offset, offset_max);
		return false;
	}

	return true;
}
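
/* Concretely: payload->offset is a 16-bit field, so bit_count starts at 16
 * and offset_max is 0xffff before IPA v4.0.  The OFFSET_HIGH field adds four
 * more bits (hweight32(GENMASK(14, 11)) == 4) for v4.0+, giving a bit_count
 * of 20 and an offset_max of 0xfffff.
 */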

/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
	const char *name;
	u32 offset;

	/* If hashed tables are supported, ensure the hash flush register
	 * offset will fit in a register write IPA immediate command.
	 */
	if (ipa_table_hash_support(ipa)) {
		offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
		name = "filter/route hash flush";
		if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
			return false;
	}

	/* Each endpoint can have a status endpoint associated with it,
	 * and this is recorded in an endpoint register.  If the modem
	 * crashes, we reset the status endpoint for all modem endpoints
	 * using a register write IPA immediate command.  Make sure the
	 * worst case (highest endpoint number) offset of that endpoint
	 * fits in the register write command field(s) that must hold it.
	 */
	offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
	name = "maximal endpoint status";
	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
		return false;

	return true;
}

bool ipa_cmd_data_valid(struct ipa *ipa)
{
	if (!ipa_cmd_header_valid(ipa))
		return false;

	if (!ipa_cmd_register_write_valid(ipa))
		return false;

	return true;
}


int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	/* This is as good a place as any to validate build constants */
	ipa_cmd_validate_build();

	/* Even though command payloads are allocated one at a time,
	 * a single transaction can require up to tlv_count of them,
	 * so we treat them as if that many can be allocated at once.
	 */
	return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
				       sizeof(union ipa_cmd_payload),
				       tre_max, channel->tlv_count);
}

void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}

static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
	struct gsi_trans_info *trans_info;
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;

	return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}

/* If hash_size is 0, hash_offset and hash_addr are ignored. */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
			    enum ipa_cmd_opcode opcode, u16 size, u32 offset,
			    dma_addr_t addr, u16 hash_size, u32 hash_offset,
			    dma_addr_t hash_addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_hw_ip_fltrt_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u64 val;

	/* Record the non-hash table offset and size */
	offset += ipa->mem_offset;
	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

	/* The hash table offset and address are zero if its size is 0 */
	if (hash_size) {
		/* Record the hash table offset and size */
		hash_offset += ipa->mem_offset;
		val |= u64_encode_bits(hash_offset,
				       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
		val |= u64_encode_bits(hash_size,
				       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->table_init;

	/* Fill in all offsets and sizes and the non-hash table address */
	if (hash_size)
		payload->hash_rules_addr = cpu_to_le64(hash_addr);
	payload->flags = cpu_to_le64(val);
	payload->nhash_rules_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}
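
/* For illustration, with made-up values: a non-hashed table of size 0x40 at
 * IPA-local offset 0x388 and no hashed table encodes as
 *
 *	val = (0x388ULL << 40) | (0x40ULL << 28)
 *
 * which is what u64_encode_bits() produces for the NHASH_ADDR (bits 55:40)
 * and NHASH_SIZE (bits 39:28) fields above.
 */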

/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
	struct ipa_cmd_hw_hdr_init_local *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u32 flags;

	offset += ipa->mem_offset;

	/* With this command we tell the IPA where in its local memory the
	 * header tables reside.  The content of the buffer provided is
	 * also written via DMA into that space.  The IPA hardware owns
	 * the table, but the AP must initialize it.
	 */
	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->hdr_init_local;

	payload->hdr_table_addr = cpu_to_le64(addr);
	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	payload->flags = cpu_to_le32(flags);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
				u32 mask, bool clear_full)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_register_write *payload;
	union ipa_cmd_payload *cmd_payload;
	u32 opcode = IPA_CMD_REGISTER_WRITE;
	dma_addr_t payload_addr;
	u32 clear_option;
	u32 options;
	u16 flags;

	/* pipeline_clear_src_grp is not used */
	clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

	/* IPA v4.0+ represents the pipeline clear options in the opcode.  It
	 * also supports a larger offset by encoding additional high-order
	 * bits in the payload flags field.
	 */
	if (ipa->version >= IPA_VERSION_4_0) {
		u16 offset_high;
		u32 val;

		/* Opcode encodes pipeline clear options */
		/* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
		val = u16_encode_bits(clear_option,
				      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
		opcode |= val;

		/* Extract the high 4 bits from the offset */
		offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
		offset &= (1 << 16) - 1;

		/* Encode those high 4 bits into the flags field */
		flags = u16_encode_bits(offset_high,
				REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
		options = 0;	/* reserved */

	} else {
		flags = 0;	/* SKIP_CLEAR flag is always 0 */
		options = u16_encode_bits(clear_option,
					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->register_write;

	payload->flags = cpu_to_le16(flags);
	payload->offset = cpu_to_le16((u16)offset);
	payload->value = cpu_to_le32(value);
	payload->value_mask = cpu_to_le32(mask);
	payload->clear_options = cpu_to_le32(options);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}
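
/* A worked example of the v4.0+ offset split above, using a hypothetical
 * 20-bit register offset of 0x12345: u32_get_bits() with GENMASK(19, 16)
 * yields offset_high = 0x1, masking with (1 << 16) - 1 leaves offset =
 * 0x2345, and the high nibble lands in bits 14:11 of the payload flags
 * while the low 16 bits go in payload->offset.
 */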

/* Skip IP packet processing on the next data transfer on a TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
	struct ipa_cmd_ip_packet_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_init;

	payload->dest_endpoint = u8_encode_bits(endpoint_id,
					IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
	struct ipa_cmd_hw_dma_mem_mem *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u16 flags;

	/* size and offset must fit in 16 bit fields */
	WARN_ON(!size);
	WARN_ON(size > U16_MAX);
	WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);

	offset += ipa->mem_offset;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->dma_shared_mem;

	/* payload->clear_after_read was reserved prior to IPA v4.0.  It's
	 * never needed for current code, so it's 0 regardless of version.
	 */
	payload->size = cpu_to_le16(size);
	payload->local_addr = cpu_to_le16(offset);
	/* payload->flags:
	 *   direction:		0 = write to IPA, 1 = read from IPA
	 * Starting at v4.0 these are reserved; either way, all zero:
	 *   pipeline clear:	0 = wait for pipeline clear (don't skip)
	 *   clear_options:	0 = pipeline_clear_hps
	 * Instead, for v4.0+ these are encoded in the opcode.  But again
	 * since both values are 0 we won't bother OR'ing them in.
	 */
	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
	payload->flags = cpu_to_le16(flags);
	payload->system_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}
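
/* Note that the only flag bit actually set above is bit 0 (the DIRECTION
 * field), so a read from IPA-local memory carries flags == 0x0001 and a
 * write toward the IPA carries flags == 0x0000; all other flag bits stay
 * zero, as the comment in the function explains.
 */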

static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
	struct ipa_cmd_ip_packet_tag_status *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_tag_status;

	payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	union ipa_cmd_payload *payload;
	dma_addr_t payload_addr;

	/* Just transfer a zero-filled payload structure */
	payload = ipa_cmd_payload_alloc(ipa, &payload_addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Add immediate commands to a transaction to clear the hardware pipeline */
void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_endpoint *endpoint;

	/* This will complete when the transfer is received */
	reinit_completion(&ipa->completion);

	/* Issue a no-op register write command (mask 0 means no write) */
	ipa_cmd_register_write_add(trans, 0, 0, 0, true);

	/* Send a data packet through the IPA pipeline.  The packet_init
	 * command says to send the next packet directly to the exception
	 * endpoint without any other IPA processing.  The tag_status
	 * command requests that status be generated on completion of
	 * that transfer, and that it will be tagged with a value.
	 * Finally, the transfer command sends a small packet of data
	 * (instead of a command) using the command endpoint.
	 */
	endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
	ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
	ipa_cmd_ip_tag_status_add(trans);
	ipa_cmd_transfer_add(trans);
}
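
/* The pipeline clear sequence above adds exactly four commands to the
 * transaction (register_write, packet_init, tag_status and the final data
 * transfer), which is why ipa_cmd_pipeline_clear_count() below returns 4;
 * callers can use that count to size the transaction they allocate.
 */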

/* Returns the number of commands required to clear the pipeline */
u32 ipa_cmd_pipeline_clear_count(void)
{
	return 4;
}

void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
{
	wait_for_completion(&ipa->completion);
}

/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
	struct ipa_endpoint *endpoint;

	if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
		return NULL;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];

	return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
				       tre_count, DMA_NONE);
}