cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

thunderbolt.h (21379B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt service API
 *
 * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_ICM_EVENT = 10,
	TB_CFG_PKG_ICM_CMD = 11,
	TB_CFG_PKG_ICM_RESP = 12,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};

/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *			 Thunderbolt dock (and Display Port). All PCIe
 *			 links downstream of the dock are removed.
 * @TB_SECURITY_NOPCIE: For USB4 systems this level is used when the
 *			PCIe tunneling is disabled from the BIOS.
 */
enum tb_security_level {
	TB_SECURITY_NONE,
	TB_SECURITY_USER,
	TB_SECURITY_SECURE,
	TB_SECURITY_DPONLY,
	TB_SECURITY_USBONLY,
	TB_SECURITY_NOPCIE,
};

/**
 * struct tb - main thunderbolt bus structure
 * @dev: Domain device
 * @lock: Big lock. Must be held when accessing any struct
 *	  tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations vector
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @nboot_acl: Number of boot ACLs the domain supports
 * @privdata: Private connection manager specific data
 */
struct tb {
	struct device dev;
	struct mutex lock;
	struct tb_nhi *nhi;
	struct tb_ctl *ctl;
	struct workqueue_struct *wq;
	struct tb_switch *root_switch;
	const struct tb_cm_ops *cm_ops;
	int index;
	enum tb_security_level security_level;
	size_t nboot_acl;
	unsigned long privdata[];
};

extern struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;

#define TB_LINKS_PER_PHY_PORT	2

static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
	return (link - 1) / TB_LINKS_PER_PHY_PORT;
}
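
/*
 * Links are numbered from 1 and each physical port carries
 * TB_LINKS_PER_PHY_PORT (2) links, so for example links 1 and 2 both
 * map to phy port 0 and links 3 and 4 to phy port 1.
 */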

/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 *
 * User needs to provide serialization if needed.
 */
struct tb_property_dir {
	const uuid_t *uuid;
	struct list_head properties;
};

enum tb_property_type {
	TB_PROPERTY_TYPE_UNKNOWN = 0x00,
	TB_PROPERTY_TYPE_DIRECTORY = 0x44,
	TB_PROPERTY_TYPE_DATA = 0x64,
	TB_PROPERTY_TYPE_TEXT = 0x74,
	TB_PROPERTY_TYPE_VALUE = 0x76,
};

#define TB_PROPERTY_KEY_SIZE	8

/**
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key for the property (always terminated).
 * @type: Type of the property
 * @length: Length of the property data in dwords
 * @value: Property value
 *
 * Users use @type to determine which field in @value is filled.
 */
struct tb_property {
	struct list_head list;
	char key[TB_PROPERTY_KEY_SIZE + 1];
	enum tb_property_type type;
	size_t length;
	union {
		struct tb_property_dir *dir;
		u8 *data;
		char *text;
		u32 immediate;
	} value;
};

struct tb_property_dir *tb_property_parse_dir(const u32 *block,
					      size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
			       size_t block_len);
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
			      u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
			 const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
			 const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
			struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
			const char *key, enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
					 struct tb_property *prev);

#define tb_property_for_each(dir, property)			\
	for (property = tb_property_get_next(dir, NULL);	\
	     property;						\
	     property = tb_property_get_next(dir, property))

int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
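
/*
 * A minimal sketch of exporting a property directory for the local
 * domain and walking its entries. The "example" key and the property
 * values are hypothetical; error handling is trimmed for brevity.
 */
#if 0
static struct tb_property_dir *example_dir;

static int example_publish_properties(void)
{
	struct tb_property *p;
	int ret;

	example_dir = tb_property_create_dir(NULL);
	if (!example_dir)
		return -ENOMEM;

	tb_property_add_immediate(example_dir, "prtcid", 1);
	tb_property_add_immediate(example_dir, "prtcvers", 1);
	tb_property_add_text(example_dir, "name", "example");

	/* Walk what we just added */
	tb_property_for_each(example_dir, p)
		pr_info("property key %s type %#x\n", p->key, p->type);

	/* Publish under "example" in the root directory */
	ret = tb_register_property_dir("example", example_dir);
	if (ret)
		tb_property_free_dir(example_dir);
	return ret;
}
#endif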

/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote domain (host)
 * @local_uuid: Cached local UUID
 * @route: Route string through which the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @local_max_hopid: Maximum input HopID of this host
 * @remote_max_hopid: Maximum input HopID of the remote host
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the link (1 or 2)
 * @is_unplugged: The XDomain is unplugged
 * @needs_uuid: If the XDomain does not have @remote_uuid it will be
 *		queried first
 * @service_ids: Used to generate IDs for the services
 * @in_hopids: Input HopIDs for DMA tunneling
 * @out_hopids: Output HopIDs for DMA tunneling
 * @local_property_block: Local block of properties
 * @local_property_block_gen: Generation of @local_property_block
 * @local_property_block_len: Length of the @local_property_block in dwords
 * @remote_properties: Properties exported by the remote domain
 * @remote_property_block_gen: Generation of @remote_properties
 * @state: Next XDomain discovery state to run
 * @state_work: Work used to run the next state
 * @state_retries: Number of retries remaining for the state
 * @properties_changed_work: Work used to notify the remote domain that
 *			     our properties have changed
 * @properties_changed_retries: Number of times left to send properties
 *				changed notification
 * @bonding_possible: True if lane bonding is possible on local side
 * @target_link_width: Target link width from the remote host
 * @link: Root switch link the remote domain is connected to (ICM only)
 * @depth: Depth in the chain the remote domain is connected at (ICM only)
 *
 * This structure represents a connection across two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 *
 * Service drivers may access this structure if they need to enumerate
 * non-standard properties but they need to hold @lock when doing so
 * because properties can be changed asynchronously in response to
 * changes in the remote domain.
 */
struct tb_xdomain {
	struct device dev;
	struct tb *tb;
	uuid_t *remote_uuid;
	const uuid_t *local_uuid;
	u64 route;
	u16 vendor;
	u16 device;
	unsigned int local_max_hopid;
	unsigned int remote_max_hopid;
	struct mutex lock;
	const char *vendor_name;
	const char *device_name;
	unsigned int link_speed;
	unsigned int link_width;
	bool is_unplugged;
	bool needs_uuid;
	struct ida service_ids;
	struct ida in_hopids;
	struct ida out_hopids;
	u32 *local_property_block;
	u32 local_property_block_gen;
	u32 local_property_block_len;
	struct tb_property_dir *remote_properties;
	u32 remote_property_block_gen;
	int state;
	struct delayed_work state_work;
	int state_retries;
	struct delayed_work properties_changed_work;
	int properties_changed_retries;
	bool bonding_possible;
	u8 target_link_width;
	u8 link;
	u8 depth;
};

int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
			    int transmit_ring, int receive_path,
			    int receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
			     int transmit_ring, int receive_path,
			     int receive_ring);

static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)
{
	return tb_xdomain_disable_paths(xd, -1, -1, -1, -1);
}
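
/*
 * A minimal sketch of enabling DMA paths for a service, assuming the
 * rings have already been allocated and the transmit/receive HopIDs
 * have been negotiated with the remote host through the service
 * specific protocol (the helper name is hypothetical):
 */
#if 0
static int example_enable_dma(struct tb_xdomain *xd, struct tb_ring *tx,
			      struct tb_ring *rx, int transmit_path,
			      int receive_path)
{
	return tb_xdomain_enable_paths(xd, transmit_path, tx->hop,
				       receive_path, rx->hop);
}
#endif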

struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);

static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_uuid(tb, uuid);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *
tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
	if (xd)
		get_device(&xd->dev);
	return xd;
}

static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
	if (xd)
		put_device(&xd->dev);
}
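
/*
 * A sketch of looking up an XDomain by route. The assumption here,
 * based on the get/put helpers above, is that a successful lookup
 * returns a reference that must be dropped with tb_xdomain_put():
 */
#if 0
static void example_lookup(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	xd = tb_xdomain_find_by_route_locked(tb, route);
	if (!xd)
		return;
	/* ... use xd, taking xd->lock where the kernel-doc requires ... */
	tb_xdomain_put(xd);
}
#endif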

static inline bool tb_is_xdomain(const struct device *dev)
{
	return dev->type == &tb_xdomain_type;
}

static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
	if (tb_is_xdomain(dev))
		return container_of(dev, struct tb_xdomain, dev);
	return NULL;
}

int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec);

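/*
 * A sketch of sending a service protocol request and waiting up to a
 * second for the reply. struct example_req/example_resp stand in for
 * the real wire format of the service protocol:
 */
#if 0
static int example_send_request(struct tb_xdomain *xd,
				const struct example_req *req,
				struct example_resp *resp)
{
	return tb_xdomain_request(xd, req, sizeof(*req),
				  TB_CFG_PKG_XDOMAIN_REQ,
				  resp, sizeof(*resp),
				  TB_CFG_PKG_XDOMAIN_RESP, 1000);
}
#endif
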
/**
 * tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *	      here tells the XDomain core that the message was handled
 *	      by this handler and should not be forwarded to other
 *	      handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. The only limitation is that the
 * XDomain discovery protocol UUID cannot be registered since it is
 * handled by the core XDomain code.
 *
 * The @callback must check that the message is really directed to the
 * service the driver implements.
 */
struct tb_protocol_handler {
	const uuid_t *uuid;
	int (*callback)(const void *buf, size_t size, void *data);
	void *data;
	struct list_head list;
};

int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);

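/*
 * A sketch of hooking into incoming XDomain messages for one protocol
 * UUID. The UUID bytes and example_msg_is_for_us() are hypothetical
 * placeholders for the service's own protocol definitions:
 */
#if 0
static const uuid_t example_proto_uuid =
	UUID_INIT(0x00000000, 0x0000, 0x0000,	/* placeholder UUID */
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);

static int example_handle_msg(const void *buf, size_t size, void *data)
{
	if (!example_msg_is_for_us(buf, size))
		return 0;	/* not ours, let other handlers see it */
	/* ... process the message ... */
	return 1;		/* consumed, do not forward */
}

static struct tb_protocol_handler example_handler = {
	.uuid = &example_proto_uuid,
	.callback = example_handle_msg,
};

/* tb_register_protocol_handler(&example_handler); */
#endif
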
/**
 * struct tb_service - Thunderbolt service
 * @dev: XDomain device
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory
 * @prtcid: Protocol ID from the properties directory
 * @prtcvers: Protocol version from the properties directory
 * @prtcrevs: Protocol software revision from the properties directory
 * @prtcstns: Protocol settings mask from the properties directory
 * @debugfs_dir: Pointer to the service debugfs directory. Always created
 *		 when debugfs is enabled. Can be used by service drivers to
 *		 add their own entries under the service.
 *
 * Each domain exposes the set of services it supports as a collection
 * of properties. For each service there will be one corresponding
 * &struct tb_service. Service drivers are bound to these.
 */
struct tb_service {
	struct device dev;
	int id;
	const char *key;
	u32 prtcid;
	u32 prtcvers;
	u32 prtcrevs;
	u32 prtcstns;
	struct dentry *debugfs_dir;
};

static inline struct tb_service *tb_service_get(struct tb_service *svc)
{
	if (svc)
		get_device(&svc->dev);
	return svc;
}

static inline void tb_service_put(struct tb_service *svc)
{
	if (svc)
		put_device(&svc->dev);
}

static inline bool tb_is_service(const struct device *dev)
{
	return dev->type == &tb_service_type;
}

static inline struct tb_service *tb_to_service(struct device *dev)
{
	if (tb_is_service(dev))
		return container_of(dev, struct tb_service, dev);
	return NULL;
}

/**
 * tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)
 * @shutdown: Called at shutdown time to stop the service (optional)
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
	struct device_driver driver;
	int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
	void (*remove)(struct tb_service *svc);
	void (*shutdown)(struct tb_service *svc);
	const struct tb_service_id *id_table;
};

#define TB_SERVICE(key, id)				\
	.match_flags = TBSVC_MATCH_PROTOCOL_KEY |	\
		       TBSVC_MATCH_PROTOCOL_ID,		\
	.protocol_key = (key),				\
	.protocol_id = (id)

int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);

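/*
 * A minimal service driver skeleton sketching how the pieces above fit
 * together. The "example" key, protocol ID value and example_priv type
 * are hypothetical:
 */
#if 0
static const struct tb_service_id example_ids[] = {
	{ TB_SERVICE("example", 1) },
	{ },
};

static int example_probe(struct tb_service *svc,
			 const struct tb_service_id *id)
{
	struct example_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	tb_service_set_drvdata(svc, priv);
	return 0;
}

static void example_remove(struct tb_service *svc)
{
	kfree(tb_service_get_drvdata(svc));
}

static struct tb_service_driver example_driver = {
	.driver = {
		.name = "example",
	},
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_ids,
};

/* tb_register_service_driver(&example_driver); */
#endif
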
static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
	return dev_get_drvdata(&svc->dev);
}

static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
	dev_set_drvdata(&svc->dev, data);
}

static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
	return tb_to_xdomain(svc->dev.parent);
}

/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @ops: NHI specific optional ops
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @iommu_dma_protection: An IOMMU will isolate external-facing ports.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 * @quirks: NHI specific quirks if any
 */
struct tb_nhi {
	spinlock_t lock;
	struct pci_dev *pdev;
	const struct tb_nhi_ops *ops;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	bool iommu_dma_protection;
	struct work_struct interrupt_work;
	u32 hop_count;
	unsigned long quirks;
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of @descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to
 *		RX ring. For TX ring this should be set to %0.
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start
 *		polling. Passing %NULL keeps the ring in interrupt mode.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
	spinlock_t lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	int e2e_tx_hop;
	u16 sof_mask;
	u16 eof_mask;
	void (*start_poll)(void *data);
	void *poll_data;
};

/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME		BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E		BIT(2)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,
	RING_DESC_CRC_ERROR = 0x1,
	RING_DESC_COMPLETED = 0x2,
	RING_DESC_POSTED = 0x4,
	RING_DESC_BUFFER_OVERRUN = 0x04,
	RING_DESC_INTERRUPT = 0x8,
};

/**
 * struct ring_frame - For use with ring_rx/ring_tx
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};

/* Minimum size for ring_rx */
#define TB_FRAME_SIZE		0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, int e2e_tx_hop,
				 u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

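/*
 * A sketch of a ring's lifecycle, assuming (as other users of this API
 * do) that passing -1 as @hop lets the NHI pick any free hop:
 */
#if 0
static struct tb_ring *example_open_tx_ring(struct tb_nhi *nhi)
{
	struct tb_ring *ring;

	/* 256 descriptors, frame mode */
	ring = tb_ring_alloc_tx(nhi, -1, 256, RING_FLAG_FRAME);
	if (ring)
		tb_ring_start(ring);
	return ring;
}

/* ... and to tear it down: tb_ring_stop(ring); tb_ring_free(ring); */
#endif
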
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy have to be set. The buffer must
 * contain at least %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy, @frame->size, @frame->eof and
 * @frame->sof have to be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If ring_stop() is called after the packet has been enqueued @frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

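/*
 * A sketch of transmitting one frame. The buffer is assumed to be DMA
 * mapped already; EXAMPLE_SOF_PDF/EXAMPLE_EOF_PDF stand in for the
 * protocol defined PDF values of the service:
 */
#if 0
static void example_tx_done(struct tb_ring *ring, struct ring_frame *frame,
			    bool canceled)
{
	/* Unmap and recycle the buffer backing @frame here */
}

static int example_send(struct tb_ring *ring, struct ring_frame *frame,
			size_t len)
{
	frame->size = len;
	frame->sof = EXAMPLE_SOF_PDF;
	frame->eof = EXAMPLE_EOF_PDF;
	frame->callback = example_tx_done;

	return tb_ring_tx(ring, frame);
}
#endif
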
/* Used only when the ring is in polling mode */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);

/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
	return &ring->nhi->pdev->dev;
}

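/*
 * A sketch of mapping an RX buffer against the right device (assumes
 * <linux/dma-mapping.h>; the caller is expected to check the result
 * with dma_mapping_error()):
 */
#if 0
static dma_addr_t example_map_rx_buf(struct tb_ring *ring, void *buf)
{
	return dma_map_single(tb_ring_dma_device(ring), buf,
			      TB_FRAME_SIZE, DMA_FROM_DEVICE);
}
#endif
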
#endif /* THUNDERBOLT_H_ */