cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

host1x.h (12519B)


      1/* SPDX-License-Identifier: GPL-2.0-or-later */
      2/*
      3 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
      4 */
      5
      6#ifndef __LINUX_HOST1X_H
      7#define __LINUX_HOST1X_H
      8
      9#include <linux/device.h>
     10#include <linux/dma-direction.h>
     11#include <linux/spinlock.h>
     12#include <linux/types.h>
     13
/*
 * Hardware class IDs of the engines reachable through host1x; used to
 * address a unit, e.g. when a job requests a SETCLASS (see
 * host1x_job.class). Values are fixed by the Tegra hardware.
 */
enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
	HOST1X_CLASS_NVDEC = 0xF0,
	HOST1X_CLASS_NVDEC1 = 0xF5,
};
     23
struct host1x;
struct host1x_client;
struct iommu_group;

/* Query the DMA mask of a host1x instance (implemented in the host1x core). */
u64 host1x_get_dma_mask(struct host1x *host1x);
     29
     30/**
     31 * struct host1x_bo_cache - host1x buffer object cache
     32 * @mappings: list of mappings
     33 * @lock: synchronizes accesses to the list of mappings
     34 *
     35 * Note that entries are not periodically evicted from this cache and instead need to be
     36 * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
     37 * released when the last reference to a buffer object represented by a mapping in this
     38 * cache is dropped.
     39 */
struct host1x_bo_cache {
	struct list_head mappings;	/* cached struct host1x_bo_mapping entries */
	struct mutex lock;		/* serializes access to @mappings */
};
     44
     45static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
     46{
     47	INIT_LIST_HEAD(&cache->mappings);
     48	mutex_init(&cache->lock);
     49}
     50
     51static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
     52{
     53	/* XXX warn if not empty? */
     54	mutex_destroy(&cache->lock);
     55}
     56
     57/**
     58 * struct host1x_client_ops - host1x client operations
     59 * @early_init: host1x client early initialization code
     60 * @init: host1x client initialization code
     61 * @exit: host1x client tear down code
     62 * @late_exit: host1x client late tear down code
     63 * @suspend: host1x client suspend code
     64 * @resume: host1x client resume code
     65 */
/* See the kernel-doc above for per-hook semantics. NOTE(review): hooks
 * presumably return 0 on success or a negative errno — confirm in the
 * host1x core. */
struct host1x_client_ops {
	int (*early_init)(struct host1x_client *client);
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*late_exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};
     74
     75/**
     76 * struct host1x_client - host1x client structure
     77 * @list: list node for the host1x client
     78 * @host: pointer to struct device representing the host1x controller
     79 * @dev: pointer to struct device backing this host1x client
     80 * @group: IOMMU group that this client is a member of
     81 * @ops: host1x client operations
     82 * @class: host1x class represented by this client
     83 * @channel: host1x channel associated with this client
     84 * @syncpts: array of syncpoints requested for this client
     85 * @num_syncpts: number of syncpoints requested for this client
     86 * @parent: pointer to parent structure
     87 * @usecount: reference count for this structure
     88 * @lock: mutex for mutually exclusive concurrency
     89 * @cache: host1x buffer object cache
     90 */
/* Fields are documented in the kernel-doc comment above. */
struct host1x_client {
	struct list_head list;
	struct device *host;		/* the host1x controller's device */
	struct device *dev;		/* the client's own device */
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;		/* NOTE(review): likely guarded by @lock — confirm */
	struct mutex lock;

	struct host1x_bo_cache cache;	/* per-client mapping cache, see struct host1x_bo_cache */
};
    111
    112/*
    113 * host1x buffer objects
    114 */
    115
struct host1x_bo;
struct sg_table;

/*
 * A single pinned DMA mapping of a buffer object for one device,
 * reference counted via @ref (see to_host1x_bo_mapping()).
 */
struct host1x_bo_mapping {
	struct kref ref;			/* reference count */
	struct dma_buf_attachment *attach;	/* dma-buf attachment backing the mapping, if any */
	enum dma_data_direction direction;	/* DMA direction the mapping was created with */
	struct list_head list;			/* node in the BO's @mappings list */
	struct host1x_bo *bo;			/* buffer object being mapped */
	struct sg_table *sgt;			/* scatter-gather table for the mapping */
	unsigned int chunks;			/* NOTE(review): presumably the number of DMA segments — confirm */
	struct device *dev;			/* device the BO is mapped for */
	dma_addr_t phys;			/* DMA address of the mapping */
	size_t size;				/* size of the mapping in bytes */

	struct host1x_bo_cache *cache;		/* cache holding this mapping, if cached */
	struct list_head entry;			/* node in @cache's @mappings list */
};
    134
/* Convert an embedded kref back to its containing mapping. */
static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}
    139
/* Implementation callbacks for a buffer object; installed by host1x_bo_init(). */
struct host1x_bo_ops {
	/* take a reference on the BO */
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	/* drop a reference on the BO */
	void (*put)(struct host1x_bo *bo);
	/* pin the BO for DMA by @dev; paired with unpin() */
	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
					 enum dma_data_direction dir);
	void (*unpin)(struct host1x_bo_mapping *map);
	/* map/unmap the BO's contents for CPU access */
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};
    149
/* Generic buffer object; initialized via host1x_bo_init(). */
struct host1x_bo {
	const struct host1x_bo_ops *ops;	/* implementation callbacks */
	struct list_head mappings;		/* struct host1x_bo_mapping entries, linked via ->list */
	spinlock_t lock;			/* NOTE(review): presumably protects @mappings — confirm */
};
    155
    156static inline void host1x_bo_init(struct host1x_bo *bo,
    157				  const struct host1x_bo_ops *ops)
    158{
    159	INIT_LIST_HEAD(&bo->mappings);
    160	spin_lock_init(&bo->lock);
    161	bo->ops = ops;
    162}
    163
    164static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
    165{
    166	return bo->ops->get(bo);
    167}
    168
    169static inline void host1x_bo_put(struct host1x_bo *bo)
    170{
    171	bo->ops->put(bo);
    172}
    173
/*
 * Pin @bo for DMA by @dev in direction @dir; release with
 * host1x_bo_unpin(). NOTE(review): @cache presumably allows reuse of a
 * previously created mapping — confirm against the host1x core.
 */
struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache);
void host1x_bo_unpin(struct host1x_bo_mapping *map);
    178
    179static inline void *host1x_bo_mmap(struct host1x_bo *bo)
    180{
    181	return bo->ops->mmap(bo);
    182}
    183
    184static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
    185{
    186	bo->ops->munmap(bo, addr);
    187}
    188
    189/*
    190 * host1x syncpoints
    191 */
    192
/*
 * Syncpoint allocation flags.
 * NOTE(review): semantics inferred from the flag names — confirm in the
 * host1x syncpoint core.
 */
#define HOST1X_SYNCPT_CLIENT_MANAGED	(1 << 0)	/* incremented by the client, not host1x */
#define HOST1X_SYNCPT_HAS_BASE		(1 << 1)	/* syncpoint comes with a wait base */

struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;

/* Lookup and reference counting. */
struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
/* Value accessors and increment/wait operations. */
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
/* Allocation: host1x_syncpt_request() for a client, _alloc() for a host. */
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_put(struct host1x_syncpt *sp);
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);

void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id);

/* Create a DMA fence that signals when @sp reaches @threshold. */
struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);
    225
    226/*
    227 * host1x channel
    228 */
    229
struct host1x_channel;
struct host1x_job;

/* Channel acquisition and reference counting; jobs are submitted to a channel. */
struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
    238
    239/*
    240 * host1x job
    241 */
    242
    243#define HOST1X_RELOC_READ	(1 << 0)
    244#define HOST1X_RELOC_WRITE	(1 << 1)
    245
/*
 * Relocation entry: the word at @cmdbuf.offset in @cmdbuf.bo is patched
 * with the address of @target.bo at @target.offset. @flags is a mask of
 * HOST1X_RELOC_READ/WRITE. NOTE(review): exact use of @shift is not
 * visible here — confirm against the job pinning code.
 */
struct host1x_reloc {
	struct {
		struct host1x_bo *bo;		/* command buffer to patch */
		unsigned long offset;		/* location of the word to patch */
	} cmdbuf;
	struct {
		struct host1x_bo *bo;		/* buffer whose address gets patched in */
		unsigned long offset;		/* offset into the target buffer */
	} target;
	unsigned long shift;
	unsigned long flags;			/* HOST1X_RELOC_READ / HOST1X_RELOC_WRITE */
};
    258
/*
 * A unit of work submitted to a host1x channel: the command buffers to
 * execute plus the buffer, relocation and syncpoint bookkeeping that
 * goes with them. Submitted via host1x_job_submit().
 */
struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	/* client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_cmd *cmds;
	unsigned int num_cmds;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point id, number of increments and end related to the submit */
	struct host1x_syncpt *syncpt;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Completion waiter ref */
	void *waiter;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Job has timed out and should be released */
	bool cancelled;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;

	/* Fast-forward syncpoint increments on job timeout */
	bool syncpt_recovery;

	/* Callback called when job is freed */
	void (*release)(struct host1x_job *job);
	void *user_data;

	/* Whether host1x-side firewall should be run for this job or not */
	bool enable_firewall;
};
    331
/* Job lifecycle: allocate, populate with gathers/waits, pin, submit, put. */
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset);
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
			 bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
/* Pin all buffers referenced by @job for DMA by @dev; undo with _unpin(). */
int host1x_job_pin(struct host1x_job *job, struct device *dev);
void host1x_job_unpin(struct host1x_job *job);
    343
    344/*
    345 * subdevice probe infrastructure
    346 */
    347
    348struct host1x_device;
    349
    350/**
    351 * struct host1x_driver - host1x logical device driver
    352 * @driver: core driver
    353 * @subdevs: table of OF device IDs matching subdevices for this driver
    354 * @list: list node for the driver
    355 * @probe: called when the host1x logical device is probed
    356 * @remove: called when the host1x logical device is removed
    357 * @shutdown: called when the host1x logical device is shut down
    358 */
/* Fields are documented in the kernel-doc comment above. */
struct host1x_driver {
	struct device_driver driver;		/* embedded core driver; see to_host1x_driver() */

	const struct of_device_id *subdevs;	/* OF match table for subdevices */
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};
    369
/* Convert an embedded struct device_driver back to its host1x driver. */
static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}
    375
int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

/* Convenience wrapper that registers @driver on behalf of the calling module. */
#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)
    382
/*
 * A logical device on the host1x bus, aggregating the subdevices
 * matched by its driver's subdevs table.
 */
struct host1x_device {
	struct host1x_driver *driver;	/* driver bound to this device */
	struct list_head list;
	struct device dev;		/* embedded device; see to_host1x_device() */

	struct mutex subdevs_lock;	/* NOTE(review): presumably protects @subdevs/@active — confirm */
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;	/* NOTE(review): presumably protects @clients — confirm */
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};
    399
/* Convert an embedded struct device back to its host1x device. */
static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}
    404
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);

void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
void host1x_client_exit(struct host1x_client *client);

/*
 * Initialize @client with a lock class key unique to the call site, so
 * that lockdep can distinguish the locks of different client types.
 */
#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})
    416
int __host1x_client_register(struct host1x_client *client);

/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize using either of the
 * __host1x_client_init() or host1x_client_init() functions and then use
 * the low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})

/* Counterpart to host1x_client_register(). */
int host1x_client_unregister(struct host1x_client *client);
    435
/* Runtime power management entry points for a client. */
int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);

/*
 * Tegra MIPI calibration helpers. A device handle is acquired with
 * tegra_mipi_request() and released with tegra_mipi_free();
 * calibration is split into start/finish phases.
 */
struct tegra_mipi_device;

struct tegra_mipi_device *tegra_mipi_request(struct device *device,
					     struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
    448
    449#endif