From ea9da4e4608104108c6d5eca7b178cec2720ab22 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Apr 2015 10:35:08 +0100 Subject: drm/i915: Allow disabling the destination colorkey for overlay MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sometimes userspace wants a true overlay that is never clipped. In such cases, we need to disable the destination colorkey. However, it is currently unconditionally enabled in the overlay with no means of disabling it. So rectify that by always defaulting to on, and extending the UPDATE_ATTR ioctl to support explicit disabling of the colorkey. This is in contrast to the sprite code, which requires explicit enabling of either the destination or source colorkey. Handling the source colorkey is still a todo for the overlay. (Of course it may be worth migrating the overlay to sprite before then.) Signed-off-by: Chris Wilson Reviewed-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- include/uapi/drm/i915_drm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi/drm') diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 551b6737f5df..4851d660243c 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -996,6 +996,7 @@ struct drm_intel_overlay_put_image { /* flags */ #define I915_OVERLAY_UPDATE_ATTRS (1<<0) #define I915_OVERLAY_UPDATE_GAMMA (1<<1) +#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2) struct drm_intel_overlay_attrs { __u32 flags; __u32 color_key; -- cgit v1.2.3-71-gd317 From e2f5d2ea479b9b2619965d43db70939589afe43a Mon Sep 17 00:00:00 2001 From: Daniel Stone Date: Fri, 22 May 2015 13:34:51 +0100 Subject: drm/mode: Add user blob-creation ioctl Add an ioctl which allows users to create blob properties from supplied data. Currently this only supports modes, creating a drm_display_mode from the userspace drm_mode_modeinfo. v2: Removed size/type checks. Rebased on new patches to allow error propagation from create_blob, as well as avoiding double-allocation. Signed-off-by: Daniel Stone Reviewed-by: Maarten Lankhorst Tested-by: Sean Paul Signed-off-by: Daniel Vetter --- drivers/gpu/drm/drm_crtc.c | 139 +++++++++++++++++++++++++++++++++++++++++++- drivers/gpu/drm/drm_fops.c | 5 +- drivers/gpu/drm/drm_ioctl.c | 2 + include/drm/drmP.h | 4 ++ include/drm/drm_crtc.h | 9 ++- include/uapi/drm/drm.h | 2 + include/uapi/drm/drm_mode.h | 20 +++++++ 7 files changed, 176 insertions(+), 5 deletions(-) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index f661589b1dea..e548c50edc94 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -4173,6 +4173,9 @@ drm_property_create_blob(struct drm_device *dev, size_t length, if (!blob) return ERR_PTR(-ENOMEM); + /* This must be explicitly initialised, so we can safely call list_del + * on it in the removal handler, even if it isn't in a file list. 
*/ + INIT_LIST_HEAD(&blob->head_file); blob->length = length; blob->dev = dev; @@ -4190,7 +4193,8 @@ drm_property_create_blob(struct drm_device *dev, size_t length, kref_init(&blob->refcount); - list_add_tail(&blob->head, &dev->mode_config.property_blob_list); + list_add_tail(&blob->head_global, + &dev->mode_config.property_blob_list); mutex_unlock(&dev->mode_config.blob_lock); @@ -4212,7 +4216,8 @@ static void drm_property_free_blob(struct kref *kref) WARN_ON(!mutex_is_locked(&blob->dev->mode_config.blob_lock)); - list_del(&blob->head); + list_del(&blob->head_global); + list_del(&blob->head_file); drm_mode_object_put(blob->dev, &blob->base); kfree(blob); @@ -4263,6 +4268,26 @@ static void drm_property_unreference_blob_locked(struct drm_property_blob *blob) kref_put(&blob->refcount, drm_property_free_blob); } +/** + * drm_property_destroy_user_blobs - destroy all blobs created by this client + * @dev: DRM device + * @file_priv: destroy all blobs owned by this file handle + */ +void drm_property_destroy_user_blobs(struct drm_device *dev, + struct drm_file *file_priv) +{ + struct drm_property_blob *blob, *bt; + + mutex_lock(&dev->mode_config.blob_lock); + + list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) { + list_del_init(&blob->head_file); + drm_property_unreference_blob_locked(blob); + } + + mutex_unlock(&dev->mode_config.blob_lock); +} + /** * drm_property_reference_blob - Take a reference on an existing property * @@ -4452,6 +4477,114 @@ done: return ret; } +/** + * drm_mode_createblob_ioctl - create a new blob property + * @dev: DRM device + * @data: ioctl data + * @file_priv: DRM file info + * + * This function creates a new blob property with user-defined values. In order + * to give us sensible validation and checking when creating, rather than at + * every potential use, we also require a type to be provided upfront. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_createblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_create_blob *out_resp = data; + struct drm_property_blob *blob; + void __user *blob_ptr; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + blob = drm_property_create_blob(dev, out_resp->length, NULL); + if (IS_ERR(blob)) + return PTR_ERR(blob); + + blob_ptr = (void __user *)(unsigned long)out_resp->data; + if (copy_from_user(blob->data, blob_ptr, out_resp->length)) { + ret = -EFAULT; + goto out_blob; + } + + /* Dropping the lock between create_blob and our access here is safe + * as only the same file_priv can remove the blob; at this point, it is + * not associated with any file_priv. */ + mutex_lock(&dev->mode_config.blob_lock); + out_resp->blob_id = blob->base.id; + list_add_tail(&file_priv->blobs, &blob->head_file); + mutex_unlock(&dev->mode_config.blob_lock); + + return 0; + +out_blob: + drm_property_unreference_blob(blob); + return ret; +} + +/** + * drm_mode_destroyblob_ioctl - destroy a user blob property + * @dev: DRM device + * @data: ioctl data + * @file_priv: DRM file info + * + * Destroy an existing user-defined blob property. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. 
+ */ +int drm_mode_destroyblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_destroy_blob *out_resp = data; + struct drm_property_blob *blob = NULL, *bt; + bool found = false; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + mutex_lock(&dev->mode_config.blob_lock); + blob = __drm_property_lookup_blob(dev, out_resp->blob_id); + if (!blob) { + ret = -ENOENT; + goto err; + } + + /* Ensure the property was actually created by this user. */ + list_for_each_entry(bt, &file_priv->blobs, head_file) { + if (bt == blob) { + found = true; + break; + } + } + + if (!found) { + ret = -EPERM; + goto err; + } + + /* We must drop head_file here, because we may not be the last + * reference on the blob. */ + list_del_init(&blob->head_file); + drm_property_unreference_blob_locked(blob); + mutex_unlock(&dev->mode_config.blob_lock); + + return 0; + +err: + mutex_unlock(&dev->mode_config.blob_lock); + return ret; +} + /** * drm_mode_connector_set_path_property - set tile property on connector * @connector: connector to set property on. @@ -5655,7 +5788,7 @@ void drm_mode_config_cleanup(struct drm_device *dev) } list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list, - head) { + head_global) { drm_property_unreference_blob(blob); } diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 0f6a5c8801e3..c59ce4d0ef75 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -167,6 +167,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) INIT_LIST_HEAD(&priv->lhead); INIT_LIST_HEAD(&priv->fbs); mutex_init(&priv->fbs_lock); + INIT_LIST_HEAD(&priv->blobs); INIT_LIST_HEAD(&priv->event_list); init_waitqueue_head(&priv->event_wait); priv->event_space = 4096; /* set aside 4k for event buffer */ @@ -405,8 +406,10 @@ int drm_release(struct inode *inode, struct file *filp) drm_events_release(file_priv); - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (drm_core_check_feature(dev, DRIVER_MODESET)) { drm_fb_release(file_priv); + drm_property_destroy_user_blobs(dev, file_priv); + } if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_release(dev, file_priv); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 266dcd6cdf3b..9bac1b7479af 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -641,6 +641,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/include/drm/drmP.h b/include/drm/drmP.h index df6d9970d9a4..9fa6366f47c2 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -326,6 +326,10 @@ struct drm_file { struct list_head fbs; struct mutex fbs_lock; + /** User-created blob properties; this retains a reference on the + * property. 
 */ + struct list_head blobs; + wait_queue_head_t event_wait; struct list_head event_list; int event_space; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index dace1b635685..72b60dbe0891 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -218,7 +218,8 @@ struct drm_property_blob { struct drm_mode_object base; struct drm_device *dev; struct kref refcount; - struct list_head head; + struct list_head head_global; + struct list_head head_file; size_t length; unsigned char data[]; }; @@ -1315,6 +1316,8 @@ extern const char *drm_get_dvi_i_select_name(int val); extern const char *drm_get_tv_subconnector_name(int val); extern const char *drm_get_tv_select_name(int val); extern void drm_fb_release(struct drm_file *file_priv); +extern void drm_property_destroy_user_blobs(struct drm_device *dev, + struct drm_file *file_priv); extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); extern void drm_mode_group_destroy(struct drm_mode_group *group); extern void drm_reinit_primary_mode_group(struct drm_device *dev); @@ -1460,6 +1463,10 @@ extern int drm_mode_getproperty_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mode_createblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_destroyblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getencoder(struct drm_device *dev, diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index ff6ef62d084b..3801584a0c53 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -786,6 +786,8 @@ struct drm_prime_handle { #define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property) #define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2) #define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic) +#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob) +#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob) /** * Device specific ioctls should only be in their respective headers diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index dbeba949462a..359107ab629e 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -558,4 +558,24 @@ struct drm_mode_atomic { __u64 user_data; }; +/** + * Create a new 'blob' data property, copying length bytes from data pointer, + * and returning new blob ID. + */ +struct drm_mode_create_blob { + /** Pointer to data to copy. */ + __u64 data; + /** Length of data to copy. */ + __u32 length; + /** Return: new property ID. */ + __u32 blob_id; +}; + +/** + * Destroy a user-created blob property. + */ +struct drm_mode_destroy_blob { + __u32 blob_id; +}; + #endif -- cgit v1.2.3-71-gd317 From 72b9076b2887add930d3b102760f09d02ffbfbe7 Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Wed, 29 Apr 2015 19:40:33 +0200 Subject: drm/radeon: add a GPU reset counter queryable by userspace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Userspace will be able to tell whether a GPU reset occurred by comparing an old reference value of the counter with a new value.
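As an illustration, a minimal userspace sketch of how this query might be consumed; it assumes the conventional struct drm_radeon_info layout (request/pad/value) and DRM_IOCTL_RADEON_INFO from radeon_drm.h, which are not shown in this patch, and omits error handling:

    /* Hedged sketch: read the reset counter and compare it with a saved value. */
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/radeon_drm.h>        /* header path may differ per system */

    static int radeon_gpu_was_reset(int fd, uint32_t *last)
    {
            uint32_t counter = 0;
            struct drm_radeon_info info;

            memset(&info, 0, sizeof(info));
            info.request = RADEON_INFO_GPU_RESET_COUNTER;
            info.value = (uintptr_t)&counter;   /* kernel writes the counter here */

            if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info) != 0)
                    return -1;                  /* query failed or not supported */

            if (counter != *last) {             /* a reset happened since the last call */
                    *last = counter;
                    return 1;
            }
            return 0;
    }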
Reviewed-by: Christian König Signed-off-by: Marek Olšák Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_device.c | 2 ++ drivers/gpu/drm/radeon/radeon_drv.c | 3 ++- drivers/gpu/drm/radeon/radeon_kms.c | 3 +++ include/uapi/drm/radeon_drm.h | 1 + 5 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 46eb0fa75a61..352870cbb8b8 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2435,6 +2435,7 @@ struct radeon_device { atomic64_t vram_usage; atomic64_t gtt_usage; atomic64_t num_bytes_moved; + atomic_t gpu_reset_counter; /* ACPI interface */ struct radeon_atif atif; struct radeon_atcs atcs; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b7ca4c514621..13e207e0dff0 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1725,6 +1725,8 @@ int radeon_gpu_reset(struct radeon_device *rdev) return 0; } + atomic_inc(&rdev->gpu_reset_counter); + radeon_save_bios_scratch_regs(rdev); /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 7d620d4b3f31..5751446677d3 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -90,9 +90,10 @@ * CS to GPU on >= r600 * 2.41.0 - evergreen/cayman: Add SET_BASE/DRAW_INDIRECT command parsing support * 2.42.0 - Add VCE/VUI (Video Usability Information) support + * 2.43.0 - RADEON_INFO_GPU_RESET_COUNTER */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 42 +#define KMS_DRIVER_MINOR 43 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 7b2a7335cc5d..9632e886ddc3 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -576,6 +576,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (radeon_get_allowed_info_register(rdev, *value, value)) return -EINVAL; break; + case RADEON_INFO_GPU_RESET_COUNTER: + *value = atomic_read(&rdev->gpu_reset_counter); + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 871e73f99a4d..573cb86a3d6e 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -1038,6 +1038,7 @@ struct drm_radeon_cs { #define RADEON_INFO_CURRENT_GPU_SCLK 0x22 #define RADEON_INFO_CURRENT_GPU_MCLK 0x23 #define RADEON_INFO_READ_REG 0x24 +#define RADEON_INFO_GPU_RESET_COUNTER 0x25 struct drm_radeon_info { uint32_t request; -- cgit v1.2.3-71-gd317 From 21631f10ea08a9551eb32651448baad5ef64de6c Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Tue, 26 May 2015 14:57:19 +0100 Subject: drm/i915: Fix the confusing comment about the ioctl limits It was reported that this comment was confusing, and indeed it is. v2: (one year later!) 
Add the range for the DRM_I915_* ioctl defines (Daniel) Signed-off-by: Damien Lespiau Signed-off-by: Daniel Vetter --- include/uapi/drm/i915_drm.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'include/uapi/drm') diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 4851d660243c..6e1a2ed116cb 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -171,8 +171,12 @@ typedef struct _drm_i915_sarea { #define I915_BOX_TEXTURE_LOAD 0x8 #define I915_BOX_LOST_CONTEXT 0x10 -/* I915 specific ioctls - * The device specific ioctl range is 0x40 to 0x79. +/* + * i915 specific ioctls. + * + * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie + * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset + * against DRM_COMMAND_BASE and should be between [0x0, 0x60). */ #define DRM_I915_INIT 0x00 #define DRM_I915_FLUSH 0x01 -- cgit v1.2.3-71-gd317 From 81629cba1f12683f2a312e6fb41a3aa662f99f89 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 20 Apr 2015 16:42:01 -0400 Subject: drm/amdgpu: add amdgpu uapi header (v4) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This header defines the ioctl interface to the driver. v2: remove stale tiling defines v3: add appropriate padding v4: remove executable bits on header Acked-by: Christian König Acked-by: Jammy Zhou Signed-off-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 590 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 590 insertions(+) create mode 100644 include/uapi/drm/amdgpu_drm.h (limited to 'include/uapi/drm') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h new file mode 100644 index 000000000000..9e771fb858b4 --- /dev/null +++ b/include/uapi/drm/amdgpu_drm.h @@ -0,0 +1,590 @@ +/* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*- + * + * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Fremont, California. + * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Kevin E. 
Martin + * Gareth Hughes + * Keith Whitwell + */ + +#ifndef __AMDGPU_DRM_H__ +#define __AMDGPU_DRM_H__ + +#include + +#define DRM_AMDGPU_GEM_CREATE 0x00 +#define DRM_AMDGPU_GEM_MMAP 0x01 +#define DRM_AMDGPU_CTX 0x02 +#define DRM_AMDGPU_BO_LIST 0x03 +#define DRM_AMDGPU_CS 0x04 +#define DRM_AMDGPU_INFO 0x05 +#define DRM_AMDGPU_GEM_METADATA 0x06 +#define DRM_AMDGPU_GEM_WAIT_IDLE 0x07 +#define DRM_AMDGPU_GEM_VA 0x08 +#define DRM_AMDGPU_WAIT_CS 0x09 +#define DRM_AMDGPU_GEM_OP 0x10 +#define DRM_AMDGPU_GEM_USERPTR 0x11 + +#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) +#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) +#define DRM_IOCTL_AMDGPU_CTX DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx) +#define DRM_IOCTL_AMDGPU_BO_LIST DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list) +#define DRM_IOCTL_AMDGPU_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs) +#define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info) +#define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata) +#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle) +#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, union drm_amdgpu_gem_va) +#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs) +#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op) +#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr) + +#define AMDGPU_GEM_DOMAIN_CPU 0x1 +#define AMDGPU_GEM_DOMAIN_GTT 0x2 +#define AMDGPU_GEM_DOMAIN_VRAM 0x4 +#define AMDGPU_GEM_DOMAIN_GDS 0x8 +#define AMDGPU_GEM_DOMAIN_GWS 0x10 +#define AMDGPU_GEM_DOMAIN_OA 0x20 + +#define AMDGPU_GEM_DOMAIN_MASK 0x3F + +/* Flag that CPU access will be required for the case of VRAM domain */ +#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0) +/* Flag that CPU access will not work, this VRAM domain is invisible */ +#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1) +/* Flag that un-cached attributes should be used for GTT */ +#define AMDGPU_GEM_CREATE_CPU_GTT_UC (1 << 2) +/* Flag that USWC attributes should be used for GTT */ +#define AMDGPU_GEM_CREATE_CPU_GTT_WC (1 << 3) + +/* Flag mask for GTT domain_flags */ +#define AMDGPU_GEM_CREATE_CPU_GTT_MASK \ + (AMDGPU_GEM_CREATE_CPU_GTT_WC | \ + AMDGPU_GEM_CREATE_CPU_GTT_UC | \ + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \ + AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + +struct drm_amdgpu_gem_create_in { + /** the requested memory size */ + uint64_t bo_size; + /** physical start_addr alignment in bytes for some HW requirements */ + uint64_t alignment; + /** the requested memory domains */ + uint64_t domains; + /** allocation flags */ + uint64_t domain_flags; +}; + +struct drm_amdgpu_gem_create_out { + /** returned GEM object handle */ + uint32_t handle; + uint32_t _pad; +}; + +union drm_amdgpu_gem_create { + struct drm_amdgpu_gem_create_in in; + struct drm_amdgpu_gem_create_out out; +}; + +/** Opcode to create new residency list. 
*/ +#define AMDGPU_BO_LIST_OP_CREATE 0 +/** Opcode to destroy previously created residency list */ +#define AMDGPU_BO_LIST_OP_DESTROY 1 +/** Opcode to update resource information in the list */ +#define AMDGPU_BO_LIST_OP_UPDATE 2 + +struct drm_amdgpu_bo_list_in { + /** Type of operation */ + uint32_t operation; + /** Handle of list or 0 if we want to create one */ + uint32_t list_handle; + /** Number of BOs in list */ + uint32_t bo_number; + /** Size of each element describing BO */ + uint32_t bo_info_size; + /** Pointer to array describing BOs */ + uint64_t bo_info_ptr; +}; + +struct drm_amdgpu_bo_list_entry { + /** Handle of BO */ + uint32_t bo_handle; + /** New (if specified) BO priority to be used during migration */ + uint32_t bo_priority; +}; + +struct drm_amdgpu_bo_list_out { + /** Handle of resource list */ + uint32_t list_handle; + uint32_t _pad; +}; + +union drm_amdgpu_bo_list { + struct drm_amdgpu_bo_list_in in; + struct drm_amdgpu_bo_list_out out; +}; + +/* context related */ +#define AMDGPU_CTX_OP_ALLOC_CTX 1 +#define AMDGPU_CTX_OP_FREE_CTX 2 +#define AMDGPU_CTX_OP_QUERY_STATE 3 + +#define AMDGPU_CTX_OP_STATE_RUNNING 1 + +struct drm_amdgpu_ctx_in { + uint32_t op; + uint32_t flags; + uint32_t ctx_id; + uint32_t _pad; +}; + +union drm_amdgpu_ctx_out { + struct { + uint32_t ctx_id; + uint32_t _pad; + } alloc; + + struct { + uint64_t flags; + uint64_t hangs; + } state; +}; + +union drm_amdgpu_ctx { + struct drm_amdgpu_ctx_in in; + union drm_amdgpu_ctx_out out; +}; + +/* + * This is not a reliable API and you should expect it to fail for any + * number of reasons and have fallback path that do not use userptr to + * perform any operation. + */ +#define AMDGPU_GEM_USERPTR_READONLY (1 << 0) +#define AMDGPU_GEM_USERPTR_ANONONLY (1 << 1) +#define AMDGPU_GEM_USERPTR_VALIDATE (1 << 2) +#define AMDGPU_GEM_USERPTR_REGISTER (1 << 3) + +struct drm_amdgpu_gem_userptr { + uint64_t addr; + uint64_t size; + uint32_t flags; + uint32_t handle; +}; + +#define AMDGPU_TILING_MACRO 0x1 +#define AMDGPU_TILING_MICRO 0x2 +#define AMDGPU_TILING_SWAP_16BIT 0x4 +#define AMDGPU_TILING_R600_NO_SCANOUT AMDGPU_TILING_SWAP_16BIT +#define AMDGPU_TILING_SWAP_32BIT 0x8 +/* this object requires a surface when mapped - i.e. 
front buffer */ +#define AMDGPU_TILING_SURFACE 0x10 +#define AMDGPU_TILING_MICRO_SQUARE 0x20 +#define AMDGPU_TILING_EG_BANKW_SHIFT 8 +#define AMDGPU_TILING_EG_BANKW_MASK 0xf +#define AMDGPU_TILING_EG_BANKH_SHIFT 12 +#define AMDGPU_TILING_EG_BANKH_MASK 0xf +#define AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16 +#define AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf +#define AMDGPU_TILING_EG_TILE_SPLIT_SHIFT 24 +#define AMDGPU_TILING_EG_TILE_SPLIT_MASK 0xf +#define AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28 +#define AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf + +#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1 +#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2 + +/** The same structure is shared for input/output */ +struct drm_amdgpu_gem_metadata { + uint32_t handle; /* GEM Object handle */ + uint32_t op; /** Do we want get or set metadata */ + struct { + uint64_t flags; + uint64_t tiling_info; /* family specific tiling info */ + uint32_t data_size_bytes; + uint32_t data[64]; + } data; +}; + +struct drm_amdgpu_gem_mmap_in { + uint32_t handle; /** the GEM object handle */ + uint32_t _pad; +}; + +struct drm_amdgpu_gem_mmap_out { + uint64_t addr_ptr; /** mmap offset from the vma offset manager */ +}; + +union drm_amdgpu_gem_mmap { + struct drm_amdgpu_gem_mmap_in in; + struct drm_amdgpu_gem_mmap_out out; +}; + +struct drm_amdgpu_gem_wait_idle_in { + uint32_t handle; /* GEM object handle */ + uint32_t flags; + uint64_t timeout; /* Timeout to wait. If 0 then returned immediately with the status */ +}; + +struct drm_amdgpu_gem_wait_idle_out { + uint32_t status; /* BO status: 0 - BO is idle, 1 - BO is busy */ + uint32_t domain; /* Returned current memory domain */ +}; + +union drm_amdgpu_gem_wait_idle { + struct drm_amdgpu_gem_wait_idle_in in; + struct drm_amdgpu_gem_wait_idle_out out; +}; + +struct drm_amdgpu_wait_cs_in { + uint64_t handle; + uint64_t timeout; + uint32_t ip_type; + uint32_t ip_instance; + uint32_t ring; + uint32_t _pad; +}; + +struct drm_amdgpu_wait_cs_out { + uint64_t status; +}; + +union drm_amdgpu_wait_cs { + struct drm_amdgpu_wait_cs_in in; + struct drm_amdgpu_wait_cs_out out; +}; + +/* Sets or returns a value associated with a buffer. */ +struct drm_amdgpu_gem_op { + uint32_t handle; /* buffer */ + uint32_t op; /* AMDGPU_GEM_OP_* */ + uint64_t value; /* input or return value */ +}; + +#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0 +#define AMDGPU_GEM_OP_SET_INITIAL_DOMAIN 1 + +#define AMDGPU_VA_OP_MAP 1 +#define AMDGPU_VA_OP_UNMAP 2 + +#define AMDGPU_VA_RESULT_OK 0 +#define AMDGPU_VA_RESULT_ERROR 1 +#define AMDGPU_VA_RESULT_VA_INVALID_ALIGNMENT 2 + +/* Mapping flags */ +/* readable mapping */ +#define AMDGPU_VM_PAGE_READABLE (1 << 1) +/* writable mapping */ +#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2) +/* executable mapping, new for VI */ +#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3) + +struct drm_amdgpu_gem_va_in { + /* GEM object handle */ + uint32_t handle; + uint32_t _pad; + /* map or unmap*/ + uint32_t operation; + /* specify mapping flags */ + uint32_t flags; + /* va address to assign . Must be correctly aligned.*/ + uint64_t va_address; + /* Specify offset inside of BO to assign. Must be correctly aligned.*/ + uint64_t offset_in_bo; + /* Specify mapping size. If 0 and offset is 0 then map the whole BO.*/ + /* Must be correctly aligned. 
*/ + uint64_t map_size; +}; + +struct drm_amdgpu_gem_va_out { + uint32_t result; + uint32_t _pad; +}; + +union drm_amdgpu_gem_va { + struct drm_amdgpu_gem_va_in in; + struct drm_amdgpu_gem_va_out out; +}; + +#define AMDGPU_HW_IP_GFX 0 +#define AMDGPU_HW_IP_COMPUTE 1 +#define AMDGPU_HW_IP_DMA 2 +#define AMDGPU_HW_IP_UVD 3 +#define AMDGPU_HW_IP_VCE 4 +#define AMDGPU_HW_IP_NUM 5 + +#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1 + +#define AMDGPU_CHUNK_ID_IB 0x01 +#define AMDGPU_CHUNK_ID_FENCE 0x02 +struct drm_amdgpu_cs_chunk { + uint32_t chunk_id; + uint32_t length_dw; + uint64_t chunk_data; +}; + +struct drm_amdgpu_cs_in { + /** Rendering context id */ + uint32_t ctx_id; + /** Handle of resource list associated with CS */ + uint32_t bo_list_handle; + uint32_t num_chunks; + uint32_t _pad; + /* this points to uint64_t * which point to cs chunks */ + uint64_t chunks; +}; + +struct drm_amdgpu_cs_out { + uint64_t handle; +}; + +union drm_amdgpu_cs { + struct drm_amdgpu_cs_in in; + struct drm_amdgpu_cs_out out; +}; + +/* Specify flags to be used for IB */ + +/* This IB should be submitted to CE */ +#define AMDGPU_IB_FLAG_CE (1<<0) + +/* GDS is used by this IB */ +#define AMDGPU_IB_FLAG_GDS (1<<1) + +struct drm_amdgpu_cs_chunk_ib { + /** + * Handle of GEM object to be used as IB or 0 if it is already in + * residency list. + */ + uint32_t handle; + uint32_t flags; /* IB Flags */ + uint64_t va_start; /* Virtual address to begin IB execution */ + uint32_t ib_bytes; /* Size of submission */ + uint32_t ip_type; /* HW IP to submit to */ + uint32_t ip_instance; /* HW IP index of the same type to submit to */ + uint32_t ring; /* Ring index to submit to */ +}; + +struct drm_amdgpu_cs_chunk_fence { + uint32_t handle; + uint32_t offset; +}; + +struct drm_amdgpu_cs_chunk_data { + union { + struct drm_amdgpu_cs_chunk_ib ib_data; + struct drm_amdgpu_cs_chunk_fence fence_data; + }; +}; + +/** + * Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU + * + */ +#define AMDGPU_IDS_FLAGS_FUSION 0x1 + +/* indicate if acceleration can be working */ +#define AMDGPU_INFO_ACCEL_WORKING 0x00 +/* get the crtc_id from the mode object id? 
*/ +#define AMDGPU_INFO_CRTC_FROM_ID 0x01 +/* query hw IP info */ +#define AMDGPU_INFO_HW_IP_INFO 0x02 +/* query hw IP instance count for the specified type */ +#define AMDGPU_INFO_HW_IP_COUNT 0x03 +/* timestamp for GL_ARB_timer_query */ +#define AMDGPU_INFO_TIMESTAMP 0x05 +/* Query the firmware version */ +#define AMDGPU_INFO_FW_VERSION 0x0e + /* Subquery id: Query VCE firmware version */ + #define AMDGPU_INFO_FW_VCE 0x1 + /* Subquery id: Query UVD firmware version */ + #define AMDGPU_INFO_FW_UVD 0x2 + /* Subquery id: Query GMC firmware version */ + #define AMDGPU_INFO_FW_GMC 0x03 + /* Subquery id: Query GFX ME firmware version */ + #define AMDGPU_INFO_FW_GFX_ME 0x04 + /* Subquery id: Query GFX PFP firmware version */ + #define AMDGPU_INFO_FW_GFX_PFP 0x05 + /* Subquery id: Query GFX CE firmware version */ + #define AMDGPU_INFO_FW_GFX_CE 0x06 + /* Subquery id: Query GFX RLC firmware version */ + #define AMDGPU_INFO_FW_GFX_RLC 0x07 + /* Subquery id: Query GFX MEC firmware version */ + #define AMDGPU_INFO_FW_GFX_MEC 0x08 + /* Subquery id: Query SMC firmware version */ + #define AMDGPU_INFO_FW_SMC 0x0a + /* Subquery id: Query SDMA firmware version */ + #define AMDGPU_INFO_FW_SDMA 0x0b +/* number of bytes moved for TTM migration */ +#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f +/* the used VRAM size */ +#define AMDGPU_INFO_VRAM_USAGE 0x10 +/* the used GTT size */ +#define AMDGPU_INFO_GTT_USAGE 0x11 +/* Information about GDS, etc. resource configuration */ +#define AMDGPU_INFO_GDS_CONFIG 0x13 +/* Query information about VRAM and GTT domains */ +#define AMDGPU_INFO_VRAM_GTT 0x14 +/* Query information about register in MMR address space*/ +#define AMDGPU_INFO_READ_MMR_REG 0x15 +/* Query information about device: rev id, family, etc. */ +#define AMDGPU_INFO_DEV_INFO 0x16 +/* visible vram usage */ +#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17 + +#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 +#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff +#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8 +#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff + +/* Input structure for the INFO ioctl */ +struct drm_amdgpu_info { + /* Where the return value will be stored */ + uint64_t return_pointer; + /* The size of the return value. Just like "size" in "snprintf", + * it limits how many bytes the kernel can write. */ + uint32_t return_size; + /* The query request id. */ + uint32_t query; + + union { + struct { + uint32_t id; + uint32_t _pad; + } mode_crtc; + + struct { + /** AMDGPU_HW_IP_* */ + uint32_t type; + /** + * Index of the IP if there are more IPs of the same type. + * Ignored by AMDGPU_INFO_HW_IP_COUNT. + */ + uint32_t ip_instance; + } query_hw_ip; + + struct { + uint32_t dword_offset; + uint32_t count; /* number of registers to read */ + uint32_t instance; + uint32_t flags; + } read_mmr_reg; + + struct { + /** AMDGPU_INFO_FW_* */ + uint32_t fw_type; + /** Index of the IP if there are more IPs of the same type. */ + uint32_t ip_instance; + /** + * Index of the engine. Whether this is used depends + * on the firmware type. (e.g. 
MEC, SDMA) + */ + uint32_t index; + uint32_t _pad; + } query_fw; + }; +}; + +struct drm_amdgpu_info_gds { + /** GDS GFX partition size */ + uint32_t gds_gfx_partition_size; + /** GDS compute partition size */ + uint32_t compute_partition_size; + /** total GDS memory size */ + uint32_t gds_total_size; + /** GWS size per GFX partition */ + uint32_t gws_per_gfx_partition; + /** GSW size per compute partition */ + uint32_t gws_per_compute_partition; + /** OA size per GFX partition */ + uint32_t oa_per_gfx_partition; + /** OA size per compute partition */ + uint32_t oa_per_compute_partition; + uint32_t _pad; +}; + +struct drm_amdgpu_info_vram_gtt { + uint64_t vram_size; + uint64_t vram_cpu_accessible_size; + uint64_t gtt_size; +}; + +struct drm_amdgpu_info_firmware { + uint32_t ver; + uint32_t feature; +}; + +struct drm_amdgpu_info_device { + /** PCI Device ID */ + uint32_t device_id; + /** Internal chip revision: A0, A1, etc.) */ + uint32_t chip_rev; + uint32_t external_rev; + /** Revision id in PCI Config space */ + uint32_t pci_rev; + uint32_t family; + uint32_t num_shader_engines; + uint32_t num_shader_arrays_per_engine; + uint32_t gpu_counter_freq; /* in KHz */ + uint64_t max_engine_clock; /* in KHz */ + /* cu information */ + uint32_t cu_active_number; + uint32_t cu_ao_mask; + uint32_t cu_bitmap[4][4]; + /** Render backend pipe mask. One render backend is CB+DB. */ + uint32_t enabled_rb_pipes_mask; + uint32_t num_rb_pipes; + uint32_t num_hw_gfx_contexts; + uint32_t _pad; + uint64_t ids_flags; + /** Starting virtual address for UMDs. */ + uint64_t virtual_address_offset; + /** Required alignment of virtual addresses. */ + uint32_t virtual_address_alignment; + /** Page table entry - fragment size */ + uint32_t pte_fragment_size; + uint32_t gart_page_size; +}; + +struct drm_amdgpu_info_hw_ip { + /** Version of h/w IP */ + uint32_t hw_ip_version_major; + uint32_t hw_ip_version_minor; + /** Capabilities */ + uint64_t capabilities_flags; + /** Bitmask of available rings. Bit 0 means ring 0, etc. */ + uint32_t available_rings; + uint32_t _pad; +}; + +/* + * Supported GPU families + */ +#define AMDGPU_FAMILY_UNKNOWN 0 +#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */ +#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ +#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */ +#define AMDGPU_FAMILY_CZ 135 /* Carrizo */ + +#endif -- cgit v1.2.3-71-gd317 From 886712881da15b7f455c43a4ce4121f20035c0fa Mon Sep 17 00:00:00 2001 From: Jammy Zhou Date: Wed, 6 May 2015 18:44:29 +0800 Subject: drm/amdgpu: remove AMDGPU_GEM_CREATE_CPU_GTT_UC This flag isn't used by user mode drivers, remove it to avoid confusion. And rename GTT_WC to GTT_USWC to make it clear. 
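For illustration, a hedged sketch of requesting a write-combined GTT buffer with the renamed flag through the raw ioctl, using the structures from the amdgpu_drm.h header added earlier in this series; the header path and the lack of error handling are simplifying assumptions:

    /* Hedged sketch: allocate a GEM buffer in GTT with USWC CPU access. */
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>        /* header path may differ per system */

    static int alloc_uswc_gtt_bo(int fd, uint64_t size, uint32_t *handle)
    {
            union drm_amdgpu_gem_create args;

            memset(&args, 0, sizeof(args));
            args.in.bo_size = size;
            args.in.alignment = 4096;                    /* page-aligned */
            args.in.domains = AMDGPU_GEM_DOMAIN_GTT;     /* GPU-accessible system memory */
            args.in.domain_flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;

            if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args) != 0)
                    return -1;

            *handle = args.out.handle;                   /* GEM handle for later ioctls */
            return 0;
    }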
Signed-off-by: Jammy Zhou Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 10 ++-------- include/uapi/drm/amdgpu_drm.h | 7 ++----- 2 files changed, 4 insertions(+), 13 deletions(-) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index f5e17f95e812..992b7f5843bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -132,10 +132,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain) } if (domain & AMDGPU_GEM_DOMAIN_GTT) { - if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_UC) { - rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; - } else if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_WC) { + if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { rbo->placements[c].fpfn = 0; rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; @@ -146,10 +143,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain) } if (domain & AMDGPU_GEM_DOMAIN_CPU) { - if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_UC) { - rbo->placements[c].fpfn = 0; - rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; - } else if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_WC) { + if (rbo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { rbo->placements[c].fpfn = 0; rbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_UNCACHED; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 9e771fb858b4..77bc5740fd7c 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -73,15 +73,12 @@ #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0) /* Flag that CPU access will not work, this VRAM domain is invisible */ #define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1) -/* Flag that un-cached attributes should be used for GTT */ -#define AMDGPU_GEM_CREATE_CPU_GTT_UC (1 << 2) /* Flag that USWC attributes should be used for GTT */ -#define AMDGPU_GEM_CREATE_CPU_GTT_WC (1 << 3) +#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) /* Flag mask for GTT domain_flags */ #define AMDGPU_GEM_CREATE_CPU_GTT_MASK \ - (AMDGPU_GEM_CREATE_CPU_GTT_WC | \ - AMDGPU_GEM_CREATE_CPU_GTT_UC | \ + (AMDGPU_GEM_CREATE_CPU_GTT_USWC | \ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \ AMDGPU_GEM_CREATE_NO_CPU_ACCESS) -- cgit v1.2.3-71-gd317 From 66b3cf2ab38f47db2d07fe24a00972fbf822cd74 Mon Sep 17 00:00:00 2001 From: Jammy Zhou Date: Fri, 8 May 2015 17:29:40 +0800 Subject: drm/amdgpu: add ctx_id to the WAIT_CS IOCTL (v4) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is required to support fence per context. 
v2: add amdgpu_ctx_get/put v3: improve get/put v4: squash hlock fix Signed-off-by: Jammy Zhou Reviewed-by: Christian König --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 ++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 30 ++++++++++++++++++++++++++++++ include/uapi/drm/amdgpu_drm.h | 2 +- 4 files changed, 39 insertions(+), 1 deletion(-) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index cef3a43ab0aa..bf0c607de195 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1902,6 +1902,8 @@ int amdgpu_ctx_query(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id,struct amdgpu_ctx_state *state); void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv); +struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); +int amdgpu_ctx_put(struct amdgpu_ctx *ctx); extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index ffbe9aa9f232..86b93245bf9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -768,8 +768,13 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, uint64_t seq[AMDGPU_MAX_RINGS] = {0}; struct amdgpu_ring *ring = NULL; unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); + struct amdgpu_ctx *ctx; long r; + ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id); + if (ctx == NULL) + return -EINVAL; + r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, wait->in.ring, &ring); if (r) @@ -778,6 +783,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, seq[ring->idx] = wait->in.handle; r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout); + amdgpu_ctx_put(ctx); if (r < 0) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 0dc3a4ebd5d3..bcd332e085f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -151,3 +151,33 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, return r; } + +struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id) +{ + struct amdgpu_ctx *ctx; + struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; + + mutex_lock(&mgr->lock); + ctx = idr_find(&mgr->ctx_handles, id); + if (ctx) + kref_get(&ctx->refcount); + mutex_unlock(&mgr->lock); + return ctx; +} + +int amdgpu_ctx_put(struct amdgpu_ctx *ctx) +{ + struct amdgpu_fpriv *fpriv; + struct amdgpu_ctx_mgr *mgr; + + if (ctx == NULL) + return -EINVAL; + + fpriv = ctx->fpriv; + mgr = &fpriv->ctx_mgr; + mutex_lock(&mgr->lock); + kref_put(&ctx->refcount, amdgpu_ctx_do_release); + mutex_unlock(&mgr->lock); + + return 0; +} diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 77bc5740fd7c..ca0ea1efa3f4 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -260,7 +260,7 @@ struct drm_amdgpu_wait_cs_in { uint32_t ip_type; uint32_t ip_instance; uint32_t ring; - uint32_t _pad; + uint32_t ctx_id; }; struct drm_amdgpu_wait_cs_out { -- cgit v1.2.3-71-gd317 From aa2bdb2476206c7de4473850039daa705230c27b Mon Sep 17 00:00:00 2001 From: Jammy Zhou Date: Mon, 11 May 2015 23:49:34 +0800 Subject: drm/amdgpu: add CE preamble flag v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The CE preamble IB can be dropped for the 
same context v2: use the flags directly v3: remove 'CE' for potential preamble usage by other rings Signed-off-by: Jammy Zhou Reviewed-by: Christian König --- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7 +++++++ include/uapi/drm/amdgpu_drm.h | 3 +++ 3 files changed, 17 insertions(+) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 7428c4305418..cec46ebae5f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2518,6 +2518,13 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, { u32 header, control = 0; u32 next_rptr = ring->wptr + 5; + + /* drop the CE preamble IB for the same context */ + if ((ring->type == AMDGPU_RING_TYPE_GFX) && + (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && + !ring->need_ctx_switch) + return; + if (ring->type == AMDGPU_RING_TYPE_COMPUTE) control |= INDIRECT_BUFFER_VALID; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 48de9204ff5e..fc8c46209db4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3647,6 +3647,13 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, { u32 header, control = 0; u32 next_rptr = ring->wptr + 5; + + /* drop the CE preamble IB for the same context */ + if ((ring->type == AMDGPU_RING_TYPE_GFX) && + (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && + !ring->need_ctx_switch) + return; + if (ring->type == AMDGPU_RING_TYPE_COMPUTE) control |= INDIRECT_BUFFER_VALID; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index ca0ea1efa3f4..fb428fe54186 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -369,6 +369,9 @@ union drm_amdgpu_cs { /* GDS is used by this IB */ #define AMDGPU_IB_FLAG_GDS (1<<1) +/* CE Preamble */ +#define AMDGPU_IB_FLAG_PREAMBLE (1<<2) + struct drm_amdgpu_cs_chunk_ib { /** * Handle of GEM object to be used as IB or 0 if it is already in -- cgit v1.2.3-71-gd317 From 02b70c8c9f0351f5ddf70716b9049f3fe50d62e7 Mon Sep 17 00:00:00 2001 From: Jammy Zhou Date: Tue, 12 May 2015 22:46:45 +0800 Subject: drm/amdgpu: expose the max virtual address MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jammy Zhou Reviewed-by: Christian König --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1 + include/uapi/drm/amdgpu_drm.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 02c450d0be1a..35185d6b7d46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -428,6 +428,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (adev->flags & AMDGPU_IS_APU) dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; + dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL); dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) * AMDGPU_GPU_PAGE_SIZE; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index fb428fe54186..65da7cd16c0f 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -560,6 +560,8 @@ struct drm_amdgpu_info_device { uint64_t ids_flags; /** Starting virtual 
address for UMDs. */ uint64_t virtual_address_offset; + /** The maximum virtual address */ + uint64_t virtual_address_max; /** Required alignment of virtual addresses. */ uint32_t virtual_address_alignment; /** Page table entry - fragment size */ -- cgit v1.2.3-71-gd317 From d94aed5a6c947b1fda346aff1fa316dacf4a1a5a Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Tue, 5 May 2015 21:13:49 +0200 Subject: drm/amdgpu: add and implement the GPU reset status query MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Marek Olšák Reviewed-by: Christian König Reviewed-by: Jammy Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 36 +++++++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + include/uapi/drm/amdgpu_drm.h | 11 ++++++++- 4 files changed, 37 insertions(+), 17 deletions(-) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 66b5bd058799..ebff89eb2f4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1040,7 +1040,7 @@ struct amdgpu_vm_manager { struct amdgpu_ctx_state { uint64_t flags; - uint64_t hangs; + uint32_t hangs; }; struct amdgpu_ctx { @@ -1049,6 +1049,7 @@ struct amdgpu_ctx { struct amdgpu_fpriv *fpriv; struct amdgpu_ctx_state state; uint32_t id; + unsigned reset_counter; }; struct amdgpu_ctx_mgr { @@ -1897,8 +1898,6 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv, uint32_t *id,uint32_t flags); int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id); -int amdgpu_ctx_query(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, - uint32_t id,struct amdgpu_ctx_state *state); void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv); struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); @@ -2006,6 +2005,7 @@ struct amdgpu_device { atomic64_t vram_vis_usage; atomic64_t gtt_usage; atomic64_t num_bytes_moved; + atomic_t gpu_reset_counter; /* display */ struct amdgpu_mode_info mode_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index bcd332e085f6..6c66ac8a1891 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -81,21 +81,36 @@ int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint return -EINVAL; } -int amdgpu_ctx_query(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id, struct amdgpu_ctx_state *state) +static int amdgpu_ctx_query(struct amdgpu_device *adev, + struct amdgpu_fpriv *fpriv, uint32_t id, + union drm_amdgpu_ctx_out *out) { struct amdgpu_ctx *ctx; struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; + unsigned reset_counter; mutex_lock(&mgr->lock); ctx = idr_find(&mgr->ctx_handles, id); - if (ctx) { - /* state should alter with CS activity */ - *state = ctx->state; + if (!ctx) { mutex_unlock(&mgr->lock); - return 0; + return -EINVAL; } + + /* TODO: these two are always zero */ + out->state.flags = ctx->state.flags; + out->state.hangs = ctx->state.hangs; + + /* determine if a GPU reset has occured since the last call */ + reset_counter = atomic_read(&adev->gpu_reset_counter); + /* TODO: this should ideally return NO, GUILTY, or INNOCENT. 
*/ + if (ctx->reset_counter == reset_counter) + out->state.reset_status = AMDGPU_CTX_NO_RESET; + else + out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET; + ctx->reset_counter = reset_counter; + mutex_unlock(&mgr->lock); - return -EINVAL; + return 0; } void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv) @@ -115,12 +130,11 @@ void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv) } int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, - struct drm_file *filp) + struct drm_file *filp) { int r; uint32_t id; uint32_t flags; - struct amdgpu_ctx_state state; union drm_amdgpu_ctx *args = data; struct amdgpu_device *adev = dev->dev_private; @@ -139,11 +153,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, r = amdgpu_ctx_free(adev, fpriv, id); break; case AMDGPU_CTX_OP_QUERY_STATE: - r = amdgpu_ctx_query(adev, fpriv, id, &state); - if (r == 0) { - args->out.state.flags = state.flags; - args->out.state.hangs = state.hangs; - } + r = amdgpu_ctx_query(adev, fpriv, id, &args->out); break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 61cf5ad78857..3448d9fe88cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1781,6 +1781,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) } adev->needs_reset = false; + atomic_inc(&adev->gpu_reset_counter); /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 65da7cd16c0f..46580e950036 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -149,6 +149,12 @@ union drm_amdgpu_bo_list { #define AMDGPU_CTX_OP_STATE_RUNNING 1 +/* GPU reset status */ +#define AMDGPU_CTX_NO_RESET 0 +#define AMDGPU_CTX_GUILTY_RESET 1 /* this the context caused it */ +#define AMDGPU_CTX_INNOCENT_RESET 2 /* some other context caused it */ +#define AMDGPU_CTX_UNKNOWN_RESET 3 /* unknown cause */ + struct drm_amdgpu_ctx_in { uint32_t op; uint32_t flags; @@ -164,7 +170,10 @@ union drm_amdgpu_ctx_out { struct { uint64_t flags; - uint64_t hangs; + /** Number of resets caused by this context so far. */ + uint32_t hangs; + /** Reset status since the last call of the ioctl. 
*/ + uint32_t reset_status; } state; }; -- cgit v1.2.3-71-gd317 From fbd76d59efe061c89d4ba14eef3a2cac1e3056c2 Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Thu, 14 May 2015 23:48:26 +0200 Subject: drm/amdgpu: rework tiling flags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Marek Olšák Reviewed-by: Alex Deucher Acked-by: Christian König --- drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 43 +------------- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 92 ++++------------------------- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 93 ++++-------------------------- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 91 ++++------------------------- include/uapi/drm/amdgpu_drm.h | 40 +++++++------ 6 files changed, 58 insertions(+), 304 deletions(-) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index ef611986b2b6..73b7aad5a872 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -32,6 +32,7 @@ #include #include #include "amdgpu.h" +#include "cikd.h" #include @@ -135,7 +136,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, rbo = gem_to_amdgpu_bo(gobj); if (fb_tiled) - tiling_flags = AMDGPU_TILING_MACRO; + tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1); ret = amdgpu_bo_reserve(rbo, false); if (unlikely(ret != 0)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index a721f5044557..b545f614628c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -459,49 +459,8 @@ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags) { - unsigned bankw, bankh, mtaspect, tilesplit, stilesplit; - - bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK; - bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK; - mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK; - tilesplit = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK; - stilesplit = (tiling_flags >> AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK; - switch (bankw) { - case 0: - case 1: - case 2: - case 4: - case 8: - break; - default: + if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6) return -EINVAL; - } - switch (bankh) { - case 0: - case 1: - case 2: - case 4: - case 8: - break; - default: - return -EINVAL; - } - switch (mtaspect) { - case 0: - case 1: - case 2: - case 4: - case 8: - break; - default: - return -EINVAL; - } - if (tilesplit > 6) { - return -EINVAL; - } - if (stilesplit > 6) { - return -EINVAL; - } bo->tiling_flags = tiling_flags; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index d412291ed70e..37b96236fe2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2008,61 +2008,6 @@ static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable) WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); } -static void dce_v10_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw, - unsigned *bankh, unsigned *mtaspect, - unsigned *tile_split) -{ - *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & 
AMDGPU_TILING_EG_BANKW_MASK; - *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK; - *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK; - *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK; - switch (*bankw) { - default: - case 1: - *bankw = ADDR_SURF_BANK_WIDTH_1; - break; - case 2: - *bankw = ADDR_SURF_BANK_WIDTH_2; - break; - case 4: - *bankw = ADDR_SURF_BANK_WIDTH_4; - break; - case 8: - *bankw = ADDR_SURF_BANK_WIDTH_8; - break; - } - switch (*bankh) { - default: - case 1: - *bankh = ADDR_SURF_BANK_HEIGHT_1; - break; - case 2: - *bankh = ADDR_SURF_BANK_HEIGHT_2; - break; - case 4: - *bankh = ADDR_SURF_BANK_HEIGHT_4; - break; - case 8: - *bankh = ADDR_SURF_BANK_HEIGHT_8; - break; - } - switch (*mtaspect) { - default: - case 1: - *mtaspect = ADDR_SURF_MACRO_ASPECT_1; - break; - case 2: - *mtaspect = ADDR_SURF_MACRO_ASPECT_2; - break; - case 4: - *mtaspect = ADDR_SURF_MACRO_ASPECT_4; - break; - case 8: - *mtaspect = ADDR_SURF_MACRO_ASPECT_8; - break; - } -} - static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, int atomic) @@ -2076,10 +2021,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, struct amdgpu_bo *rbo; uint64_t fb_location, tiling_flags; uint32_t fb_format, fb_pitch_pixels; - unsigned bankw, bankh, mtaspect, tile_split; u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); - /* XXX change to VI */ - u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f; + u32 pipe_config; u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; @@ -2121,6 +2064,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); amdgpu_bo_unreserve(rbo); + pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); + switch (target_fb->pixel_format) { case DRM_FORMAT_C8: fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); @@ -2198,27 +2143,15 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, return -EINVAL; } - if (tiling_flags & AMDGPU_TILING_MACRO) { - unsigned tileb, index, num_banks, tile_split_bytes; - - dce_v10_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); - /* Set NUM_BANKS. */ - /* Calculate the macrotile mode index. 
*/ - tile_split_bytes = 64 << tile_split; - tileb = 8 * 8 * target_fb->bits_per_pixel / 8; - tileb = min(tile_split_bytes, tileb); + if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { + unsigned bankw, bankh, mtaspect, tile_split, num_banks; - for (index = 0; tileb > 64; index++) { - tileb >>= 1; - } - - if (index >= 16) { - DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", - target_fb->bits_per_pixel, tile_split); - return -EINVAL; - } + bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); + bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); + mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); + tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); + num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); - num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3; fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks); fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, ARRAY_2D_TILED_THIN1); @@ -2230,14 +2163,11 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, mtaspect); fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, ADDR_SURF_MICRO_TILING_DISPLAY); - } else if (tiling_flags & AMDGPU_TILING_MICRO) { + } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, ARRAY_1D_TILED_THIN1); } - /* Read the pipe config from the 2D TILED SCANOUT mode. - * It should be the same for the other modes too, but not all - * modes set the pipe config field. */ fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, pipe_config); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 55fef15a4fcf..04a5d4cd75b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2006,61 +2006,6 @@ static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable) WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); } -static void dce_v11_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw, - unsigned *bankh, unsigned *mtaspect, - unsigned *tile_split) -{ - *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK; - *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK; - *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK; - *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK; - switch (*bankw) { - default: - case 1: - *bankw = ADDR_SURF_BANK_WIDTH_1; - break; - case 2: - *bankw = ADDR_SURF_BANK_WIDTH_2; - break; - case 4: - *bankw = ADDR_SURF_BANK_WIDTH_4; - break; - case 8: - *bankw = ADDR_SURF_BANK_WIDTH_8; - break; - } - switch (*bankh) { - default: - case 1: - *bankh = ADDR_SURF_BANK_HEIGHT_1; - break; - case 2: - *bankh = ADDR_SURF_BANK_HEIGHT_2; - break; - case 4: - *bankh = ADDR_SURF_BANK_HEIGHT_4; - break; - case 8: - *bankh = ADDR_SURF_BANK_HEIGHT_8; - break; - } - switch (*mtaspect) { - default: - case 1: - *mtaspect = ADDR_SURF_MACRO_ASPECT_1; - break; - case 2: - *mtaspect = ADDR_SURF_MACRO_ASPECT_2; - break; - case 4: - *mtaspect = ADDR_SURF_MACRO_ASPECT_4; - break; - case 8: - *mtaspect = ADDR_SURF_MACRO_ASPECT_8; - break; - } -} - static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, int atomic) @@ -2074,10 +2019,8 @@ static int 
dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, struct amdgpu_bo *rbo; uint64_t fb_location, tiling_flags; uint32_t fb_format, fb_pitch_pixels; - unsigned bankw, bankh, mtaspect, tile_split; u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); - /* XXX change to VI */ - u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f; + u32 pipe_config; u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; @@ -2119,6 +2062,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); amdgpu_bo_unreserve(rbo); + pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); + switch (target_fb->pixel_format) { case DRM_FORMAT_C8: fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); @@ -2196,28 +2141,15 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, return -EINVAL; } - if (tiling_flags & AMDGPU_TILING_MACRO) { - unsigned tileb, index, num_banks, tile_split_bytes; - - dce_v11_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); - /* Set NUM_BANKS. */ - /* Calculate the macrotile mode index. */ - tile_split_bytes = 64 << tile_split; - tileb = 8 * 8 * target_fb->bits_per_pixel / 8; - tileb = min(tile_split_bytes, tileb); + if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { + unsigned bankw, bankh, mtaspect, tile_split, num_banks; - for (index = 0; tileb > 64; index++) { - tileb >>= 1; - } - - if (index >= 16) { - DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", - target_fb->bits_per_pixel, tile_split); - return -EINVAL; - } + bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); + bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); + mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); + tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); + num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); - /* XXX fix me for VI */ - num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3; fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks); fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, ARRAY_2D_TILED_THIN1); @@ -2229,14 +2161,11 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, mtaspect); fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, ADDR_SURF_MICRO_TILING_DISPLAY); - } else if (tiling_flags & AMDGPU_TILING_MICRO) { + } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, ARRAY_1D_TILED_THIN1); } - /* Read the pipe config from the 2D TILED SCANOUT mode. - * It should be the same for the other modes too, but not all - * modes set the pipe config field. 
*/ fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, pipe_config); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index c1bc6935c88e..9f2ff8d374f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1976,61 +1976,6 @@ static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable) WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); } -static void dce_v8_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw, - unsigned *bankh, unsigned *mtaspect, - unsigned *tile_split) -{ - *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK; - *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK; - *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK; - *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK; - switch (*bankw) { - default: - case 1: - *bankw = ADDR_SURF_BANK_WIDTH_1; - break; - case 2: - *bankw = ADDR_SURF_BANK_WIDTH_2; - break; - case 4: - *bankw = ADDR_SURF_BANK_WIDTH_4; - break; - case 8: - *bankw = ADDR_SURF_BANK_WIDTH_8; - break; - } - switch (*bankh) { - default: - case 1: - *bankh = ADDR_SURF_BANK_HEIGHT_1; - break; - case 2: - *bankh = ADDR_SURF_BANK_HEIGHT_2; - break; - case 4: - *bankh = ADDR_SURF_BANK_HEIGHT_4; - break; - case 8: - *bankh = ADDR_SURF_BANK_HEIGHT_8; - break; - } - switch (*mtaspect) { - default: - case 1: - *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_1; - break; - case 2: - *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_2; - break; - case 4: - *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_4; - break; - case 8: - *mtaspect = ADDR_SURF_MACRO_TILE_ASPECT_8; - break; - } -} - static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, int atomic) @@ -2044,9 +1989,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, struct amdgpu_bo *rbo; uint64_t fb_location, tiling_flags; uint32_t fb_format, fb_pitch_pixels; - unsigned bankw, bankh, mtaspect, tile_split; u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); - u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f; + u32 pipe_config; u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; @@ -2088,6 +2032,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); amdgpu_bo_unreserve(rbo); + pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); + switch (target_fb->pixel_format) { case DRM_FORMAT_C8: fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | @@ -2158,27 +2104,15 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, return -EINVAL; } - if (tiling_flags & AMDGPU_TILING_MACRO) { - unsigned tileb, index, num_banks, tile_split_bytes; - - dce_v8_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); - /* Set NUM_BANKS. */ - /* Calculate the macrotile mode index. 
*/ - tile_split_bytes = 64 << tile_split; - tileb = 8 * 8 * target_fb->bits_per_pixel / 8; - tileb = min(tile_split_bytes, tileb); + if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { + unsigned bankw, bankh, mtaspect, tile_split, num_banks; - for (index = 0; tileb > 64; index++) { - tileb >>= 1; - } - - if (index >= 16) { - DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", - target_fb->bits_per_pixel, tile_split); - return -EINVAL; - } + bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); + bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); + mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); + tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); + num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); - num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3; fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT); fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT); fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT); @@ -2186,13 +2120,10 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT); fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT); fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT); - } else if (tiling_flags & AMDGPU_TILING_MICRO) { + } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT); } - /* Read the pipe config from the 2D TILED SCANOUT mode. - * It should be the same for the other modes too, but not all - * modes set the pipe config field. */ fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT); dce_v8_0_vga_enable(crtc, false); diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 46580e950036..d9b9b6f8de2b 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -199,24 +199,28 @@ struct drm_amdgpu_gem_userptr { uint32_t handle; }; -#define AMDGPU_TILING_MACRO 0x1 -#define AMDGPU_TILING_MICRO 0x2 -#define AMDGPU_TILING_SWAP_16BIT 0x4 -#define AMDGPU_TILING_R600_NO_SCANOUT AMDGPU_TILING_SWAP_16BIT -#define AMDGPU_TILING_SWAP_32BIT 0x8 -/* this object requires a surface when mapped - i.e. 
front buffer */ -#define AMDGPU_TILING_SURFACE 0x10 -#define AMDGPU_TILING_MICRO_SQUARE 0x20 -#define AMDGPU_TILING_EG_BANKW_SHIFT 8 -#define AMDGPU_TILING_EG_BANKW_MASK 0xf -#define AMDGPU_TILING_EG_BANKH_SHIFT 12 -#define AMDGPU_TILING_EG_BANKH_MASK 0xf -#define AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16 -#define AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf -#define AMDGPU_TILING_EG_TILE_SPLIT_SHIFT 24 -#define AMDGPU_TILING_EG_TILE_SPLIT_MASK 0xf -#define AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28 -#define AMDGPU_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf +/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */ +#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0 +#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf +#define AMDGPU_TILING_PIPE_CONFIG_SHIFT 4 +#define AMDGPU_TILING_PIPE_CONFIG_MASK 0x1f +#define AMDGPU_TILING_TILE_SPLIT_SHIFT 9 +#define AMDGPU_TILING_TILE_SPLIT_MASK 0x7 +#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT 12 +#define AMDGPU_TILING_MICRO_TILE_MODE_MASK 0x7 +#define AMDGPU_TILING_BANK_WIDTH_SHIFT 15 +#define AMDGPU_TILING_BANK_WIDTH_MASK 0x3 +#define AMDGPU_TILING_BANK_HEIGHT_SHIFT 17 +#define AMDGPU_TILING_BANK_HEIGHT_MASK 0x3 +#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT 19 +#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK 0x3 +#define AMDGPU_TILING_NUM_BANKS_SHIFT 21 +#define AMDGPU_TILING_NUM_BANKS_MASK 0x3 + +#define AMDGPU_TILING_SET(field, value) \ + (((value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT) +#define AMDGPU_TILING_GET(value, field) \ + (((value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK) #define AMDGPU_GEM_METADATA_OP_SET_METADATA 1 #define AMDGPU_GEM_METADATA_OP_GET_METADATA 2 -- cgit v1.2.3-71-gd317 From dcc357e63727b63995dd869f015a748c9235eb42 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 19 May 2015 16:08:02 +0200 Subject: drm/amdgpu: drop allocation flag masks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not needed any more. 
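As an aside on the tiling rework above (an illustration, not part of any patch in this series): the old per-field AMDGPU_TILING_EG_*_SHIFT/_MASK pairs are replaced by one generic bitfield encoding, so user-mode code composes and decodes tiling metadata with AMDGPU_TILING_SET()/AMDGPU_TILING_GET() instead of open-coded shifts. A minimal sketch follows; the helper names and the concrete field values are placeholders, not recommendations.

#include <stdint.h>
#include "amdgpu_drm.h"	/* provides the AMDGPU_TILING_SET/GET macros above */

/* Hypothetical helper: encode tiling metadata for a 2D-tiled surface.
 * All field values below are illustrative placeholders. */
static uint64_t example_pack_tiling_flags(void)
{
	uint64_t flags = 0;

	flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4);	/* e.g. 2D_TILED_THIN1 */
	flags |= AMDGPU_TILING_SET(PIPE_CONFIG, 2);
	flags |= AMDGPU_TILING_SET(TILE_SPLIT, 1);	/* kernel rejects values > 6 */
	flags |= AMDGPU_TILING_SET(BANK_WIDTH, 1);
	flags |= AMDGPU_TILING_SET(BANK_HEIGHT, 1);
	flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, 2);
	flags |= AMDGPU_TILING_SET(NUM_BANKS, 3);

	return flags;
}

/* The display code recovers individual fields the same way, e.g.: */
static unsigned example_bank_width(uint64_t flags)
{
	return AMDGPU_TILING_GET(flags, BANK_WIDTH);
}

Note that the kernel side now keeps only a single sanity check (TILE_SPLIT, see the amdgpu_bo_set_tiling_flags() hunk above), so userspace is responsible for choosing field values that actually match the surface layout.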
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Monk Liu --- include/uapi/drm/amdgpu_drm.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'include/uapi/drm') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index d9b9b6f8de2b..64a07ac3b4b9 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -67,8 +67,6 @@ #define AMDGPU_GEM_DOMAIN_GWS 0x10 #define AMDGPU_GEM_DOMAIN_OA 0x20 -#define AMDGPU_GEM_DOMAIN_MASK 0x3F - /* Flag that CPU access will be required for the case of VRAM domain */ #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0) /* Flag that CPU access will not work, this VRAM domain is invisible */ @@ -76,12 +74,6 @@ /* Flag that USWC attributes should be used for GTT */ #define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2) -/* Flag mask for GTT domain_flags */ -#define AMDGPU_GEM_CREATE_CPU_GTT_MASK \ - (AMDGPU_GEM_CREATE_CPU_GTT_USWC | \ - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \ - AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - struct drm_amdgpu_gem_create_in { /** the requested memory size */ uint64_t bo_size; -- cgit v1.2.3-71-gd317 From d8f65a2376268dfb2963152754d41208dc43d906 Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Wed, 27 May 2015 14:30:38 +0200 Subject: drm/amdgpu: rename GEM_OP_SET_INITIAL_DOMAIN -> GEM_OP_SET_PLACEMENT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Marek Olšák Reviewed-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +- include/uapi/drm/amdgpu_drm.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index c90b74da0027..ad5b9c676fd8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -638,7 +638,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, r = -EFAULT; break; } - case AMDGPU_GEM_OP_SET_INITIAL_DOMAIN: + case AMDGPU_GEM_OP_SET_PLACEMENT: if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) { r = -EPERM; break; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 64a07ac3b4b9..cd54891b1d5c 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -284,8 +284,8 @@ struct drm_amdgpu_gem_op { uint64_t value; /* input or return value */ }; -#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0 -#define AMDGPU_GEM_OP_SET_INITIAL_DOMAIN 1 +#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0 +#define AMDGPU_GEM_OP_SET_PLACEMENT 1 #define AMDGPU_VA_OP_MAP 1 #define AMDGPU_VA_OP_UNMAP 2 -- cgit v1.2.3-71-gd317 From 32bf7106e072b59cade754062ed86023309f50d9 Mon Sep 17 00:00:00 2001 From: Ken Wang Date: Wed, 3 Jun 2015 17:36:54 +0800 Subject: drm/amdgpu add max_memory_clock for interface query (v2) Add a query for the max memory clock. 
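A userspace sketch, illustrative only and not part of the patch, of reading the clock fields returned by the amdgpu info ioctl. The drm_amdgpu_info request plumbing (return_pointer/return_size) and the AMDGPU_INFO_DEV_INFO query id are taken from the full header rather than the hunks shown here, so treat them as assumptions; real code would normally go through libdrm's wrappers instead of a raw ioctl().

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "amdgpu_drm.h"		/* include path depends on your setup */

static int example_query_clocks(int fd, uint64_t *sclk_khz, uint64_t *mclk_khz)
{
	struct drm_amdgpu_info_device dev_info;
	struct drm_amdgpu_info request;

	memset(&dev_info, 0, sizeof(dev_info));
	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)&dev_info;	/* kernel copies dev_info here */
	request.return_size = sizeof(dev_info);
	request.query = AMDGPU_INFO_DEV_INFO;		/* assumed query id */

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request))
		return -1;

	*sclk_khz = dev_info.max_engine_clock;	/* in KHz */
	*mclk_khz = dev_info.max_memory_clock;	/* in KHz, new in this patch */
	return 0;
}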
v2: handle the dpm enabled case properly Signed-off-by: Ken Wang Reviewd-by: Jammy Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 8 ++++++-- include/uapi/drm/amdgpu_drm.h | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index b6dd3751d9a5..3c182b6c5a27 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -414,11 +414,15 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se; /* return all clocks in KHz */ dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10; - if (adev->pm.dpm_enabled) + if (adev->pm.dpm_enabled) { dev_info.max_engine_clock = adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; - else + dev_info.max_memory_clock = + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10; + } else { dev_info.max_engine_clock = adev->pm.default_sclk * 10; + dev_info.max_memory_clock = adev->pm.default_mclk * 10; + } dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * adev->gfx.config.max_shader_engines; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index cd54891b1d5c..420c762f2ed7 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -553,6 +553,7 @@ struct drm_amdgpu_info_device { uint32_t num_shader_arrays_per_engine; uint32_t gpu_counter_freq; /* in KHz */ uint64_t max_engine_clock; /* in KHz */ + uint64_t max_memory_clock; /* in KHz */ /* cu information */ uint32_t cu_active_number; uint32_t cu_ao_mask; -- cgit v1.2.3-71-gd317 From a101a8995ab8072125d0bb4d95425c9fb37ff809 Mon Sep 17 00:00:00 2001 From: Ken Wang Date: Wed, 3 Jun 2015 17:47:54 +0800 Subject: drm/amdgpu add ce_ram_size for interface query Add a query for the CE ram size. User mode drivers will want to use this to determine how much size of the cache on the CE. 
Signed-off-by: Ken Wang Reviewd-by: Jammy Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 2 ++ include/uapi/drm/amdgpu_drm.h | 2 ++ 5 files changed, 9 insertions(+) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 4300e3d4b1cd..6c99b7560a27 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1136,6 +1136,8 @@ struct amdgpu_gfx { uint32_t gfx_current_status; /* sync signal for const engine */ unsigned ce_sync_offs; + /* ce ram size*/ + unsigned ce_ram_size; }; int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 3c182b6c5a27..9ede2446dcd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -441,6 +441,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file amdgpu_asic_get_cu_info(adev, &cu_info); dev_info.cu_active_number = cu_info.number; dev_info.cu_ao_mask = cu_info.ao_cu_mask; + dev_info.ce_ram_size = adev->gfx.ce_ram_size; memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); return copy_to_user(out, &dev_info, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index ed2f35de1d4f..faa39b38f0f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4820,6 +4820,8 @@ static int gfx_v7_0_hw_init(void *handle) if (r) return r; + adev->gfx.ce_ram_size = 0x8000; + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ffdba1965029..1895de433446 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -895,6 +895,8 @@ static int gfx_v8_0_sw_init(void *handle) if (r) return r; + adev->gfx.ce_ram_size = 0x8000; + return 0; } diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 420c762f2ed7..e24cc2e318df 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -573,6 +573,8 @@ struct drm_amdgpu_info_device { /** Page table entry - fragment size */ uint32_t pte_fragment_size; uint32_t gart_page_size; + /** constant engine ram size*/ + uint32_t ce_ram_size; }; struct drm_amdgpu_info_hw_ip { -- cgit v1.2.3-71-gd317 From 71062f435eaf0ff7867a1188e5c7887b0a5871ff Mon Sep 17 00:00:00 2001 From: Ken Wang Date: Thu, 4 Jun 2015 21:26:57 +0800 Subject: drm/amdgpu: add ib_size/start_alignment interface query Query the IB alignment requirements from the kernel rather than hardcoding them in the user mode drivers. 
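For illustration (not part of the patch), a sketch of how a user-mode driver might fetch the new alignment fields for the GFX ring instead of hardcoding them. AMDGPU_INFO_HW_IP_INFO is assumed from the full header; struct drm_amdgpu_info_hw_ip, the query_hw_ip sub-struct and AMDGPU_HW_IP_GFX all appear elsewhere in this series.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "amdgpu_drm.h"

static int example_query_gfx_ib_align(int fd, uint32_t *start_align,
				      uint32_t *size_align)
{
	struct drm_amdgpu_info_hw_ip ip_info;
	struct drm_amdgpu_info request;

	memset(&ip_info, 0, sizeof(ip_info));
	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)&ip_info;
	request.return_size = sizeof(ip_info);
	request.query = AMDGPU_INFO_HW_IP_INFO;		/* assumed query id */
	request.query_hw_ip.type = AMDGPU_HW_IP_GFX;
	request.query_hw_ip.ip_instance = 0;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request))
		return -1;

	*start_align = ip_info.ib_start_alignment;	/* new in this patch */
	*size_align = ip_info.ib_size_alignment;	/* new in this patch */
	return 0;
}

A submission path would then round its IB start address and size up to these values rather than baking in the alignments the kernel used to imply.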
Signed-off-by: Ken Wang Reviewed-by: Jammy Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 14 ++++++++++++++ include/uapi/drm/amdgpu_drm.h | 4 ++++ 2 files changed, 18 insertions(+) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 12b756e8c6b8..f1e5d87ef1f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -188,6 +188,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_amdgpu_info_hw_ip ip = {}; enum amd_ip_block_type type; uint32_t ring_mask = 0; + uint32_t ib_start_alignment = 0; + uint32_t ib_size_alignment = 0; if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT) return -EINVAL; @@ -197,25 +199,35 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_gfx_rings; i++) ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i); + ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; + ib_size_alignment = 8; break; case AMDGPU_HW_IP_COMPUTE: type = AMD_IP_BLOCK_TYPE_GFX; for (i = 0; i < adev->gfx.num_compute_rings; i++) ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i); + ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; + ib_size_alignment = 8; break; case AMDGPU_HW_IP_DMA: type = AMD_IP_BLOCK_TYPE_SDMA; ring_mask = adev->sdma[0].ring.ready ? 1 : 0; ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1); + ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; + ib_size_alignment = 1; break; case AMDGPU_HW_IP_UVD: type = AMD_IP_BLOCK_TYPE_UVD; ring_mask = adev->uvd.ring.ready ? 1 : 0; + ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; + ib_size_alignment = 8; break; case AMDGPU_HW_IP_VCE: type = AMD_IP_BLOCK_TYPE_VCE; for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++) ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i); + ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; + ib_size_alignment = 8; break; default: return -EINVAL; @@ -228,6 +240,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ip.hw_ip_version_minor = adev->ip_blocks[i].minor; ip.capabilities_flags = 0; ip.available_rings = ring_mask; + ip.ib_start_alignment = ib_start_alignment; + ip.ib_size_alignment = ib_size_alignment; break; } } diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index e24cc2e318df..3af5bd0e23a8 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -583,6 +583,10 @@ struct drm_amdgpu_info_hw_ip { uint32_t hw_ip_version_minor; /** Capabilities */ uint64_t capabilities_flags; + /** command buffer address start alignment*/ + uint32_t ib_start_alignment; + /** command buffer size alignment*/ + uint32_t ib_size_alignment; /** Bitmask of available rings. Bit 0 means ring 0, etc. */ uint32_t available_rings; uint32_t _pad; -- cgit v1.2.3-71-gd317 From 81c59f54125f9ff84546b6ba26c321662562703d Mon Sep 17 00:00:00 2001 From: Ken Wang Date: Wed, 3 Jun 2015 21:02:01 +0800 Subject: drm/amdgpu: add vram_type and vram_bit_width for interface query (v2) Track the type of vram on the board and provide a query for it. User mode drivers and tools want this information for determining bandwidth information and form informational purposes. 
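As a quick illustration of the "informational" use (not part of the patch): a tool can translate the new vram_type value, using the AMDGPU_VRAM_TYPE_* constants this patch adds to amdgpu_drm.h further down, into a label and pair it with vram_bit_width when reporting the memory configuration. Deriving real bandwidth additionally needs the per-type data rate, which the interface does not expose.

#include <stdint.h>
#include "amdgpu_drm.h"

static const char *example_vram_type_name(uint32_t vram_type)
{
	switch (vram_type) {
	case AMDGPU_VRAM_TYPE_GDDR1: return "GDDR1";
	case AMDGPU_VRAM_TYPE_DDR2:  return "DDR2";
	case AMDGPU_VRAM_TYPE_GDDR3: return "GDDR3";
	case AMDGPU_VRAM_TYPE_GDDR4: return "GDDR4";
	case AMDGPU_VRAM_TYPE_GDDR5: return "GDDR5";
	case AMDGPU_VRAM_TYPE_HBM:   return "HBM";
	case AMDGPU_VRAM_TYPE_DDR3:  return "DDR3";
	default:                     return "unknown";
	}
}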
v2: fix build when CI support is not enabled Signed-off-by: Ken Wang Reviewed-by: Jammy Zhou --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 ++ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 12 ++++++------ drivers/gpu/drm/amd/amdgpu/cikd.h | 11 ++++++++--- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 32 +++++++++++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 33 ++++++++++++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/vid.h | 11 ++++++++--- include/uapi/drm/amdgpu_drm.h | 13 +++++++++++++ 8 files changed, 89 insertions(+), 27 deletions(-) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 4bdc3265b410..149b76913091 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -767,7 +767,7 @@ struct amdgpu_mc { const struct firmware *fw; /* MC firmware */ uint32_t fw_version; struct amdgpu_irq_src vm_fault; - bool is_gddr5; + uint32_t vram_type; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index f1e5d87ef1f7..5533434c7a8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -457,6 +457,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.cu_ao_mask = cu_info.ao_cu_mask; dev_info.ce_ram_size = adev->gfx.ce_ram_size; memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); + dev_info.vram_type = adev->mc.vram_type; + dev_info.vram_bit_width = adev->mc.vram_width; return copy_to_user(out, &dev_info, min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index b1a4fbc22e69..82e8d0730517 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -891,7 +891,7 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev) { u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); - u32 switch_limit = adev->mc.is_gddr5 ? 450 : 300; + u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 
450 : 300; if (vblank_time < switch_limit) return true; @@ -2920,7 +2920,7 @@ static int ci_calculate_mclk_params(struct amdgpu_device *adev, mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK; mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT); - if (adev->mc.is_gddr5) { + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK | MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK); mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) | @@ -3043,7 +3043,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev, (memory_clock <= pi->mclk_strobe_mode_threshold)) memory_level->StrobeEnable = 1; - if (adev->mc.is_gddr5) { + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable); if (pi->mclk_edc_enable_threshold && @@ -3681,7 +3681,7 @@ static int ci_init_smc_table(struct amdgpu_device *adev) if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; - if (adev->mc.is_gddr5) + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; if (ulv->supported) { @@ -4498,14 +4498,14 @@ static int ci_set_mc_special_registers(struct amdgpu_device *adev, for (k = 0; k < table->num_entries; k++) { table->mc_reg_table_entry[k].mc_data[j] = (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); - if (!adev->mc.is_gddr5) + if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) table->mc_reg_table_entry[k].mc_data[j] |= 0x100; } j++; if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) return -EINVAL; - if (!adev->mc.is_gddr5) { + if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) { table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; for (k = 0; k < table->num_entries; k++) { diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index 11828e2cdf34..220865a44814 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h @@ -24,9 +24,14 @@ #ifndef CIK_H #define CIK_H -#define MC_SEQ_MISC0__GDDR5__SHIFT 0x1c -#define MC_SEQ_MISC0__GDDR5_MASK 0xf0000000 -#define MC_SEQ_MISC0__GDDR5_VALUE 5 +#define MC_SEQ_MISC0__MT__MASK 0xf0000000 +#define MC_SEQ_MISC0__MT__GDDR1 0x10000000 +#define MC_SEQ_MISC0__MT__DDR2 0x20000000 +#define MC_SEQ_MISC0__MT__GDDR3 0x30000000 +#define MC_SEQ_MISC0__MT__GDDR4 0x40000000 +#define MC_SEQ_MISC0__MT__GDDR5 0x50000000 +#define MC_SEQ_MISC0__MT__HBM 0x60000000 +#define MC_SEQ_MISC0__MT__DDR3 0xB0000000 #define CP_ME_TABLE_SIZE 96 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 01cd6b207d26..ae37fce36520 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -812,6 +812,28 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev, WREG32(mmHDP_MEM_POWER_LS, data); } +static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type) +{ + switch (mc_seq_vram_type) { + case MC_SEQ_MISC0__MT__GDDR1: + return AMDGPU_VRAM_TYPE_GDDR1; + case MC_SEQ_MISC0__MT__DDR2: + return AMDGPU_VRAM_TYPE_DDR2; + case MC_SEQ_MISC0__MT__GDDR3: + return AMDGPU_VRAM_TYPE_GDDR3; + case MC_SEQ_MISC0__MT__GDDR4: + return AMDGPU_VRAM_TYPE_GDDR4; + case MC_SEQ_MISC0__MT__GDDR5: + return AMDGPU_VRAM_TYPE_GDDR5; + case MC_SEQ_MISC0__MT__HBM: + return AMDGPU_VRAM_TYPE_HBM; + case MC_SEQ_MISC0__MT__DDR3: 
+ return AMDGPU_VRAM_TYPE_DDR3; + default: + return AMDGPU_VRAM_TYPE_UNKNOWN; + } +} + static int gmc_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -820,15 +842,11 @@ static int gmc_v7_0_early_init(void *handle) gmc_v7_0_set_irq_funcs(adev); if (adev->flags & AMDGPU_IS_APU) { - adev->mc.is_gddr5 = false; + adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; } else { u32 tmp = RREG32(mmMC_SEQ_MISC0); - - if (((tmp & MC_SEQ_MISC0__GDDR5_MASK) >> - MC_SEQ_MISC0__GDDR5__SHIFT) == MC_SEQ_MISC0__GDDR5_VALUE) - adev->mc.is_gddr5 = true; - else - adev->mc.is_gddr5 = false; + tmp &= MC_SEQ_MISC0__MT__MASK; + adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 675483a612c2..6206fcd39df9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -38,6 +38,7 @@ #include "vid.h" #include "vi.h" + static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); @@ -786,6 +787,28 @@ static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, "write" : "read", block, mc_client, mc_id); } +static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type) +{ + switch (mc_seq_vram_type) { + case MC_SEQ_MISC0__MT__GDDR1: + return AMDGPU_VRAM_TYPE_GDDR1; + case MC_SEQ_MISC0__MT__DDR2: + return AMDGPU_VRAM_TYPE_DDR2; + case MC_SEQ_MISC0__MT__GDDR3: + return AMDGPU_VRAM_TYPE_GDDR3; + case MC_SEQ_MISC0__MT__GDDR4: + return AMDGPU_VRAM_TYPE_GDDR4; + case MC_SEQ_MISC0__MT__GDDR5: + return AMDGPU_VRAM_TYPE_GDDR5; + case MC_SEQ_MISC0__MT__HBM: + return AMDGPU_VRAM_TYPE_HBM; + case MC_SEQ_MISC0__MT__DDR3: + return AMDGPU_VRAM_TYPE_DDR3; + default: + return AMDGPU_VRAM_TYPE_UNKNOWN; + } +} + static int gmc_v8_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -794,15 +817,11 @@ static int gmc_v8_0_early_init(void *handle) gmc_v8_0_set_irq_funcs(adev); if (adev->flags & AMDGPU_IS_APU) { - adev->mc.is_gddr5 = false; + adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; } else { u32 tmp = RREG32(mmMC_SEQ_MISC0); - - if (((tmp & MC_SEQ_MISC0__GDDR5_MASK) >> - MC_SEQ_MISC0__GDDR5__SHIFT) == MC_SEQ_MISC0__GDDR5_VALUE) - adev->mc.is_gddr5 = true; - else - adev->mc.is_gddr5 = false; + tmp &= MC_SEQ_MISC0__MT__MASK; + adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h index 385267c31d11..31bb89452e12 100644 --- a/drivers/gpu/drm/amd/amdgpu/vid.h +++ b/drivers/gpu/drm/amd/amdgpu/vid.h @@ -68,9 +68,14 @@ #define RB_BITMAP_WIDTH_PER_SH 2 -#define MC_SEQ_MISC0__GDDR5__SHIFT 0x1c -#define MC_SEQ_MISC0__GDDR5_MASK 0xf0000000 -#define MC_SEQ_MISC0__GDDR5_VALUE 5 +#define MC_SEQ_MISC0__MT__MASK 0xf0000000 +#define MC_SEQ_MISC0__MT__GDDR1 0x10000000 +#define MC_SEQ_MISC0__MT__DDR2 0x20000000 +#define MC_SEQ_MISC0__MT__GDDR3 0x30000000 +#define MC_SEQ_MISC0__MT__GDDR4 0x40000000 +#define MC_SEQ_MISC0__MT__GDDR5 0x50000000 +#define MC_SEQ_MISC0__MT__HBM 0x60000000 +#define MC_SEQ_MISC0__MT__DDR3 0xB0000000 /* * PM4 diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 3af5bd0e23a8..c90f4f0d059e 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -540,6 +540,15 @@ struct drm_amdgpu_info_firmware { uint32_t feature; }; +#define AMDGPU_VRAM_TYPE_UNKNOWN 0 +#define AMDGPU_VRAM_TYPE_GDDR1 1 +#define 
AMDGPU_VRAM_TYPE_DDR2 2 +#define AMDGPU_VRAM_TYPE_GDDR3 3 +#define AMDGPU_VRAM_TYPE_GDDR4 4 +#define AMDGPU_VRAM_TYPE_GDDR5 5 +#define AMDGPU_VRAM_TYPE_HBM 6 +#define AMDGPU_VRAM_TYPE_DDR3 7 + struct drm_amdgpu_info_device { /** PCI Device ID */ uint32_t device_id; @@ -575,6 +584,10 @@ struct drm_amdgpu_info_device { uint32_t gart_page_size; /** constant engine ram size*/ uint32_t ce_ram_size; + /** video memory type infro*/ + uint32_t vram_type; + /** video memory bit width*/ + uint32_t vram_bit_width; }; struct drm_amdgpu_info_hw_ip { -- cgit v1.2.3-71-gd317 From 3ccec53c294cbec2af44b6b24f70349637c45428 Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Tue, 2 Jun 2015 17:44:49 +0200 Subject: drm/amdgpu: only support IBs in the buffer list (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit amdgpu_cs_find_mapping doesn't work without all buffers being validated, so the TTM validation must be done first. v2: only use amdgpu_cs_find_mapping for UVD/VCE VM emulation Signed-off-by: Marek Olšák Reviewed-by: Christian König --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 95 +++++++++------------------------- include/uapi/drm/amdgpu_drm.h | 6 +-- 3 files changed, 25 insertions(+), 77 deletions(-) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 149b76913091..c33c1af36fa2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1191,7 +1191,6 @@ struct amdgpu_cs_parser { struct amdgpu_cs_chunk *chunks; /* relocations */ struct amdgpu_bo_list_entry *vm_bos; - struct amdgpu_bo_list_entry *ib_bos; struct list_head validated; struct amdgpu_ib *ibs; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index fefa48a59a7d..f6b224a69b3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -230,11 +230,6 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) goto out; } - p->ib_bos = kcalloc(p->num_ibs, sizeof(struct amdgpu_bo_list_entry), - GFP_KERNEL); - if (!p->ib_bos) - r = -ENOMEM; - out: kfree(chunk_array); return r; @@ -373,13 +368,6 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, &p->validated); - for (i = 0; i < p->num_ibs; i++) { - if (!p->ib_bos[i].robj) - continue; - - list_add(&p->ib_bos[i].tv.head, &p->validated); - } - if (need_mmap_lock) down_read(¤t->mm->mmap_sem); @@ -457,15 +445,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); - for (i = 0; i < parser->num_ibs; i++) { - struct amdgpu_bo *bo = parser->ib_bos[i].robj; + for (i = 0; i < parser->num_ibs; i++) amdgpu_ib_free(parser->adev, &parser->ibs[i]); - - if (bo) - drm_gem_object_unreference_unlocked(&bo->gem_base); - } kfree(parser->ibs); - kfree(parser->ib_bos); if (parser->uf.bo) drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); } @@ -505,21 +487,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, } } - for (i = 0; i < p->num_ibs; i++) { - bo = p->ib_bos[i].robj; - if (!bo) - continue; - - bo_va = p->ib_bos[i].bo_va; - if (!bo_va) - continue; - - r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem); - if (r) - return r; - - amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update); - } return 
amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync); } @@ -581,11 +548,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, struct amdgpu_cs_chunk *chunk; struct amdgpu_ib *ib; struct drm_amdgpu_cs_chunk_ib *chunk_ib; - struct amdgpu_bo_list_entry *ib_bo; struct amdgpu_ring *ring; - struct drm_gem_object *gobj; - struct amdgpu_bo *aobj; - void *kptr; chunk = &parser->chunks[i]; ib = &parser->ibs[j]; @@ -594,66 +557,49 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB) continue; - gobj = drm_gem_object_lookup(adev->ddev, parser->filp, chunk_ib->handle); - if (gobj == NULL) - return -ENOENT; - aobj = gem_to_amdgpu_bo(gobj); - r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type, chunk_ib->ip_instance, chunk_ib->ring, &ring); - if (r) { - drm_gem_object_unreference_unlocked(gobj); + if (r) return r; - } if (ring->funcs->parse_cs) { - r = amdgpu_bo_reserve(aobj, false); - if (r) { - drm_gem_object_unreference_unlocked(gobj); - return r; + struct amdgpu_bo *aobj = NULL; + void *kptr; + + amdgpu_cs_find_mapping(parser, chunk_ib->va_start, &aobj); + if (!aobj) { + DRM_ERROR("IB va_start is invalid\n"); + return -EINVAL; } + /* the IB should be reserved at this point */ r = amdgpu_bo_kmap(aobj, &kptr); if (r) { - amdgpu_bo_unreserve(aobj); - drm_gem_object_unreference_unlocked(gobj); return r; } r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib); if (r) { DRM_ERROR("Failed to get ib !\n"); - amdgpu_bo_unreserve(aobj); - drm_gem_object_unreference_unlocked(gobj); return r; } memcpy(ib->ptr, kptr, chunk_ib->ib_bytes); amdgpu_bo_kunmap(aobj); - amdgpu_bo_unreserve(aobj); } else { r = amdgpu_ib_get(ring, vm, 0, ib); if (r) { DRM_ERROR("Failed to get ib !\n"); - drm_gem_object_unreference_unlocked(gobj); return r; } ib->gpu_addr = chunk_ib->va_start; } - ib->length_dw = chunk_ib->ib_bytes / 4; + ib->length_dw = chunk_ib->ib_bytes / 4; ib->flags = chunk_ib->flags; ib->ctx = parser->ctx; - - ib_bo = &parser->ib_bos[j]; - ib_bo->robj = aobj; - ib_bo->prefered_domains = aobj->initial_domain; - ib_bo->allowed_domains = aobj->initial_domain; - ib_bo->priority = 0; - ib_bo->tv.bo = &aobj->tbo; - ib_bo->tv.shared = true; j++; } @@ -702,6 +648,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) union drm_amdgpu_cs *cs = data; struct amdgpu_cs_parser parser; int r, i; + bool reserved_buffers = false; down_read(&adev->exclusive_lock); if (!adev->accel_working) { @@ -721,15 +668,21 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return r; } - r = amdgpu_cs_ib_fill(adev, &parser); - if (!r) { - r = amdgpu_cs_parser_relocs(&parser); - if (r && r != -ERESTARTSYS) - DRM_ERROR("Failed to parse relocation %d!\n", r); + r = amdgpu_cs_parser_relocs(&parser); + if (r) { + if (r != -ERESTARTSYS) { + if (r == -ENOMEM) + DRM_ERROR("Not enough memory for command submission!\n"); + else + DRM_ERROR("Failed to process the buffer list %d!\n", r); + } + } else { + reserved_buffers = true; + r = amdgpu_cs_ib_fill(adev, &parser); } if (r) { - amdgpu_cs_parser_fini(&parser, r, false); + amdgpu_cs_parser_fini(&parser, r, reserved_buffers); up_read(&adev->exclusive_lock); r = amdgpu_cs_handle_lockup(adev, r); return r; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index c90f4f0d059e..780a5815fb12 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -378,11 +378,7 @@ union drm_amdgpu_cs { #define AMDGPU_IB_FLAG_PREAMBLE (1<<2) struct drm_amdgpu_cs_chunk_ib { 
- /** - * Handle of GEM object to be used as IB or 0 if it is already in - * residency list. - */ - uint32_t handle; + uint32_t _pad; uint32_t flags; /* IB Flags */ uint64_t va_start; /* Virtual address to begin IB execution */ uint32_t ib_bytes; /* Size of submission */ -- cgit v1.2.3-71-gd317 From cab6d57c09ece2ceb03602dd44ea2f4ce9333ec9 Mon Sep 17 00:00:00 2001 From: Jammy Zhou Date: Sat, 6 Jun 2015 04:49:22 +0800 Subject: drm/amdgpu: remove unused AMDGPU_IB_FLAG_GDS Signed-off-by: Jammy Zhou Reviewed-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'include/uapi/drm') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 780a5815fb12..3b911b604fdf 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -371,11 +371,8 @@ union drm_amdgpu_cs { /* This IB should be submitted to CE */ #define AMDGPU_IB_FLAG_CE (1<<0) -/* GDS is used by this IB */ -#define AMDGPU_IB_FLAG_GDS (1<<1) - /* CE Preamble */ -#define AMDGPU_IB_FLAG_PREAMBLE (1<<2) +#define AMDGPU_IB_FLAG_PREAMBLE (1<<1) struct drm_amdgpu_cs_chunk_ib { uint32_t _pad; @@ -580,7 +577,7 @@ struct drm_amdgpu_info_device { uint32_t gart_page_size; /** constant engine ram size*/ uint32_t ce_ram_size; - /** video memory type infro*/ + /** video memory type info*/ uint32_t vram_type; /** video memory bit width*/ uint32_t vram_bit_width; -- cgit v1.2.3-71-gd317 From 34b5f6a6d6d0e482c7ce498f60bce261e533821e Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 8 Jun 2015 15:03:00 +0200 Subject: drm/amdgpu: cleanup VA IOCTL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the unnecessary returned status and make the IOCTL write only. 
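To show what a call site looks like after this cleanup (a sketch, not from the series): userspace fills struct drm_amdgpu_gem_va, issues the now write-only ioctl and relies purely on the return code, since there is no out.result to inspect any more. The address and size restrictions are the ones enforced in amdgpu_gem_va_ioctl() below.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "amdgpu_drm.h"

static int example_map_bo(int fd, uint32_t bo_handle, uint64_t va_address,
			  uint64_t size)
{
	struct drm_amdgpu_gem_va args;

	memset(&args, 0, sizeof(args));
	args.handle = bo_handle;
	args.operation = AMDGPU_VA_OP_MAP;
	args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	args.va_address = va_address;	/* must lie outside the reserved low range */
	args.offset_in_bo = 0;
	args.map_size = size;		/* must be correctly aligned */

	return ioctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &args) ? -1 : 0;
}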
Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 64 +++++++++++---------------------- include/uapi/drm/amdgpu_drm.h | 18 ++-------- 2 files changed, 23 insertions(+), 59 deletions(-) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index e8409fea4bf1..0ec222295fee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -505,7 +505,7 @@ error_free: int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - union drm_amdgpu_gem_va *args = data; + struct drm_amdgpu_gem_va *args = data; struct drm_gem_object *gobj; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_fpriv *fpriv = filp->driver_priv; @@ -514,95 +514,73 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, uint32_t invalid_flags, va_flags = 0; int r = 0; - if (!adev->vm_manager.enabled) { - memset(args, 0, sizeof(*args)); - args->out.result = AMDGPU_VA_RESULT_ERROR; + if (!adev->vm_manager.enabled) return -ENOTTY; - } - if (args->in.va_address < AMDGPU_VA_RESERVED_SIZE) { + if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { dev_err(&dev->pdev->dev, "va_address 0x%lX is in reserved area 0x%X\n", - (unsigned long)args->in.va_address, + (unsigned long)args->va_address, AMDGPU_VA_RESERVED_SIZE); - memset(args, 0, sizeof(*args)); - args->out.result = AMDGPU_VA_RESULT_ERROR; return -EINVAL; } invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE); - if ((args->in.flags & invalid_flags)) { + if ((args->flags & invalid_flags)) { dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n", - args->in.flags, invalid_flags); - memset(args, 0, sizeof(*args)); - args->out.result = AMDGPU_VA_RESULT_ERROR; + args->flags, invalid_flags); return -EINVAL; } - switch (args->in.operation) { + switch (args->operation) { case AMDGPU_VA_OP_MAP: case AMDGPU_VA_OP_UNMAP: break; default: dev_err(&dev->pdev->dev, "unsupported operation %d\n", - args->in.operation); - memset(args, 0, sizeof(*args)); - args->out.result = AMDGPU_VA_RESULT_ERROR; + args->operation); return -EINVAL; } - gobj = drm_gem_object_lookup(dev, filp, args->in.handle); - if (gobj == NULL) { - memset(args, 0, sizeof(*args)); - args->out.result = AMDGPU_VA_RESULT_ERROR; + gobj = drm_gem_object_lookup(dev, filp, args->handle); + if (gobj == NULL) return -ENOENT; - } + rbo = gem_to_amdgpu_bo(gobj); r = amdgpu_bo_reserve(rbo, false); if (r) { - if (r != -ERESTARTSYS) { - memset(args, 0, sizeof(*args)); - args->out.result = AMDGPU_VA_RESULT_ERROR; - } drm_gem_object_unreference_unlocked(gobj); return r; } + bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); if (!bo_va) { - memset(args, 0, sizeof(*args)); - args->out.result = AMDGPU_VA_RESULT_ERROR; - drm_gem_object_unreference_unlocked(gobj); + amdgpu_bo_unreserve(rbo); return -ENOENT; } - switch (args->in.operation) { + switch (args->operation) { case AMDGPU_VA_OP_MAP: - if (args->in.flags & AMDGPU_VM_PAGE_READABLE) + if (args->flags & AMDGPU_VM_PAGE_READABLE) va_flags |= AMDGPU_PTE_READABLE; - if (args->in.flags & AMDGPU_VM_PAGE_WRITEABLE) + if (args->flags & AMDGPU_VM_PAGE_WRITEABLE) va_flags |= AMDGPU_PTE_WRITEABLE; - if (args->in.flags & AMDGPU_VM_PAGE_EXECUTABLE) + if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE) va_flags |= AMDGPU_PTE_EXECUTABLE; - r = amdgpu_vm_bo_map(adev, bo_va, args->in.va_address, - args->in.offset_in_bo, args->in.map_size, + r = amdgpu_vm_bo_map(adev, bo_va, 
args->va_address, + args->offset_in_bo, args->map_size, va_flags); break; case AMDGPU_VA_OP_UNMAP: - r = amdgpu_vm_bo_unmap(adev, bo_va, args->in.va_address); + r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address); break; default: break; } - if (!r) { + if (!r) amdgpu_gem_va_update_vm(adev, bo_va); - memset(args, 0, sizeof(*args)); - args->out.result = AMDGPU_VA_RESULT_OK; - } else { - memset(args, 0, sizeof(*args)); - args->out.result = AMDGPU_VA_RESULT_ERROR; - } drm_gem_object_unreference_unlocked(gobj); return r; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 3b911b604fdf..4c465e10a5a6 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -55,7 +55,7 @@ #define DRM_IOCTL_AMDGPU_INFO DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info) #define DRM_IOCTL_AMDGPU_GEM_METADATA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata) #define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle) -#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, union drm_amdgpu_gem_va) +#define DRM_IOCTL_AMDGPU_GEM_VA DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va) #define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs) #define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op) #define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr) @@ -290,10 +290,6 @@ struct drm_amdgpu_gem_op { #define AMDGPU_VA_OP_MAP 1 #define AMDGPU_VA_OP_UNMAP 2 -#define AMDGPU_VA_RESULT_OK 0 -#define AMDGPU_VA_RESULT_ERROR 1 -#define AMDGPU_VA_RESULT_VA_INVALID_ALIGNMENT 2 - /* Mapping flags */ /* readable mapping */ #define AMDGPU_VM_PAGE_READABLE (1 << 1) @@ -302,7 +298,7 @@ struct drm_amdgpu_gem_op { /* executable mapping, new for VI */ #define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3) -struct drm_amdgpu_gem_va_in { +struct drm_amdgpu_gem_va { /* GEM object handle */ uint32_t handle; uint32_t _pad; @@ -319,16 +315,6 @@ struct drm_amdgpu_gem_va_in { uint64_t map_size; }; -struct drm_amdgpu_gem_va_out { - uint32_t result; - uint32_t _pad; -}; - -union drm_amdgpu_gem_va { - struct drm_amdgpu_gem_va_in in; - struct drm_amdgpu_gem_va_out out; -}; - #define AMDGPU_HW_IP_GFX 0 #define AMDGPU_HW_IP_COMPUTE 1 #define AMDGPU_HW_IP_DMA 2 -- cgit v1.2.3-71-gd317 From 692a59e696afe1a4e777d0e4359325336ab0ad89 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 9 Jun 2015 15:21:35 +0200 Subject: drm/amdgpu: remove AMDGPU_CTX_OP_STATE_RUNNING MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not used. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/uapi/drm') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 4c465e10a5a6..a82c0601a294 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -139,8 +139,6 @@ union drm_amdgpu_bo_list { #define AMDGPU_CTX_OP_FREE_CTX 2 #define AMDGPU_CTX_OP_QUERY_STATE 3 -#define AMDGPU_CTX_OP_STATE_RUNNING 1 - /* GPU reset status */ #define AMDGPU_CTX_NO_RESET 0 #define AMDGPU_CTX_GUILTY_RESET 1 /* this the context caused it */ -- cgit v1.2.3-71-gd317 From 675da0ddd6fefa5500488a5a3d500aaaefa95e5d Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 9 Jun 2015 15:54:37 +0200 Subject: drm/amdgpu: cleanup UAPI comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No functional change. Signed-off-by: Christian König Reviewed-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 119 +++++++++++++++++++++++++++--------------- 1 file changed, 78 insertions(+), 41 deletions(-) (limited to 'include/uapi/drm') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index a82c0601a294..d3f4832db289 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -141,12 +141,17 @@ union drm_amdgpu_bo_list { /* GPU reset status */ #define AMDGPU_CTX_NO_RESET 0 -#define AMDGPU_CTX_GUILTY_RESET 1 /* this the context caused it */ -#define AMDGPU_CTX_INNOCENT_RESET 2 /* some other context caused it */ -#define AMDGPU_CTX_UNKNOWN_RESET 3 /* unknown cause */ +/* this the context caused it */ +#define AMDGPU_CTX_GUILTY_RESET 1 +/* some other context caused it */ +#define AMDGPU_CTX_INNOCENT_RESET 2 +/* unknown cause */ +#define AMDGPU_CTX_UNKNOWN_RESET 3 struct drm_amdgpu_ctx_in { + /** AMDGPU_CTX_OP_* */ uint32_t op; + /** For future use, no flags defined so far */ uint32_t flags; uint32_t ctx_id; uint32_t _pad; @@ -159,6 +164,7 @@ union drm_amdgpu_ctx_out { } alloc; struct { + /** For future use, no flags defined so far */ uint64_t flags; /** Number of resets caused by this context so far. 
*/ uint32_t hangs; @@ -185,7 +191,9 @@ union drm_amdgpu_ctx { struct drm_amdgpu_gem_userptr { uint64_t addr; uint64_t size; + /* AMDGPU_GEM_USERPTR_* */ uint32_t flags; + /* Resulting GEM handle */ uint32_t handle; }; @@ -217,23 +225,29 @@ struct drm_amdgpu_gem_userptr { /** The same structure is shared for input/output */ struct drm_amdgpu_gem_metadata { - uint32_t handle; /* GEM Object handle */ - uint32_t op; /** Do we want get or set metadata */ + /** GEM Object handle */ + uint32_t handle; + /** Do we want get or set metadata */ + uint32_t op; struct { + /** For future use, no flags defined so far */ uint64_t flags; - uint64_t tiling_info; /* family specific tiling info */ + /** family specific tiling info */ + uint64_t tiling_info; uint32_t data_size_bytes; uint32_t data[64]; } data; }; struct drm_amdgpu_gem_mmap_in { - uint32_t handle; /** the GEM object handle */ + /** the GEM object handle */ + uint32_t handle; uint32_t _pad; }; struct drm_amdgpu_gem_mmap_out { - uint64_t addr_ptr; /** mmap offset from the vma offset manager */ + /** mmap offset from the vma offset manager */ + uint64_t addr_ptr; }; union drm_amdgpu_gem_mmap { @@ -242,14 +256,19 @@ union drm_amdgpu_gem_mmap { }; struct drm_amdgpu_gem_wait_idle_in { - uint32_t handle; /* GEM object handle */ + /** GEM object handle */ + uint32_t handle; + /** For future use, no flags defined so far */ uint32_t flags; - uint64_t timeout; /* Timeout to wait. If 0 then returned immediately with the status */ + /** Absolute timeout to wait */ + uint64_t timeout; }; struct drm_amdgpu_gem_wait_idle_out { - uint32_t status; /* BO status: 0 - BO is idle, 1 - BO is busy */ - uint32_t domain; /* Returned current memory domain */ + /** BO status: 0 - BO is idle, 1 - BO is busy */ + uint32_t status; + /** Returned current memory domain */ + uint32_t domain; }; union drm_amdgpu_gem_wait_idle { @@ -258,7 +277,9 @@ union drm_amdgpu_gem_wait_idle { }; struct drm_amdgpu_wait_cs_in { + /** Command submission handle */ uint64_t handle; + /** Absolute timeout to wait */ uint64_t timeout; uint32_t ip_type; uint32_t ip_instance; @@ -267,6 +288,7 @@ struct drm_amdgpu_wait_cs_in { }; struct drm_amdgpu_wait_cs_out { + /** CS status: 0 - CS completed, 1 - CS still busy */ uint64_t status; }; @@ -275,16 +297,19 @@ union drm_amdgpu_wait_cs { struct drm_amdgpu_wait_cs_out out; }; +#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0 +#define AMDGPU_GEM_OP_SET_PLACEMENT 1 + /* Sets or returns a value associated with a buffer. */ struct drm_amdgpu_gem_op { - uint32_t handle; /* buffer */ - uint32_t op; /* AMDGPU_GEM_OP_* */ - uint64_t value; /* input or return value */ + /** GEM object handle */ + uint32_t handle; + /** AMDGPU_GEM_OP_* */ + uint32_t op; + /** Input or return value */ + uint64_t value; }; -#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0 -#define AMDGPU_GEM_OP_SET_PLACEMENT 1 - #define AMDGPU_VA_OP_MAP 1 #define AMDGPU_VA_OP_UNMAP 2 @@ -297,19 +322,18 @@ struct drm_amdgpu_gem_op { #define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3) struct drm_amdgpu_gem_va { - /* GEM object handle */ + /** GEM object handle */ uint32_t handle; uint32_t _pad; - /* map or unmap*/ + /** AMDGPU_VA_OP_* */ uint32_t operation; - /* specify mapping flags */ + /** AMDGPU_VM_PAGE_* */ uint32_t flags; - /* va address to assign . Must be correctly aligned.*/ + /** va address to assign . Must be correctly aligned.*/ uint64_t va_address; - /* Specify offset inside of BO to assign. Must be correctly aligned.*/ + /** Specify offset inside of BO to assign. 
Must be correctly aligned.*/ uint64_t offset_in_bo; - /* Specify mapping size. If 0 and offset is 0 then map the whole BO.*/ - /* Must be correctly aligned. */ + /** Specify mapping size. Must be correctly aligned. */ uint64_t map_size; }; @@ -324,6 +348,7 @@ struct drm_amdgpu_gem_va { #define AMDGPU_CHUNK_ID_IB 0x01 #define AMDGPU_CHUNK_ID_FENCE 0x02 + struct drm_amdgpu_cs_chunk { uint32_t chunk_id; uint32_t length_dw; @@ -337,7 +362,7 @@ struct drm_amdgpu_cs_in { uint32_t bo_list_handle; uint32_t num_chunks; uint32_t _pad; - /* this points to uint64_t * which point to cs chunks */ + /** this points to uint64_t * which point to cs chunks */ uint64_t chunks; }; @@ -346,8 +371,8 @@ struct drm_amdgpu_cs_out { }; union drm_amdgpu_cs { - struct drm_amdgpu_cs_in in; - struct drm_amdgpu_cs_out out; + struct drm_amdgpu_cs_in in; + struct drm_amdgpu_cs_out out; }; /* Specify flags to be used for IB */ @@ -360,12 +385,18 @@ union drm_amdgpu_cs { struct drm_amdgpu_cs_chunk_ib { uint32_t _pad; - uint32_t flags; /* IB Flags */ - uint64_t va_start; /* Virtual address to begin IB execution */ - uint32_t ib_bytes; /* Size of submission */ - uint32_t ip_type; /* HW IP to submit to */ - uint32_t ip_instance; /* HW IP index of the same type to submit to */ - uint32_t ring; /* Ring index to submit to */ + /** AMDGPU_IB_FLAG_* */ + uint32_t flags; + /** Virtual address to begin IB execution */ + uint64_t va_start; + /** Size of submission */ + uint32_t ib_bytes; + /** HW IP to submit to */ + uint32_t ip_type; + /** HW IP index of the same type to submit to */ + uint32_t ip_instance; + /** Ring index to submit to */ + uint32_t ring; }; struct drm_amdgpu_cs_chunk_fence { @@ -460,23 +491,28 @@ struct drm_amdgpu_info { /** AMDGPU_HW_IP_* */ uint32_t type; /** - * Index of the IP if there are more IPs of the same type. - * Ignored by AMDGPU_INFO_HW_IP_COUNT. + * Index of the IP if there are more IPs of the same + * type. Ignored by AMDGPU_INFO_HW_IP_COUNT. */ uint32_t ip_instance; } query_hw_ip; struct { uint32_t dword_offset; - uint32_t count; /* number of registers to read */ + /** number of registers to read */ + uint32_t count; uint32_t instance; + /** For future use, no flags defined so far */ uint32_t flags; } read_mmr_reg; struct { /** AMDGPU_INFO_FW_* */ uint32_t fw_type; - /** Index of the IP if there are more IPs of the same type. */ + /** + * Index of the IP if there are more IPs of + * the same type. + */ uint32_t ip_instance; /** * Index of the engine. Whether this is used depends @@ -537,9 +573,10 @@ struct drm_amdgpu_info_device { uint32_t family; uint32_t num_shader_engines; uint32_t num_shader_arrays_per_engine; - uint32_t gpu_counter_freq; /* in KHz */ - uint64_t max_engine_clock; /* in KHz */ - uint64_t max_memory_clock; /* in KHz */ + /* in KHz */ + uint32_t gpu_counter_freq; + uint64_t max_engine_clock; + uint64_t max_memory_clock; /* cu information */ uint32_t cu_active_number; uint32_t cu_ao_mask; -- cgit v1.2.3-71-gd317 From 570655b09b065d2fff1b8ab9bdb8308f4c5a05a3 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Fri, 30 Jan 2015 20:18:11 +0530 Subject: drm/msm/mdp4: Support NV12MT format in mdp4 Using fb modifier flag, support NV12MT format in MDP4. 
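A sketch of how userspace might advertise such a buffer to KMS (illustrative only, not part of the patch): an NV12 framebuffer whose planes carry the Samsung 64x32 macro-tile modifier, obeying the width/height/pitch restrictions framebuffer_check() gains below. DRM_MODE_FB_MODIFIERS and the drm_mode_fb_cmd2 modifier[] array come from the generic fb-modifier work and are assumed here.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"		/* include paths depend on your setup */
#include "drm_fourcc.h"

static int example_addfb_nv12mt(int fd, uint32_t gem_handle, uint32_t width,
				uint32_t height, uint32_t pitch,
				uint32_t uv_offset, uint32_t *fb_id)
{
	struct drm_mode_fb_cmd2 cmd;

	/* Mirror the kernel-side checks added for this modifier. */
	if (width % 128 || height % 32 || pitch % 128)
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.width = width;
	cmd.height = height;
	cmd.pixel_format = DRM_FORMAT_NV12;
	cmd.flags = DRM_MODE_FB_MODIFIERS;	/* assumed flag from the fb-modifier work */

	cmd.handles[0] = gem_handle;		/* Y plane */
	cmd.pitches[0] = pitch;
	cmd.modifier[0] = DRM_FORMAT_MOD_SAMSUNG_64_32_TILE;

	cmd.handles[1] = gem_handle;		/* interleaved CbCr plane */
	cmd.pitches[1] = pitch;
	cmd.offsets[1] = uv_offset;
	cmd.modifier[1] = DRM_FORMAT_MOD_SAMSUNG_64_32_TILE;

	if (ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd))
		return -1;

	*fb_id = cmd.fb_id;
	return 0;
}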
v2: - rework the modifier's description [Daniel Vetter's comment] - drop .set_mode_config() callback [Rob Clark's comment] v3: - change VENDOR's name and restrict usage to NV12 [pointed by Daniel] Signed-off-by: Rob Clark --- drivers/gpu/drm/drm_crtc.c | 18 ++++++++++++++++++ drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 2 ++ drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 22 ++++++++++++++++++++++ include/uapi/drm/drm_fourcc.h | 15 +++++++++++++++ 4 files changed, 57 insertions(+) (limited to 'include/uapi/drm') diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 77f87b23a6e7..b69ed97d447c 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -3255,6 +3255,24 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) r->modifier[i], i); return -EINVAL; } + + /* modifier specific checks: */ + switch (r->modifier[i]) { + case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE: + /* NOTE: the pitch restriction may be lifted later if it turns + * out that no hw has this restriction: + */ + if (r->pixel_format != DRM_FORMAT_NV12 || + width % 128 || height % 32 || + r->pitches[i] % 128) { + DRM_DEBUG_KMS("bad modifier data for plane %d\n", i); + return -EINVAL; + } + break; + + default: + break; + } } for (i = num_planes; i < 4; i++) { diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index d847b9436194..88a75cbc8f71 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -119,6 +119,8 @@ static int mdp4_hw_init(struct msm_kms *kms) if (mdp4_kms->rev > 1) mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1); + dev->mode_config.allow_fb_modifiers = true; + out: pm_runtime_put_sync(dev->dev); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index dbc068988377..0d1dbb737933 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -33,6 +33,21 @@ struct mdp4_plane { }; #define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base) +/* MDP format helper functions */ +static inline +enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb) +{ + bool is_tile = false; + + if (fb->modifier[1] == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE) + is_tile = true; + + if (fb->pixel_format == DRM_FORMAT_NV12 && is_tile) + return FRAME_TILE_YCBCR_420; + + return FRAME_LINEAR; +} + static void mdp4_plane_set_scanout(struct drm_plane *plane, struct drm_framebuffer *fb); static int mdp4_plane_mode_set(struct drm_plane *plane, @@ -205,6 +220,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, uint32_t op_mode = 0; uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; + enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb); if (!(crtc && fb)) { DBG("%s: disabled!", mdp4_plane->name); @@ -304,6 +320,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) | MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) | + MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(frame_type) | COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT)); mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe), @@ -324,6 +341,11 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step); mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step); + if (frame_type != FRAME_LINEAR) + 
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SSTILE_FRAME_SIZE(pipe), + MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(src_w) | + MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(src_h)); + return 0; } diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 07735822a28f..2f295cde657e 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -207,4 +207,19 @@ */ #define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3) +/* + * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks + * + * Macroblocks are laid in a Z-shape, and each pixel data is following the + * standard NV12 style. + * As for NV12, an image is the result of two frame buffers: one for Y, + * one for the interleaved Cb/Cr components (1/2 the height of the Y buffer). + * Alignment requirements are (for each buffer): + * - multiple of 128 pixels for the width + * - multiple of 32 pixels for the height + * + * For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html + */ +#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) + #endif /* DRM_FOURCC_H */ -- cgit v1.2.3-71-gd317 From 7f8fc88613d6310993e8d7d827e0b956b3a744fa Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Sat, 30 May 2015 17:38:08 +0200 Subject: drm/msm: use __s32, __s64, __u32 and __u64 from linux/types.h for uabi MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes userspace compilation errors like: error: unknown type name ‘uint32_t’ Signed-off-by: Mikko Rapeli Signed-off-by: Rob Clark --- include/uapi/drm/msm_drm.h | 76 +++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 38 deletions(-) (limited to 'include/uapi/drm') diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 0664c31f010c..75a232b9a970 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -23,7 +23,7 @@ /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints: - * 1) Do not use pointers, use uint64_t instead for 32 bit / 64 bit + * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit * user/kernel compatibility * 2) Keep fields aligned to their size * 3) Because of how drm_ioctl() works, we can add new fields at @@ -44,8 +44,8 @@ * same as 'struct timespec' but 32/64b ABI safe. 
*/ struct drm_msm_timespec { - int64_t tv_sec; /* seconds */ - int64_t tv_nsec; /* nanoseconds */ + __s64 tv_sec; /* seconds */ + __s64 tv_nsec; /* nanoseconds */ }; #define MSM_PARAM_GPU_ID 0x01 @@ -53,9 +53,9 @@ struct drm_msm_timespec { #define MSM_PARAM_CHIP_ID 0x03 struct drm_msm_param { - uint32_t pipe; /* in, MSM_PIPE_x */ - uint32_t param; /* in, MSM_PARAM_x */ - uint64_t value; /* out (get_param) or in (set_param) */ + __u32 pipe; /* in, MSM_PIPE_x */ + __u32 param; /* in, MSM_PARAM_x */ + __u64 value; /* out (get_param) or in (set_param) */ }; /* @@ -77,15 +77,15 @@ struct drm_msm_param { MSM_BO_UNCACHED) struct drm_msm_gem_new { - uint64_t size; /* in */ - uint32_t flags; /* in, mask of MSM_BO_x */ - uint32_t handle; /* out */ + __u64 size; /* in */ + __u32 flags; /* in, mask of MSM_BO_x */ + __u32 handle; /* out */ }; struct drm_msm_gem_info { - uint32_t handle; /* in */ - uint32_t pad; - uint64_t offset; /* out, offset to pass to mmap() */ + __u32 handle; /* in */ + __u32 pad; + __u64 offset; /* out, offset to pass to mmap() */ }; #define MSM_PREP_READ 0x01 @@ -95,13 +95,13 @@ struct drm_msm_gem_info { #define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC) struct drm_msm_gem_cpu_prep { - uint32_t handle; /* in */ - uint32_t op; /* in, mask of MSM_PREP_x */ + __u32 handle; /* in */ + __u32 op; /* in, mask of MSM_PREP_x */ struct drm_msm_timespec timeout; /* in */ }; struct drm_msm_gem_cpu_fini { - uint32_t handle; /* in */ + __u32 handle; /* in */ }; /* @@ -120,11 +120,11 @@ struct drm_msm_gem_cpu_fini { * otherwise EINVAL. */ struct drm_msm_gem_submit_reloc { - uint32_t submit_offset; /* in, offset from submit_bo */ - uint32_t or; /* in, value OR'd with result */ - int32_t shift; /* in, amount of left shift (can be negative) */ - uint32_t reloc_idx; /* in, index of reloc_bo buffer */ - uint64_t reloc_offset; /* in, offset from start of reloc_bo */ + __u32 submit_offset; /* in, offset from submit_bo */ + __u32 or; /* in, value OR'd with result */ + __s32 shift; /* in, amount of left shift (can be negative) */ + __u32 reloc_idx; /* in, index of reloc_bo buffer */ + __u64 reloc_offset; /* in, offset from start of reloc_bo */ }; /* submit-types: @@ -139,13 +139,13 @@ struct drm_msm_gem_submit_reloc { #define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002 #define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003 struct drm_msm_gem_submit_cmd { - uint32_t type; /* in, one of MSM_SUBMIT_CMD_x */ - uint32_t submit_idx; /* in, index of submit_bo cmdstream buffer */ - uint32_t submit_offset; /* in, offset into submit_bo */ - uint32_t size; /* in, cmdstream size */ - uint32_t pad; - uint32_t nr_relocs; /* in, number of submit_reloc's */ - uint64_t __user relocs; /* in, ptr to array of submit_reloc's */ + __u32 type; /* in, one of MSM_SUBMIT_CMD_x */ + __u32 submit_idx; /* in, index of submit_bo cmdstream buffer */ + __u32 submit_offset; /* in, offset into submit_bo */ + __u32 size; /* in, cmdstream size */ + __u32 pad; + __u32 nr_relocs; /* in, number of submit_reloc's */ + __u64 __user relocs; /* in, ptr to array of submit_reloc's */ }; /* Each buffer referenced elsewhere in the cmdstream submit (ie. 
the @@ -165,9 +165,9 @@ struct drm_msm_gem_submit_cmd { #define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE) struct drm_msm_gem_submit_bo { - uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */ - uint32_t handle; /* in, GEM handle */ - uint64_t presumed; /* in/out, presumed buffer address */ + __u32 flags; /* in, mask of MSM_SUBMIT_BO_x */ + __u32 handle; /* in, GEM handle */ + __u64 presumed; /* in/out, presumed buffer address */ }; /* Each cmdstream submit consists of a table of buffers involved, and @@ -175,12 +175,12 @@ struct drm_msm_gem_submit_bo { * (context-restore), and IB buffers needed for per tile/bin draw cmds. */ struct drm_msm_gem_submit { - uint32_t pipe; /* in, MSM_PIPE_x */ - uint32_t fence; /* out */ - uint32_t nr_bos; /* in, number of submit_bo's */ - uint32_t nr_cmds; /* in, number of submit_cmd's */ - uint64_t __user bos; /* in, ptr to array of submit_bo's */ - uint64_t __user cmds; /* in, ptr to array of submit_cmd's */ + __u32 pipe; /* in, MSM_PIPE_x */ + __u32 fence; /* out */ + __u32 nr_bos; /* in, number of submit_bo's */ + __u32 nr_cmds; /* in, number of submit_cmd's */ + __u64 __user bos; /* in, ptr to array of submit_bo's */ + __u64 __user cmds; /* in, ptr to array of submit_cmd's */ }; /* The normal way to synchronize with the GPU is just to CPU_PREP on @@ -191,8 +191,8 @@ struct drm_msm_gem_submit { * APIs without requiring a dummy bo to synchronize on. */ struct drm_msm_wait_fence { - uint32_t fence; /* in */ - uint32_t pad; + __u32 fence; /* in */ + __u32 pad; struct drm_msm_timespec timeout; /* in */ }; -- cgit v1.2.3-71-gd317
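For illustration only (not from these patches): a minimal userspace sketch showing why the switch to fixed-width __u64/__u32 types matters, passing the bos/cmds arrays of drm_msm_gem_submit as __u64 values and then waiting on the returned fence. Handle contents, the pipe choice and the one-second deadline are placeholders; the timeout is filled as an absolute CLOCK_MONOTONIC time, matching how libdrm's freedreno code uses drm_msm_timespec, and the header path is assumed to match the installed uapi headers.

#include <stdint.h>
#include <string.h>
#include <time.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>      /* the header patched above; install path may differ */

#define VOID2U64(p)  ((__u64)(uintptr_t)(p))   /* rule 1: no raw pointers in the ABI */

static int submit_and_wait(int drm_fd,
                           struct drm_msm_gem_submit_bo *bos, __u32 nr_bos,
                           struct drm_msm_gem_submit_cmd *cmds, __u32 nr_cmds)
{
        struct drm_msm_gem_submit req;
        struct drm_msm_wait_fence wait;
        struct timespec now;

        memset(&req, 0, sizeof(req));
        req.pipe    = MSM_PIPE_3D0;
        req.nr_bos  = nr_bos;
        req.bos     = VOID2U64(bos);    /* array of drm_msm_gem_submit_bo  */
        req.nr_cmds = nr_cmds;
        req.cmds    = VOID2U64(cmds);   /* array of drm_msm_gem_submit_cmd */

        if (ioctl(drm_fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req))
                return -1;

        /* Wait roughly one second for the fence returned by the submit. */
        clock_gettime(CLOCK_MONOTONIC, &now);
        memset(&wait, 0, sizeof(wait));
        wait.fence           = req.fence;
        wait.timeout.tv_sec  = now.tv_sec + 1;   /* absolute deadline */
        wait.timeout.tv_nsec = now.tv_nsec;

        return ioctl(drm_fd, DRM_IOCTL_MSM_WAIT_FENCE, &wait);
}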