author | Maxime Ripard <maxime.ripard@bootlin.com> | 2019-02-11 12:35:35 +0300 |
---|---|---|
committer | Maxime Ripard <maxime.ripard@bootlin.com> | 2019-02-11 12:35:35 +0300 |
commit | d588100baa28dae6a5c32d02bfe744d0792ed2ad (patch) | |
tree | 3be3984cea35a07b4259f8ebe55ce0e52e416d1c /include | |
parent | 7bd0a3271e239d34bae5fa51e4f44ed9ee861a59 (diff) | |
parent | 16065fcdd19ddb9e093192914ac863884f308766 (diff) | |
download | linux-d588100baa28dae6a5c32d02bfe744d0792ed2ad.tar.xz |
Merge drm/drm-next into drm-misc-next
We need to backmerge drm-next to fix the komeda build failure.
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
Diffstat (limited to 'include')
-rw-r--r-- | include/drm/drm_color_mgmt.h | 28
-rw-r--r-- | include/drm/gpu_scheduler.h | 13
-rw-r--r-- | include/drm/i915_pciids.h | 8
-rw-r--r-- | include/drm/ttm/ttm_bo_api.h | 28
-rw-r--r-- | include/drm/ttm/ttm_bo_driver.h | 9
-rw-r--r-- | include/trace/events/host1x.h | 26
-rw-r--r-- | include/uapi/drm/amdgpu_drm.h | 6
-rw-r--r-- | include/uapi/drm/drm_fourcc.h | 3
-rw-r--r-- | include/uapi/drm/i915_drm.h | 64
9 files changed, 150 insertions, 35 deletions
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index 90ef9996d9a4..d1c662d92ab7 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -69,4 +69,32 @@ int drm_plane_create_color_properties(struct drm_plane *plane,
 				      u32 supported_ranges,
 				      enum drm_color_encoding default_encoding,
 				      enum drm_color_range default_range);
+
+/**
+ * enum drm_color_lut_tests - hw-specific LUT tests to perform
+ *
+ * The drm_color_lut_check() function takes a bitmask of the values here to
+ * determine which tests to apply to a userspace-provided LUT.
+ */
+enum drm_color_lut_tests {
+	/**
+	 * @DRM_COLOR_LUT_EQUAL_CHANNELS:
+	 *
+	 * Checks whether the entries of a LUT all have equal values for the
+	 * red, green, and blue channels. Intended for hardware that only
+	 * accepts a single value per LUT entry and assumes that value applies
+	 * to all three color components.
+	 */
+	DRM_COLOR_LUT_EQUAL_CHANNELS = BIT(0),
+
+	/**
+	 * @DRM_COLOR_LUT_NON_DECREASING:
+	 *
+	 * Checks whether the entries of a LUT are always flat or increasing
+	 * (never decreasing).
+	 */
+	DRM_COLOR_LUT_NON_DECREASING = BIT(1),
+};
+
+int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests);
 #endif
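For context, drm_color_lut_check() added above takes the LUT blob plus a bitmask of drm_color_lut_tests and returns 0 on success or a negative error code when a requested test fails, so a driver can simply propagate its result. A minimal sketch, assuming a hypothetical foo_* driver whose hardware needs both checks (not code from this merge):

#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>

/* Hypothetical CRTC atomic check: the hw is assumed to load one value per
 * LUT entry and to require a monotonically non-decreasing ramp. */
static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_crtc_state *state)
{
	return drm_color_lut_check(state->gamma_lut,
				   DRM_COLOR_LUT_EQUAL_CHANNELS |
				   DRM_COLOR_LUT_NON_DECREASING);
}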
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 47e19796c450..0daca4d8dad9 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -138,10 +138,6 @@ struct drm_sched_fence {
 	struct dma_fence		finished;
 
 	/**
-	 * @cb: the callback for the parent fence below.
-	 */
-	struct dma_fence_cb		cb;
-	/**
 	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
 	 * when scheduling the job on hardware. We signal the
 	 * &drm_sched_fence.finished fence once parent is signalled.
@@ -181,6 +177,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  *         be scheduled further.
  * @s_priority: the priority of the job.
  * @entity: the entity to which this job belongs.
+ * @cb: the callback for the parent fence in s_fence.
  *
  * A job is created by the driver using drm_sched_job_init(), and
  * should call drm_sched_entity_push_job() once it wants the scheduler
@@ -197,6 +194,7 @@ struct drm_sched_job {
 	atomic_t			karma;
 	enum drm_sched_priority		s_priority;
 	struct drm_sched_entity		*entity;
+	struct dma_fence_cb		cb;
 };
 
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
@@ -298,9 +296,10 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		       void *owner);
 void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
-void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
-			    struct drm_sched_job *job);
-void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_increase_karma(struct drm_sched_job *bad);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
 				    struct drm_sched_entity *entity);
 void drm_sched_fault(struct drm_gpu_scheduler *sched);
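The declarations above replace the drm_sched_hw_job_reset()/drm_sched_job_recovery() pair with a finer-grained stop/karma/resubmit/start sequence that drivers run from their timeout handling. A rough sketch of how a timedout_job hook might use the new helpers; the foo_* names and the placement of the hardware reset are assumptions, not taken from this merge:

#include <drm/gpu_scheduler.h>

/* Hypothetical timeout handler built on the new recovery helpers. */
static void foo_timedout_job(struct drm_sched_job *bad)
{
	struct drm_gpu_scheduler *sched = bad->sched;

	drm_sched_stop(sched);			/* park the scheduler */
	drm_sched_increase_karma(bad);		/* blame the hanging job */

	/* ... driver-specific GPU reset would happen here ... */

	drm_sched_resubmit_jobs(sched);		/* re-queue pending jobs */
	drm_sched_start(sched, true);		/* full recovery */
}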
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 192667144693..d2fad7b0fcf6 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -394,6 +394,9 @@
 	INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
 
 /* CFL H */
+#define INTEL_CFL_H_GT1_IDS(info) \
+	INTEL_VGA_DEVICE(0x3E9C, info)
+
 #define INTEL_CFL_H_GT2_IDS(info) \
 	INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
 	INTEL_VGA_DEVICE(0x3E94, info)  /* Halo GT2 */
@@ -426,6 +429,7 @@
 #define INTEL_CFL_IDS(info)	 \
 	INTEL_CFL_S_GT1_IDS(info), \
 	INTEL_CFL_S_GT2_IDS(info), \
+	INTEL_CFL_H_GT1_IDS(info), \
 	INTEL_CFL_H_GT2_IDS(info), \
 	INTEL_CFL_U_GT2_IDS(info), \
 	INTEL_CFL_U_GT3_IDS(info), \
@@ -457,9 +461,13 @@
 	INTEL_VGA_DEVICE(0x8A51, info), \
 	INTEL_VGA_DEVICE(0x8A5C, info), \
 	INTEL_VGA_DEVICE(0x8A5D, info), \
+	INTEL_VGA_DEVICE(0x8A59, info), \
+	INTEL_VGA_DEVICE(0x8A58, info), \
 	INTEL_VGA_DEVICE(0x8A52, info), \
 	INTEL_VGA_DEVICE(0x8A5A, info), \
 	INTEL_VGA_DEVICE(0x8A5B, info), \
+	INTEL_VGA_DEVICE(0x8A57, info), \
+	INTEL_VGA_DEVICE(0x8A56, info), \
 	INTEL_VGA_DEVICE(0x8A71, info), \
 	INTEL_VGA_DEVICE(0x8A70, info)
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 3fc4854dce49..49d9cdfc58f2 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -296,23 +296,6 @@ static inline void ttm_bo_get(struct ttm_buffer_object *bo)
 }
 
 /**
- * ttm_bo_reference - reference a struct ttm_buffer_object
- *
- * @bo: The buffer object.
- *
- * Returns a refcounted pointer to a buffer object.
- *
- * This function is deprecated. Use @ttm_bo_get instead.
- */
-
-static inline struct ttm_buffer_object *
-ttm_bo_reference(struct ttm_buffer_object *bo)
-{
-	ttm_bo_get(bo);
-	return bo;
-}
-
-/**
  * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
  * its refcount has already reached zero.
  * @bo: The buffer object.
@@ -387,17 +370,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 void ttm_bo_put(struct ttm_buffer_object *bo);
 
 /**
- * ttm_bo_unref
- *
- * @bo: The buffer object.
- *
- * Unreference and clear a pointer to a buffer object.
- *
- * This function is deprecated. Use @ttm_bo_put instead.
- */
-void ttm_bo_unref(struct ttm_buffer_object **bo);
-
-/**
  * ttm_bo_add_to_lru
  *
  * @bo: The buffer object.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 56642fbb6f73..cbf3180cb612 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -381,6 +381,15 @@ struct ttm_bo_driver {
 	 */
 	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
 			     void *buf, int len, int write);
+
+	/**
+	 * struct ttm_bo_driver member del_from_lru_notify
+	 *
+	 * @bo: the buffer object deleted from lru
+	 *
+	 * notify driver that a BO was deleted from LRU.
+	 */
+	void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
 };
 
 /**
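The new del_from_lru_notify member of struct ttm_bo_driver above is a plain notification; a driver that wants it simply wires the hook into its ttm_bo_driver like any other callback. A minimal sketch (the foo_* names are hypothetical):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical hook: runs whenever one of the driver's BOs is taken off an
 * LRU list, e.g. to drop it from driver-private eviction bookkeeping. */
static void foo_bo_del_from_lru_notify(struct ttm_buffer_object *bo)
{
	/* driver-specific bookkeeping */
}

static struct ttm_bo_driver foo_bo_driver = {
	/* ... existing callbacks (ttm_tt_create, evict_flags, ...) ... */
	.del_from_lru_notify = foo_bo_del_from_lru_notify,
};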
diff --git a/include/trace/events/host1x.h b/include/trace/events/host1x.h
index a37ef73092e5..3d340b6f1ea3 100644
--- a/include/trace/events/host1x.h
+++ b/include/trace/events/host1x.h
@@ -80,6 +80,32 @@ TRACE_EVENT(host1x_cdma_push,
 	    __entry->name, __entry->op1, __entry->op2)
 );
 
+TRACE_EVENT(host1x_cdma_push_wide,
+	TP_PROTO(const char *name, u32 op1, u32 op2, u32 op3, u32 op4),
+
+	TP_ARGS(name, op1, op2, op3, op4),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(u32, op1)
+		__field(u32, op2)
+		__field(u32, op3)
+		__field(u32, op4)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->op1 = op1;
+		__entry->op2 = op2;
+		__entry->op3 = op3;
+		__entry->op4 = op4;
+	),
+
+	TP_printk("name=%s, op1=%08x, op2=%08x, op3=%08x op4=%08x",
+		  __entry->name, __entry->op1, __entry->op2, __entry->op3,
+		  __entry->op4)
+);
+
 TRACE_EVENT(host1x_cdma_push_gather,
 	TP_PROTO(const char *name, struct host1x_bo *bo,
 		 u32 words, u32 offset, void *cmdbuf),
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index be84e43c1e19..2acbc8bc465b 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -523,6 +523,7 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_SYNCOBJ_IN      0x04
 #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT     0x05
 #define AMDGPU_CHUNK_ID_BO_HANDLES      0x06
+#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
 
 struct drm_amdgpu_cs_chunk {
 	__u32		chunk_id;
@@ -565,6 +566,11 @@ union drm_amdgpu_cs {
  * caches (L2/vL1/sL1/I$).
  */
 #define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
+/* Set GDS_COMPUTE_MAX_WAVE_ID = DEFAULT before PACKET3_INDIRECT_BUFFER.
+ * This will reset wave ID counters for the IB.
+ */
+#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
+
 struct drm_amdgpu_cs_chunk_ib {
 	__u32 _pad;
 	/** AMDGPU_IB_FLAG_* */
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 0fa360223255..bab20298f422 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -595,6 +595,9 @@ extern "C" {
  * AFBC has several features which may be supported and/or used, which are
  * represented using bits in the modifier. Not all combinations are valid,
  * and different devices or use-cases may support different combinations.
+ *
+ * Further information on the use of AFBC modifiers can be found in
+ * Documentation/gpu/afbc.rst
  */
 #define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode)	fourcc_mod_code(ARM, __afbc_mode)
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 298b2e197744..397810fa2d33 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1486,9 +1486,73 @@ struct drm_i915_gem_context_param {
 #define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
 #define   I915_CONTEXT_DEFAULT_PRIORITY		0
 #define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
+	/*
+	 * When using the following param, value should be a pointer to
+	 * drm_i915_gem_context_param_sseu.
+	 */
+#define I915_CONTEXT_PARAM_SSEU		0x7
 	__u64 value;
 };
 
+/**
+ * Context SSEU programming
+ *
+ * It may be necessary for either functional or performance reason to configure
+ * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
+ * Sub-slice/EU).
+ *
+ * This is done by configuring SSEU configuration using the below
+ * @struct drm_i915_gem_context_param_sseu for every supported engine which
+ * userspace intends to use.
+ *
+ * Not all GPUs or engines support this functionality in which case an error
+ * code -ENODEV will be returned.
+ *
+ * Also, flexibility of possible SSEU configuration permutations varies between
+ * GPU generations and software imposed limitations. Requesting such a
+ * combination will return an error code of -EINVAL.
+ *
+ * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
+ * favour of a single global setting.
+ */
+struct drm_i915_gem_context_param_sseu {
+	/*
+	 * Engine class & instance to be configured or queried.
+	 */
+	__u16 engine_class;
+	__u16 engine_instance;
+
+	/*
+	 * Unused for now. Must be cleared to zero.
+	 */
+	__u32 flags;
+
+	/*
+	 * Mask of slices to enable for the context. Valid values are a subset
+	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
+	 */
+	__u64 slice_mask;
+
+	/*
+	 * Mask of subslices to enable for the context. Valid values are a
+	 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
+	 */
+	__u64 subslice_mask;
+
+	/*
+	 * Minimum/Maximum number of EUs to enable per subslice for the
+	 * context. min_eus_per_subslice must be inferior or equal to
+	 * max_eus_per_subslice.
+	 */
+	__u16 min_eus_per_subslice;
+	__u16 max_eus_per_subslice;
+
+	/*
+	 * Unused for now. Must be cleared to zero.
+	 */
+	__u32 rsvd;
+};
+
 enum drm_i915_oa_format {
 	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
 	I915_OA_FORMAT_A29,	    /* HSW only */
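The new I915_CONTEXT_PARAM_SSEU parameter is applied through the existing context set-param ioctl, with the value field carrying a pointer to the struct above. A userspace sketch; the context id, engine selection and mask values below are made up, and real code would query the slice/subslice masks first:

#include <stdint.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Sketch: restrict one context's render engine to a single slice. */
static int foo_set_context_sseu(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_param_sseu sseu = {
		.engine_class		= I915_ENGINE_CLASS_RENDER,
		.engine_instance	= 0,
		.slice_mask		= 0x1,	/* made-up masks */
		.subslice_mask		= 0x7,
		.min_eus_per_subslice	= 8,
		.max_eus_per_subslice	= 8,
	};
	struct drm_i915_gem_context_param arg = {
		.ctx_id	= ctx_id,
		.param	= I915_CONTEXT_PARAM_SSEU,
		.size	= sizeof(sseu),
		.value	= (uintptr_t)&sseu,
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
}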