author     Tony Lindgren <tony@atomide.com>  2016-03-30 20:36:06 +0300
committer  Tony Lindgren <tony@atomide.com>  2016-03-30 20:36:06 +0300
commit     1809de7e7d37c585e01a1bcc583ea92b78fc759d (patch)
tree       76c5b35c2b04eafce86a1a729c02ab705eba44bc /drivers/gpu/drm/amd/amdgpu/amdgpu.h
parent     ebf24414809200915b9ddf7f109bba7c278c8210 (diff)
parent     3ca4a238106dedc285193ee47f494a6584b6fd2f (diff)
download   linux-1809de7e7d37c585e01a1bcc583ea92b78fc759d.tar.xz
Merge tag 'for-v4.6-rc/omap-fixes-a' of git://git.kernel.org/pub/scm/linux/kernel/git/pjw/omap-pending into omap-for-v4.6/fixes
ARM: OMAP2+: first hwmod fix for v4.6-rc
Fix a longstanding bug in the hwmod code that could cause hardware
SYSCONFIG register values not to match the kernel's idea of what they
should be, which could result in lower performance during IP block
idle entry.
Basic build, boot, and PM test logs are available here:
http://www.pwsan.com/omap/testlogs/omap-hwmod-fixes-a-for-v4.6-rc/20160326231727/
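The failure mode described above is the classic shadow-register problem. The fragment below is purely illustrative (it is not taken from the omap_hwmod code; the struct and function names are made up): a driver keeps a cached copy of a configuration register, and a write path that trusts a stale cache can skip a hardware write that was actually needed, leaving the register out of sync with the kernel's idea of its value.

#include <stdint.h>

/* Hypothetical IP block with a memory-mapped SYSCONFIG-style register. */
struct ip_block {
	volatile uint32_t *sysconfig;	/* the hardware register */
	uint32_t sysconfig_cache;	/* the kernel's cached idea of its value */
};

/* Safe pattern: cache and hardware are always updated together. */
static void ip_write_sysconfig(struct ip_block *ip, uint32_t v)
{
	ip->sysconfig_cache = v;
	*ip->sysconfig = v;
}

/*
 * Fragile pattern: the write is skipped whenever the new value matches the
 * cache.  If the cache ever goes stale (reset, firmware, or another code
 * path that forgot to refresh it), the needed write never happens and the
 * block keeps running with the wrong idle/activity settings.
 */
static void ip_write_sysconfig_if_changed(struct ip_block *ip, uint32_t v)
{
	if (v == ip->sysconfig_cache)
		return;
	ip->sysconfig_cache = v;
	*ip->sysconfig = v;
}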
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  412
1 file changed, 182 insertions(+), 230 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 313b0cc8d676..c4a21c6428f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -53,6 +53,7 @@ #include "amdgpu_ucode.h" #include "amdgpu_gds.h" #include "amd_powerplay.h" +#include "amdgpu_acp.h" #include "gpu_scheduler.h" @@ -74,7 +75,6 @@ extern int amdgpu_dpm; extern int amdgpu_smc_load_fw; extern int amdgpu_aspm; extern int amdgpu_runtime_pm; -extern int amdgpu_hard_reset; extern unsigned amdgpu_ip_block_mask; extern int amdgpu_bapm; extern int amdgpu_deep_color; @@ -82,11 +82,11 @@ extern int amdgpu_vm_size; extern int amdgpu_vm_block_size; extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_debug; -extern int amdgpu_enable_scheduler; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; -extern int amdgpu_enable_semaphores; extern int amdgpu_powerplay; +extern unsigned amdgpu_pcie_gen_cap; +extern unsigned amdgpu_pcie_lane_cap; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -106,9 +106,6 @@ extern int amdgpu_powerplay; /* max number of IP instances */ #define AMDGPU_MAX_SDMA_INSTANCES 2 -/* number of hw syncs before falling back on blocking */ -#define AMDGPU_NUM_SYNCS 4 - /* hardcode that limit for now */ #define AMDGPU_VA_RESERVED_SIZE (8 << 20) @@ -132,47 +129,6 @@ extern int amdgpu_powerplay; #define AMDGPU_RESET_VCE (1 << 13) #define AMDGPU_RESET_VCE1 (1 << 14) -/* CG block flags */ -#define AMDGPU_CG_BLOCK_GFX (1 << 0) -#define AMDGPU_CG_BLOCK_MC (1 << 1) -#define AMDGPU_CG_BLOCK_SDMA (1 << 2) -#define AMDGPU_CG_BLOCK_UVD (1 << 3) -#define AMDGPU_CG_BLOCK_VCE (1 << 4) -#define AMDGPU_CG_BLOCK_HDP (1 << 5) -#define AMDGPU_CG_BLOCK_BIF (1 << 6) - -/* CG flags */ -#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0) -#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1) -#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2) -#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3) -#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4) -#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5) -#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6) -#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7) -#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8) -#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9) -#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10) -#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11) -#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12) -#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13) -#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14) -#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15) -#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16) - -/* PG flags */ -#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0) -#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1) -#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2) -#define AMDGPU_PG_SUPPORT_UVD (1 << 3) -#define AMDGPU_PG_SUPPORT_VCE (1 << 4) -#define AMDGPU_PG_SUPPORT_CP (1 << 5) -#define AMDGPU_PG_SUPPORT_GDS (1 << 6) -#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7) -#define AMDGPU_PG_SUPPORT_SDMA (1 << 8) -#define AMDGPU_PG_SUPPORT_ACP (1 << 9) -#define AMDGPU_PG_SUPPORT_SAMU (1 << 10) - /* GFX current status */ #define AMDGPU_GFX_NORMAL_MODE 0x00000000L #define AMDGPU_GFX_SAFE_MODE 0x00000001L @@ -185,11 +141,9 @@ extern int amdgpu_powerplay; #define CIK_CURSOR_HEIGHT 128 struct amdgpu_device; -struct amdgpu_fence; struct amdgpu_ib; struct amdgpu_vm; struct amdgpu_ring; -struct amdgpu_semaphore; struct amdgpu_cs_parser; struct amdgpu_job; struct amdgpu_irq_src; @@ -287,7 +241,7 @@ struct amdgpu_vm_pte_funcs { unsigned count); /* write pte one entry at a time with addr mapping */ 
void (*write_pte)(struct amdgpu_ib *ib, - uint64_t pe, + const dma_addr_t *pages_addr, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags); /* for linear pte/pde updates without addr mapping */ @@ -295,8 +249,6 @@ struct amdgpu_vm_pte_funcs { uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags); - /* pad the indirect buffer to the necessary number of dw */ - void (*pad_ib)(struct amdgpu_ib *ib); }; /* provided by the gmc block */ @@ -334,12 +286,11 @@ struct amdgpu_ring_funcs { struct amdgpu_ib *ib); void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq, unsigned flags); - bool (*emit_semaphore)(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore, - bool emit_wait); + void (*emit_pipeline_sync)(struct amdgpu_ring *ring); void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr); void (*emit_hdp_flush)(struct amdgpu_ring *ring); + void (*emit_hdp_invalidate)(struct amdgpu_ring *ring); void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, uint32_t gds_base, uint32_t gds_size, uint32_t gws_base, uint32_t gws_size, @@ -349,6 +300,8 @@ struct amdgpu_ring_funcs { int (*test_ib)(struct amdgpu_ring *ring); /* insert NOP packets */ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); + /* pad the indirect buffer to the necessary number of dw */ + void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); }; /* @@ -394,13 +347,15 @@ struct amdgpu_fence_driver { uint64_t gpu_addr; volatile uint32_t *cpu_addr; /* sync_seq is protected by ring emission lock */ - uint64_t sync_seq[AMDGPU_MAX_RINGS]; - atomic64_t last_seq; + uint32_t sync_seq; + atomic_t last_seq; bool initialized; struct amdgpu_irq_src *irq_src; unsigned irq_type; struct timer_list fallback_timer; - wait_queue_head_t fence_queue; + unsigned num_fences_mask; + spinlock_t lock; + struct fence **fences; }; /* some special values for the owner field */ @@ -410,19 +365,6 @@ struct amdgpu_fence_driver { #define AMDGPU_FENCE_FLAG_64BIT (1 << 0) #define AMDGPU_FENCE_FLAG_INT (1 << 1) -struct amdgpu_fence { - struct fence base; - - /* RB, DMA, etc. */ - struct amdgpu_ring *ring; - uint64_t seq; - - /* filp or special value for fence creator */ - void *owner; - - wait_queue_t fence_wake; -}; - struct amdgpu_user_fence { /* write-back bo */ struct amdgpu_bo *bo; @@ -434,24 +376,18 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev); void amdgpu_fence_driver_fini(struct amdgpu_device *adev); void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); -int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); +int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, + unsigned num_hw_submission); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_irq_src *irq_src, unsigned irq_type); void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); void amdgpu_fence_driver_resume(struct amdgpu_device *adev); -int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, - struct amdgpu_fence **fence); +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence); void amdgpu_fence_process(struct amdgpu_ring *ring); -int amdgpu_fence_wait_next(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); -bool amdgpu_fence_need_sync(struct amdgpu_fence *fence, - struct amdgpu_ring *ring); -void amdgpu_fence_note_sync(struct amdgpu_fence *fence, - struct amdgpu_ring *ring); - /* * TTM. 
*/ @@ -470,6 +406,8 @@ struct amdgpu_mman { /* buffer handling */ const struct amdgpu_buffer_funcs *buffer_funcs; struct amdgpu_ring *buffer_funcs_ring; + /* Scheduler entity for buffer moves */ + struct amd_sched_entity entity; }; int amdgpu_copy_buffer(struct amdgpu_ring *ring, @@ -484,9 +422,9 @@ struct amdgpu_bo_list_entry { struct amdgpu_bo *robj; struct ttm_validate_buffer tv; struct amdgpu_bo_va *bo_va; - unsigned prefered_domains; - unsigned allowed_domains; uint32_t priority; + struct page **user_pages; + int user_invalidated; }; struct amdgpu_bo_va_mapping { @@ -498,7 +436,6 @@ struct amdgpu_bo_va_mapping { /* bo virtual addresses in a specific vm */ struct amdgpu_bo_va { - struct mutex mutex; /* protected by bo being reserved */ struct list_head bo_list; struct fence *last_pt_update; @@ -522,7 +459,8 @@ struct amdgpu_bo { /* Protected by gem.mutex */ struct list_head list; /* Protected by tbo.reserved */ - u32 initial_domain; + u32 prefered_domains; + u32 allowed_domains; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; struct ttm_placement placement; struct ttm_buffer_object tbo; @@ -544,7 +482,6 @@ struct amdgpu_bo { struct amdgpu_bo *parent; struct ttm_bo_kmap_obj dma_buf_vmap; - pid_t pid; struct amdgpu_mn *mn; struct list_head mn_list; }; @@ -593,11 +530,14 @@ int amdgpu_gem_debugfs_init(struct amdgpu_device *adev); * Assumption is that there won't be hole (all object on same * alignment). */ + +#define AMDGPU_SA_NUM_FENCE_LISTS 32 + struct amdgpu_sa_manager { wait_queue_head_t wq; struct amdgpu_bo *bo; struct list_head *hole; - struct list_head flist[AMDGPU_MAX_RINGS]; + struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS]; struct list_head olist; unsigned size; uint64_t gpu_addr; @@ -606,8 +546,6 @@ struct amdgpu_sa_manager { uint32_t align; }; -struct amdgpu_sa_bo; - /* sub-allocation buffer */ struct amdgpu_sa_bo { struct list_head olist; @@ -621,13 +559,7 @@ struct amdgpu_sa_bo { /* * GEM objects. */ -struct amdgpu_gem { - struct mutex mutex; - struct list_head objects; -}; - -int amdgpu_gem_init(struct amdgpu_device *adev); -void amdgpu_gem_fini(struct amdgpu_device *adev); +void amdgpu_gem_force_release(struct amdgpu_device *adev); int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, bool kernel, @@ -639,32 +571,10 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, int amdgpu_mode_dumb_mmap(struct drm_file *filp, struct drm_device *dev, uint32_t handle, uint64_t *offset_p); - -/* - * Semaphores. 
- */ -struct amdgpu_semaphore { - struct amdgpu_sa_bo *sa_bo; - signed waiters; - uint64_t gpu_addr; -}; - -int amdgpu_semaphore_create(struct amdgpu_device *adev, - struct amdgpu_semaphore **semaphore); -bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore); -bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, - struct amdgpu_semaphore *semaphore); -void amdgpu_semaphore_free(struct amdgpu_device *adev, - struct amdgpu_semaphore **semaphore, - struct fence *fence); - /* * Synchronization */ struct amdgpu_sync { - struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS]; - struct fence *sync_to[AMDGPU_MAX_RINGS]; DECLARE_HASHTABLE(fences, 4); struct fence *last_vm_update; }; @@ -676,12 +586,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, void *owner); -int amdgpu_sync_rings(struct amdgpu_sync *sync, - struct amdgpu_ring *ring); struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); int amdgpu_sync_wait(struct amdgpu_sync *sync); -void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync, - struct fence *fence); +void amdgpu_sync_free(struct amdgpu_sync *sync); +int amdgpu_sync_init(void); +void amdgpu_sync_fini(void); /* * GART structures, functions & helpers @@ -799,6 +708,7 @@ struct amdgpu_flip_work { struct fence *excl; unsigned shared_count; struct fence **shared; + struct fence_cb cb; }; @@ -811,12 +721,11 @@ struct amdgpu_ib { uint32_t length_dw; uint64_t gpu_addr; uint32_t *ptr; - struct amdgpu_ring *ring; - struct amdgpu_fence *fence; struct amdgpu_user_fence *user; struct amdgpu_vm *vm; + unsigned vm_id; + uint64_t vm_pd_addr; struct amdgpu_ctx *ctx; - struct amdgpu_sync sync; uint32_t gds_base, gds_size; uint32_t gws_base, gws_size; uint32_t oa_base, oa_size; @@ -835,13 +744,14 @@ enum amdgpu_ring_type { extern struct amd_sched_backend_ops amdgpu_sched_ops; -int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_ib *ibs, - unsigned num_ibs, - int (*free_job)(struct amdgpu_job *), - void *owner, - struct fence **fence); +int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, + struct amdgpu_job **job); +int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, + struct amdgpu_job **job); +void amdgpu_job_free(struct amdgpu_job *job); +int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, + struct amd_sched_entity *entity, void *owner, + struct fence **f); struct amdgpu_ring { struct amdgpu_device *adev; @@ -850,7 +760,6 @@ struct amdgpu_ring { struct amd_gpu_scheduler sched; spinlock_t fence_lock; - struct mutex *ring_lock; struct amdgpu_bo *ring_obj; volatile uint32_t *ring; unsigned rptr_offs; @@ -859,7 +768,7 @@ struct amdgpu_ring { unsigned wptr; unsigned wptr_old; unsigned ring_size; - unsigned ring_free_dw; + unsigned max_dw; int count_dw; uint64_t gpu_addr; uint32_t align_mask; @@ -867,8 +776,6 @@ struct amdgpu_ring { bool ready; u32 nop; u32 idx; - u64 last_semaphore_signal_addr; - u64 last_semaphore_wait_addr; u32 me; u32 pipe; u32 queue; @@ -881,7 +788,6 @@ struct amdgpu_ring { struct amdgpu_ctx *current_ctx; enum amdgpu_ring_type type; char name[16]; - bool is_pte_ring; }; /* @@ -925,13 +831,14 @@ struct amdgpu_vm_pt { }; struct amdgpu_vm_id { - unsigned id; - uint64_t pd_gpu_addr; + struct amdgpu_vm_manager_id *mgr_id; + uint64_t pd_gpu_addr; /* last flushed PD/PT update */ - struct fence *flushed_updates; + struct fence 
*flushed_updates; }; struct amdgpu_vm { + /* tree of virtual addresses mapped */ struct rb_root va; /* protecting invalidated */ @@ -956,30 +863,47 @@ struct amdgpu_vm { /* for id and flush management per ring */ struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; - /* for interval tree */ - spinlock_t it_lock; + /* protecting freed */ spinlock_t freed_lock; + + /* Scheduler entity for page table updates */ + struct amd_sched_entity entity; +}; + +struct amdgpu_vm_manager_id { + struct list_head list; + struct fence *active; + atomic_long_t owner; + + uint32_t gds_base; + uint32_t gds_size; + uint32_t gws_base; + uint32_t gws_size; + uint32_t oa_base; + uint32_t oa_size; }; struct amdgpu_vm_manager { - struct { - struct fence *active; - atomic_long_t owner; - } ids[AMDGPU_NUM_VM]; + /* Handling of VMIDs */ + struct mutex lock; + unsigned num_ids; + struct list_head ids_lru; + struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM]; uint32_t max_pfn; - /* number of VMIDs */ - unsigned nvm; /* vram base address for page table entry */ u64 vram_base_offset; /* is vm enabled? */ bool enabled; /* vm pte handling */ const struct amdgpu_vm_pte_funcs *vm_pte_funcs; - struct amdgpu_ring *vm_pte_funcs_ring; + struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; + unsigned vm_pte_num_rings; + atomic_t vm_pte_next_ring; }; +void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); @@ -990,14 +914,15 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync); + struct amdgpu_sync *sync, struct fence *fence, + unsigned *vm_id, uint64_t *vm_pd_addr); void amdgpu_vm_flush(struct amdgpu_ring *ring, - struct amdgpu_vm *vm, - struct fence *updates); -void amdgpu_vm_fence(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct fence *fence); -uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); + unsigned vm_id, uint64_t pd_addr, + uint32_t gds_base, uint32_t gds_size, + uint32_t gws_base, uint32_t gws_size, + uint32_t oa_base, uint32_t oa_size); +void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); +uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, @@ -1023,7 +948,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, uint64_t addr); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); -int amdgpu_vm_free_job(struct amdgpu_job *job); /* * context related structures @@ -1051,10 +975,6 @@ struct amdgpu_ctx_mgr { struct idr ctx_handles; }; -int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, - struct amdgpu_ctx *ctx); -void amdgpu_ctx_fini(struct amdgpu_ctx *ctx); - struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); int amdgpu_ctx_put(struct amdgpu_ctx *ctx); @@ -1089,13 +1009,15 @@ struct amdgpu_bo_list { struct amdgpu_bo *gds_obj; struct amdgpu_bo *gws_obj; struct amdgpu_bo *oa_obj; - bool has_userptr; + unsigned first_userptr; unsigned num_entries; struct amdgpu_bo_list_entry *array; }; struct amdgpu_bo_list * amdgpu_bo_list_get(struct amdgpu_fpriv 
*fpriv, int id); +void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, + struct list_head *validated); void amdgpu_bo_list_put(struct amdgpu_bo_list *list); void amdgpu_bo_list_free(struct amdgpu_bo_list *list); @@ -1169,6 +1091,7 @@ struct amdgpu_gca_config { unsigned multi_gpu_tile_size; unsigned mc_arb_ramcfg; unsigned gb_addr_config; + unsigned num_rbs; uint32_t tile_mode_array[32]; uint32_t macrotile_mode_array[16]; @@ -1211,23 +1134,20 @@ struct amdgpu_gfx { unsigned ce_ram_size; }; -int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, +int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib); -void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib); -int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_ib *ib, void *owner); +void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f); +int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, + struct amdgpu_ib *ib, struct fence *last_vm_update, + struct fence **f); int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); -/* Ring access between begin & end cannot sleep */ -void amdgpu_ring_free_size(struct amdgpu_ring *ring); int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); -int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw); void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); +void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); -void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); -void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring); unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, uint32_t **data); int amdgpu_ring_restore(struct amdgpu_ring *ring, @@ -1237,7 +1157,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_irq_src *irq_src, unsigned irq_type, enum amdgpu_ring_type ring_type); void amdgpu_ring_fini(struct amdgpu_ring *ring); -struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f); /* * CS. 
@@ -1246,47 +1165,58 @@ struct amdgpu_cs_chunk { uint32_t chunk_id; uint32_t length_dw; uint32_t *kdata; - void __user *user_ptr; }; struct amdgpu_cs_parser { struct amdgpu_device *adev; struct drm_file *filp; struct amdgpu_ctx *ctx; - struct amdgpu_bo_list *bo_list; + /* chunks */ unsigned nchunks; struct amdgpu_cs_chunk *chunks; - /* relocations */ - struct amdgpu_bo_list_entry vm_pd; - struct list_head validated; - struct fence *fence; - struct amdgpu_ib *ibs; - uint32_t num_ibs; + /* scheduler job object */ + struct amdgpu_job *job; - struct ww_acquire_ctx ticket; + /* buffer objects */ + struct ww_acquire_ctx ticket; + struct amdgpu_bo_list *bo_list; + struct amdgpu_bo_list_entry vm_pd; + struct list_head validated; + struct fence *fence; + uint64_t bytes_moved_threshold; + uint64_t bytes_moved; /* user fence */ - struct amdgpu_user_fence uf; struct amdgpu_bo_list_entry uf_entry; }; struct amdgpu_job { struct amd_sched_job base; struct amdgpu_device *adev; + struct amdgpu_ring *ring; + struct amdgpu_sync sync; struct amdgpu_ib *ibs; + struct fence *fence; /* the hw fence */ uint32_t num_ibs; void *owner; struct amdgpu_user_fence uf; - int (*free_job)(struct amdgpu_job *job); }; #define to_amdgpu_job(sched_job) \ container_of((sched_job), struct amdgpu_job, base) -static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) +static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, + uint32_t ib_idx, int idx) { - return p->ibs[ib_idx].ptr[idx]; + return p->job->ibs[ib_idx].ptr[idx]; +} + +static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p, + uint32_t ib_idx, int idx, + uint32_t value) +{ + p->job->ibs[ib_idx].ptr[idx] = value; } /* @@ -1538,6 +1468,7 @@ enum amdgpu_dpm_forced_level { AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, AMDGPU_DPM_FORCED_LEVEL_LOW = 1, AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, + AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, }; struct amdgpu_vce_state { @@ -1667,6 +1598,7 @@ struct amdgpu_uvd { struct amdgpu_ring ring; struct amdgpu_irq_src irq; bool address_64_bit; + struct amd_sched_entity entity; }; /* @@ -1691,6 +1623,7 @@ struct amdgpu_vce { struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; struct amdgpu_irq_src irq; unsigned harvest_config; + struct amd_sched_entity entity; }; /* @@ -1925,6 +1858,18 @@ void amdgpu_cgs_destroy_device(void *cgs_device); /* + * CGS + */ +void *amdgpu_cgs_create_device(struct amdgpu_device *adev); +void amdgpu_cgs_destroy_device(void *cgs_device); + + +/* GPU virtualization */ +struct amdgpu_virtualization { + bool supports_sr_iov; +}; + +/* * Core structure, functions and helpers. 
*/ typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); @@ -1944,6 +1889,10 @@ struct amdgpu_device { struct drm_device *ddev; struct pci_dev *pdev; +#ifdef CONFIG_DRM_AMD_ACP + struct amdgpu_acp acp; +#endif + /* ASIC */ enum amd_asic_type asic_type; uint32_t family; @@ -2020,7 +1969,6 @@ struct amdgpu_device { /* memory management */ struct amdgpu_mman mman; - struct amdgpu_gem gem; struct amdgpu_vram_scratch vram_scratch; struct amdgpu_wb wb; atomic64_t vram_usage; @@ -2038,7 +1986,6 @@ struct amdgpu_device { /* rings */ unsigned fence_context; - struct mutex ring_lock; unsigned num_rings; struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; bool ib_pool_ready; @@ -2050,6 +1997,7 @@ struct amdgpu_device { /* powerplay */ struct amd_powerplay powerplay; bool pp_enabled; + bool pp_force_state_enabled; /* dpm */ struct amdgpu_pm pm; @@ -2066,7 +2014,6 @@ struct amdgpu_device { struct amdgpu_sdma sdma; /* uvd */ - bool has_uvd; struct amdgpu_uvd uvd; /* vce */ @@ -2091,8 +2038,7 @@ struct amdgpu_device { /* amdkfd interface */ struct kfd_dev *kfd; - /* kernel conext for IB submission */ - struct amdgpu_ctx kernel_ctx; + struct amdgpu_virtualization virtualization; }; bool amdgpu_device_is_px(struct drm_device *dev); @@ -2114,20 +2060,6 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); /* - * Cast helper - */ -extern const struct fence_ops amdgpu_fence_ops; -static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f) -{ - struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); - - if (__f->base.ops == &amdgpu_fence_ops) - return __f; - - return NULL; -} - -/* * Registers read & write functions. */ #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false) @@ -2197,7 +2129,6 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) ring->ring[ring->wptr++] = v; ring->wptr &= ring->ptr_mask; ring->count_dw--; - ring->ring_free_dw--; } static inline struct amdgpu_sdma_instance * @@ -2233,9 +2164,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) -#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags))) +#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) -#define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib))) #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) @@ -2243,11 +2173,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib)) +#define amdgpu_ring_emit_pipeline_sync(r) 
(r)->funcs->emit_pipeline_sync((r)) #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) -#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait)) #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) +#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r)) +#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) @@ -2278,60 +2210,60 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) #define amdgpu_dpm_get_temperature(adev) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ - (adev)->pm.funcs->get_temperature((adev)) + (adev)->pm.funcs->get_temperature((adev))) #define amdgpu_dpm_set_fan_control_mode(adev, m) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ - (adev)->pm.funcs->set_fan_control_mode((adev), (m)) + (adev)->pm.funcs->set_fan_control_mode((adev), (m))) #define amdgpu_dpm_get_fan_control_mode(adev) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ - (adev)->pm.funcs->get_fan_control_mode((adev)) + (adev)->pm.funcs->get_fan_control_mode((adev))) #define amdgpu_dpm_set_fan_speed_percent(adev, s) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ - (adev)->pm.funcs->set_fan_speed_percent((adev), (s)) + (adev)->pm.funcs->set_fan_speed_percent((adev), (s))) #define amdgpu_dpm_get_fan_speed_percent(adev, s) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ - (adev)->pm.funcs->get_fan_speed_percent((adev), (s)) + (adev)->pm.funcs->get_fan_speed_percent((adev), (s))) #define amdgpu_dpm_get_sclk(adev, l) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ - (adev)->pm.funcs->get_sclk((adev), (l)) + (adev)->pm.funcs->get_sclk((adev), (l))) #define amdgpu_dpm_get_mclk(adev, l) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ - (adev)->pm.funcs->get_mclk((adev), (l)) + (adev)->pm.funcs->get_mclk((adev), (l))) #define amdgpu_dpm_force_performance_level(adev, l) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ - (adev)->pm.funcs->force_performance_level((adev), (l)) + (adev)->pm.funcs->force_performance_level((adev), (l))) #define amdgpu_dpm_powergate_uvd(adev, g) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? 
\ (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ - (adev)->pm.funcs->powergate_uvd((adev), (g)) + (adev)->pm.funcs->powergate_uvd((adev), (g))) #define amdgpu_dpm_powergate_vce(adev, g) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ - (adev)->pm.funcs->powergate_vce((adev), (g)) + (adev)->pm.funcs->powergate_vce((adev), (g))) #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \ - (adev)->pp_enabled ? \ + ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \ - (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)) + (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))) #define amdgpu_dpm_get_current_power_state(adev) \ (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) @@ -2339,6 +2271,21 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_dpm_get_performance_level(adev) \ (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) +#define amdgpu_dpm_get_pp_num_states(adev, data) \ + (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) + +#define amdgpu_dpm_get_pp_table(adev, table) \ + (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) + +#define amdgpu_dpm_set_pp_table(adev, buf, size) \ + (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) + +#define amdgpu_dpm_print_clock_levels(adev, type, buf) \ + (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) + +#define amdgpu_dpm_force_clock_level(adev, type, level) \ + (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) + #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) @@ -2349,7 +2296,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_card_posted(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); -bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, @@ -2357,9 +2303,15 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, struct amdgpu_ring **out_ring); void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); +int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, uint32_t flags); bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); +struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); +bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, + unsigned long end); +bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, + int *last_invalidated); bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); |
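A note on the largest structural change in this diff: the removed amdgpu_sched_ib_submit_kernel_helper() and amdgpu_fence-based paths are replaced by amdgpu_job objects handed to the GPU scheduler. The fragment below is only a sketch of how a kernel-internal caller might use the new helpers whose prototypes appear above (amdgpu_job_alloc_with_ib, amdgpu_job_submit, amdgpu_job_free); the IB size, its contents, and the owner value are placeholders, not taken from the patch.

/* Sketch only: illustrative use of the job helpers declared in this header. */
static int example_submit_one_ib(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring,
				 struct amd_sched_entity *entity,
				 struct fence **fence)
{
	struct amdgpu_job *job;
	int r;

	/* Allocate a job that owns a single IB (the size here is a placeholder). */
	r = amdgpu_job_alloc_with_ib(adev, 256, &job);
	if (r)
		return r;

	/* ... fill job->ibs[0].ptr / length_dw with packets here ... */

	/* Queue the job on the scheduler entity; *fence signals completion. */
	r = amdgpu_job_submit(job, ring, entity, NULL /* owner */, fence);
	if (r)
		amdgpu_job_free(job);

	return r;
}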
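Also worth noting: every amdgpu_dpm_* wrapper that dispatches between the powerplay and legacy pm paths gains an outer pair of parentheses around its conditional expression. Without them, a macro that expands to a bare ?: can bind incorrectly at the call site. A minimal, non-amdgpu illustration:

#define PICK_BAD(x)  (x) ? 10 : 20
#define PICK_GOOD(x) ((x) ? 10 : 20)

/* 2 * PICK_BAD(x)  expands to 2 * (x) ? 10 : 20, i.e. (2 * (x)) ? 10 : 20  */
/* 2 * PICK_GOOD(x) expands to 2 * ((x) ? 10 : 20), which is what was meant */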