| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-09-04 03:02:26 +0300 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-09-04 03:02:26 +0300 |
| commit | 906dde0f355bd97c080c215811ae7db1137c4af8 | |
| tree | 1e1b4ba58a59c4027f06d86e7430566ee0dcbb15 /drivers/gpu/drm/amd/amdgpu | |
| parent | 69c0067aa3f40d3e52ab78643aecb17d669d3389 | |
| parent | 7846b12fe0b5feab5446d892f41b5140c1419109 | |
| download | linux-906dde0f355bd97c080c215811ae7db1137c4af8.tar.xz | |
Merge tag 'drm-for-v4.14' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"This is the main drm pull request for 4.14 merge window.
I'm sending this early, as my continuing journey into fatherhood is
occurring really soon now, I'm going to be mostly useless for the next
couple of weeks, though I may be able to read email, I doubt I'll be
doing much patch applications or git sending. If anything urgent pops
up I've asked Daniel/Jani/Alex/Sean to try and direct stuff towards
you.
Outside drm changes:
Some rcar-du updates touch the V4L tree; all acks should be in
place. One export is added to the radix tree code for a new i915 use
case. There are some minor AGP cleanups (don't see those too often).
The vbox driver in staging is changed to avoid breaking its compilation.
Summary:
core:
- Atomic helper fixes
- Atomic UAPI fixes
- Add YCBCR 4:2:0 support
- Drop set_busid hook
- Refactor fb_helper locking
- Remove a bunch of internal APIs
- Add a bunch of better default handlers
- Format modifier/blob plane property added
- More internal header refactoring
- Make more internal API names consistent
- Enhanced syncobj APIs (wait/signal/reset/create signalled)
bridge:
- Add Synopsys Designware MIPI DSI host bridge driver
tiny:
- Add Pervasive Displays RePaper displays
- Add support for LEGO MINDSTORMS EV3 LCD
i915:
- Lots of GEN10/CNL support patches
- drm syncobj support
- Skylake+ watermark refactoring
- GVT vGPU 48-bit ppgtt support
- GVT performance improvements
- NOA change ioctl
- CCS (color compression) scanout support
- GPU reset improvements
amdgpu:
- Initial hugepage support
- BO migration logic rework
- Vega10 improvements
- Powerplay fixes
- Stop reprogramming the MC
- Fixes for ACP audio on Stoney
- SR-IOV fixes/improvements
- Command submission overhead improvements
amdkfd:
- Non-dGPU upstreaming patches
- Scratch VA ioctl
- Image tiling modes
- Update PM4 headers for new firmware
- Drop all BUG_ONs.
nouveau:
- GP108 modesetting support.
- Disable MSI on big endian.
vmwgfx:
- Add fence fd support.
msm:
- Runtime PM improvements
exynos:
- NV12MT support
- Refactor KMS drivers
imx-drm:
- Lock scanout channel to improve memory bw
- Cleanups
etnaviv:
- GEM object population fixes
tegra:
- Prep work for Tegra186 support
- PRIME mmap support
sunxi:
- HDMI support improvements
- HDMI CEC support
omapdrm:
- HDMI hotplug IRQ support
- Big driver cleanup
- OMAP5 DSI support
rcar-du:
- vblank fixes
- VSP1 updates
arcpgu:
- Minor fixes
stm:
- Add STM32 DSI controller driver
dw_hdmi:
- Add support for Rockchip RK3399
- HDMI CEC support
atmel-hlcdc:
- Add 8-bit color support
vc4:
- Atomic fixes
- New ioctl to attach a label to a buffer object
- HDMI CEC support
- Allow userspace to dictate rendering order on submit ioctl"
* tag 'drm-for-v4.14' of git://people.freedesktop.org/~airlied/linux: (1074 commits)
drm/syncobj: Add a signal ioctl (v3)
drm/syncobj: Add a reset ioctl (v3)
drm/syncobj: Add a syncobj_array_find helper
drm/syncobj: Allow wait for submit and signal behavior (v5)
drm/syncobj: Add a CREATE_SIGNALED flag
drm/syncobj: Add a callback mechanism for replace_fence (v3)
drm/syncobj: add sync obj wait interface. (v8)
i915: Use drm_syncobj_fence_get
drm/syncobj: Add a race-free drm_syncobj_fence_get helper (v2)
drm/syncobj: Rename fence_get to find_fence
drm: kirin: Add mode_valid logic to avoid mode clocks we can't generate
drm/vmwgfx: Bump the version for fence FD support
drm/vmwgfx: Add export fence to file descriptor support
drm/vmwgfx: Add support for imported Fence File Descriptor
drm/vmwgfx: Prepare to support fence fd
drm/vmwgfx: Fix incorrect command header offset at restart
drm/vmwgfx: Support the NOP_ERROR command
drm/vmwgfx: Restart command buffers after errors
drm/vmwgfx: Move irq bottom half processing to threads
drm/vmwgfx: Don't use drm_irq_[un]install
...
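The syncobj commits at the top of the shortlog add userspace-visible uapi: a
CREATE_SIGNALED flag plus wait, signal and reset ioctls. Below is a minimal
userspace sketch of driving that uapi with raw ioctls; it is illustrative
only, the render-node path and the header include path are assumptions about
the local setup, and error handling is trimmed.

```c
/* Minimal sketch of the 4.14 syncobj uapi (create-signalled, reset,
 * signal, wait).  Include path may be <libdrm/drm.h> depending on how
 * the headers are installed; /dev/dri/renderD128 is just an example node.
 */
#include <drm/drm.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Create the object already signalled (new CREATE_SIGNALED flag). */
	struct drm_syncobj_create create = { .flags = DRM_SYNCOBJ_CREATE_SIGNALED };
	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create)) {
		perror("SYNCOBJ_CREATE");
		return 1;
	}
	uint32_t handle = create.handle;

	/* Reset it back to unsignalled, then signal it again from the CPU. */
	struct drm_syncobj_array array = {
		.handles = (uintptr_t)&handle,
		.count_handles = 1,
	};
	ioctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &array);
	ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);

	/* Wait on it; timeout_nsec is an absolute CLOCK_MONOTONIC deadline. */
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	struct drm_syncobj_wait wait = {
		.handles = (uintptr_t)&handle,
		.count_handles = 1,
		.timeout_nsec = (int64_t)ts.tv_sec * 1000000000ll +
				ts.tv_nsec + 1000000000ll, /* now + 1s */
		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
	};
	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
		perror("SYNCOBJ_WAIT");
	else
		printf("syncobj %u signalled\n", handle);

	struct drm_syncobj_destroy destroy = { .handle = handle };
	ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
	close(fd);
	return 0;
}
```

Note that the wait deadline is absolute rather than relative, and passing
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (the "wait for submit" behaviour in
the shortlog above) makes the wait block until a fence is attached to the
syncobj instead of failing immediately when none is present.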
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
95 files changed, 3428 insertions, 2849 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index faea6349228f..658bac0cdc5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ - amdgpu_queue_mgr.o + amdgpu_queue_mgr.o amdgpu_vf_error.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ff7bf1a9f967..12e71bbfd222 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -68,13 +68,16 @@ #include "gpu_scheduler.h" #include "amdgpu_virt.h" +#include "amdgpu_gart.h" /* * Modules parameters. */ extern int amdgpu_modeset; extern int amdgpu_vram_limit; -extern int amdgpu_gart_size; +extern int amdgpu_vis_vram_limit; +extern unsigned amdgpu_gart_size; +extern int amdgpu_gtt_size; extern int amdgpu_moverate; extern int amdgpu_benchmarking; extern int amdgpu_testing; @@ -93,6 +96,7 @@ extern int amdgpu_bapm; extern int amdgpu_deep_color; extern int amdgpu_vm_size; extern int amdgpu_vm_block_size; +extern int amdgpu_vm_fragment_size; extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_debug; extern int amdgpu_vm_update_mode; @@ -104,6 +108,7 @@ extern unsigned amdgpu_pcie_gen_cap; extern unsigned amdgpu_pcie_lane_cap; extern unsigned amdgpu_cg_mask; extern unsigned amdgpu_pg_mask; +extern unsigned amdgpu_sdma_phase_quantum; extern char *amdgpu_disable_cu; extern char *amdgpu_virtual_display; extern unsigned amdgpu_pp_feature_mask; @@ -369,78 +374,10 @@ struct amdgpu_clock { }; /* - * BO. + * GEM. 
*/ -struct amdgpu_bo_list_entry { - struct amdgpu_bo *robj; - struct ttm_validate_buffer tv; - struct amdgpu_bo_va *bo_va; - uint32_t priority; - struct page **user_pages; - int user_invalidated; -}; - -struct amdgpu_bo_va_mapping { - struct list_head list; - struct rb_node rb; - uint64_t start; - uint64_t last; - uint64_t __subtree_last; - uint64_t offset; - uint64_t flags; -}; - -/* bo virtual addresses in a specific vm */ -struct amdgpu_bo_va { - /* protected by bo being reserved */ - struct list_head bo_list; - struct dma_fence *last_pt_update; - unsigned ref_count; - - /* protected by vm mutex and spinlock */ - struct list_head vm_status; - - /* mappings for this bo_va */ - struct list_head invalids; - struct list_head valids; - - /* constant after initialization */ - struct amdgpu_vm *vm; - struct amdgpu_bo *bo; -}; #define AMDGPU_GEM_DOMAIN_MAX 0x3 - -struct amdgpu_bo { - /* Protected by tbo.reserved */ - u32 prefered_domains; - u32 allowed_domains; - struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; - struct ttm_placement placement; - struct ttm_buffer_object tbo; - struct ttm_bo_kmap_obj kmap; - u64 flags; - unsigned pin_count; - void *kptr; - u64 tiling_flags; - u64 metadata_flags; - void *metadata; - u32 metadata_size; - unsigned prime_shared_count; - /* list of all virtual address to which this bo - * is associated to - */ - struct list_head va; - /* Constant after initialization */ - struct drm_gem_object gem_base; - struct amdgpu_bo *parent; - struct amdgpu_bo *shadow; - - struct ttm_bo_kmap_obj dma_buf_vmap; - struct amdgpu_mn *mn; - struct list_head mn_list; - struct list_head shadow_list; -}; #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) void amdgpu_gem_object_free(struct drm_gem_object *obj); @@ -532,49 +469,6 @@ int amdgpu_fence_slab_init(void); void amdgpu_fence_slab_fini(void); /* - * GART structures, functions & helpers - */ -struct amdgpu_mc; - -#define AMDGPU_GPU_PAGE_SIZE 4096 -#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) -#define AMDGPU_GPU_PAGE_SHIFT 12 -#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK) - -struct amdgpu_gart { - dma_addr_t table_addr; - struct amdgpu_bo *robj; - void *ptr; - unsigned num_gpu_pages; - unsigned num_cpu_pages; - unsigned table_size; -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS - struct page **pages; -#endif - bool ready; - - /* Asic default pte flags */ - uint64_t gart_pte_flags; - - const struct amdgpu_gart_funcs *gart_funcs; -}; - -int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); -void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); -int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); -void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); -int amdgpu_gart_init(struct amdgpu_device *adev); -void amdgpu_gart_fini(struct amdgpu_device *adev); -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, - int pages); -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, - int pages, struct page **pagelist, - dma_addr_t *dma_addr, uint64_t flags); -int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); - -/* * VMHUB structures, functions & helpers */ struct amdgpu_vmhub { @@ -598,22 +492,20 @@ struct amdgpu_mc { * about vram size near mc fb location */ u64 mc_vram_size; u64 visible_vram_size; - u64 gtt_size; - u64 gtt_start; - u64 gtt_end; + u64 gart_size; + u64 
gart_start; + u64 gart_end; u64 vram_start; u64 vram_end; unsigned vram_width; u64 real_vram_size; int vram_mtrr; - u64 gtt_base_align; u64 mc_mask; const struct firmware *fw; /* MC firmware */ uint32_t fw_version; struct amdgpu_irq_src vm_fault; uint32_t vram_type; uint32_t srbm_soft_reset; - struct amdgpu_mode_mc_save save; bool prt_warning; uint64_t stolen_size; /* apertures */ @@ -719,15 +611,15 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT /* overlap the doorbell assignment with VCN as they are mutually exclusive * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD */ - AMDGPU_DOORBELL64_RING0_1 = 0xF8, - AMDGPU_DOORBELL64_RING2_3 = 0xF9, - AMDGPU_DOORBELL64_RING4_5 = 0xFA, - AMDGPU_DOORBELL64_RING6_7 = 0xFB, + AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8, + AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9, + AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA, + AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB, - AMDGPU_DOORBELL64_UVD_RING0_1 = 0xFC, - AMDGPU_DOORBELL64_UVD_RING2_3 = 0xFD, - AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFE, - AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFF, + AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC, + AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD, + AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE, + AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF, AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, AMDGPU_DOORBELL64_INVALID = 0xFFFF @@ -857,6 +749,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); struct amdgpu_fpriv { struct amdgpu_vm vm; struct amdgpu_bo_va *prt_va; + struct amdgpu_bo_va *csa_va; struct mutex bo_list_lock; struct idr bo_list_handles; struct amdgpu_ctx_mgr ctx_mgr; @@ -866,6 +759,14 @@ struct amdgpu_fpriv { /* * residency list */ +struct amdgpu_bo_list_entry { + struct amdgpu_bo *robj; + struct ttm_validate_buffer tv; + struct amdgpu_bo_va *bo_va; + uint32_t priority; + struct page **user_pages; + int user_invalidated; +}; struct amdgpu_bo_list { struct mutex lock; @@ -1159,7 +1060,9 @@ struct amdgpu_cs_parser { struct list_head validated; struct dma_fence *fence; uint64_t bytes_moved_threshold; + uint64_t bytes_moved_vis_threshold; uint64_t bytes_moved; + uint64_t bytes_moved_vis; struct amdgpu_bo_list_entry *evictable; /* user fence */ @@ -1230,8 +1133,6 @@ struct amdgpu_wb { int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); -int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb); -void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb); void amdgpu_get_pcie_info(struct amdgpu_device *adev); @@ -1525,7 +1426,7 @@ struct amdgpu_device { bool is_atom_fw; uint8_t *bios; uint32_t bios_size; - struct amdgpu_bo *stollen_vga_memory; + struct amdgpu_bo *stolen_vga_memory; uint32_t bios_scratch_reg_offset; uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; @@ -1557,6 +1458,10 @@ struct amdgpu_device { spinlock_t gc_cac_idx_lock; amdgpu_rreg_t gc_cac_rreg; amdgpu_wreg_t gc_cac_wreg; + /* protects concurrent se_cac register access */ + spinlock_t se_cac_idx_lock; + amdgpu_rreg_t se_cac_rreg; + amdgpu_wreg_t se_cac_wreg; /* protects concurrent ENDPOINT (audio) register access */ spinlock_t audio_endpt_idx_lock; amdgpu_block_rreg_t audio_endpt_rreg; @@ -1579,9 +1484,6 @@ struct amdgpu_device { struct amdgpu_mman mman; struct amdgpu_vram_scratch vram_scratch; struct amdgpu_wb wb; - atomic64_t vram_usage; - atomic64_t vram_vis_usage; - atomic64_t gtt_usage; atomic64_t num_bytes_moved; atomic64_t num_evictions; atomic64_t num_vram_cpu_page_faults; @@ -1593,6 +1495,7 @@ struct amdgpu_device { spinlock_t lock; s64 last_update_us; s64 accum_us; /* accumulated 
microseconds */ + s64 accum_us_vis; /* for visible VRAM */ u32 log2_max_MBps; } mm_stats; @@ -1687,6 +1590,8 @@ struct amdgpu_device { bool has_hw_reset; u8 reset_magic[AMDGPU_RESET_MAGIC_NUM]; + /* record last mm index being written through WREG32*/ + unsigned long last_mm_index; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) @@ -1742,6 +1647,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) #define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg)) #define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v)) +#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg)) +#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v)) #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg)) #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) #define WREG32_P(reg, val, mask) \ @@ -1792,50 +1699,6 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) -/* - * RING helpers. - */ -static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) -{ - if (ring->count_dw <= 0) - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); - ring->ring[ring->wptr++ & ring->buf_mask] = v; - ring->wptr &= ring->ptr_mask; - ring->count_dw--; -} - -static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw) -{ - unsigned occupied, chunk1, chunk2; - void *dst; - - if (unlikely(ring->count_dw < count_dw)) { - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); - return; - } - - occupied = ring->wptr & ring->buf_mask; - dst = (void *)&ring->ring[occupied]; - chunk1 = ring->buf_mask + 1 - occupied; - chunk1 = (chunk1 >= count_dw) ? 
count_dw: chunk1; - chunk2 = count_dw - chunk1; - chunk1 <<= 2; - chunk2 <<= 2; - - if (chunk1) - memcpy(dst, src, chunk1); - - if (chunk2) { - src += chunk1; - dst = (void *)ring->ring; - memcpy(dst, src, chunk2); - } - - ring->wptr += count_dw; - ring->wptr &= ring->ptr_mask; - ring->count_dw -= count_dw; -} - static inline struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring) { @@ -1898,7 +1761,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) -#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r)) #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) #define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc)) #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) @@ -1911,8 +1773,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) -#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s)) -#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) @@ -1927,7 +1787,8 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_need_post(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); -void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes); +void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, + u64 num_vis_bytes); void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); @@ -1943,7 +1804,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); +void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 06879d1dcabd..a52795d9b458 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -285,19 +285,20 @@ static int acp_hw_init(void *handle) return 0; else if (r) return r; + if (adev->asic_type != CHIP_STONEY) { + adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); + if (adev->acp.acp_genpd == NULL) + return -ENOMEM; - adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); - if (adev->acp.acp_genpd == NULL) - return -ENOMEM; - - adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; - adev->acp.acp_genpd->gpd.power_off = acp_poweroff; - adev->acp.acp_genpd->gpd.power_on = acp_poweron; + adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; + adev->acp.acp_genpd->gpd.power_off = acp_poweroff; + adev->acp.acp_genpd->gpd.power_on = acp_poweron; - adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; + adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; - pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); + pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); + } adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS, GFP_KERNEL); @@ -319,14 +320,29 @@ static int acp_hw_init(void *handle) return -ENOMEM; } - i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; + switch (adev->asic_type) { + case CHIP_STONEY: + i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | + DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; + break; + default: + i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; + } i2s_pdata[0].cap = DWC_I2S_PLAY; i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000; i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET; i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET; + switch (adev->asic_type) { + case CHIP_STONEY: + i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | + DW_I2S_QUIRK_COMP_PARAM1 | + DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; + break; + default: + i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | + DW_I2S_QUIRK_COMP_PARAM1; + } - i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | - DW_I2S_QUIRK_COMP_PARAM1; i2s_pdata[1].cap = DWC_I2S_RECORD; i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000; i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; @@ -373,12 +389,14 @@ static int acp_hw_init(void *handle) if (r) return r; - for (i = 0; i < ACP_DEVS ; i++) { - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); - r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); - if (r) { - dev_err(dev, "Failed to add dev to genpd\n"); - return r; + if (adev->asic_type != CHIP_STONEY) { + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); + if (r) { + dev_err(dev, "Failed to add dev to genpd\n"); + return r; + } } } @@ -398,20 +416,22 @@ static int acp_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* return early if no ACP */ - if (!adev->acp.acp_genpd) + if (!adev->acp.acp_cell) return 0; - for (i = 0; i < ACP_DEVS ; i++) { - dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); - ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); - /* If removal fails, dont giveup and try rest */ - if (ret) - dev_err(dev, "remove dev from genpd failed\n"); + if (adev->acp.acp_genpd) { + for (i = 0; i < ACP_DEVS ; i++) { + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); + ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); + /* If removal fails, dont giveup and try rest */ + if (ret) + dev_err(dev, "remove dev from genpd failed\n"); + } + kfree(adev->acp.acp_genpd); } mfd_remove_devices(adev->acp.parent); kfree(adev->acp.acp_res); - 
kfree(adev->acp.acp_genpd); kfree(adev->acp.acp_cell); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index ef79551b4cb7..57afad79f55d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -30,10 +30,10 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include "amdgpu.h" +#include "amdgpu_pm.h" #include "amd_acpi.h" #include "atom.h" -extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); /* Call the ATIF method */ /** @@ -289,7 +289,7 @@ out: * handles it. * Returns NOTIFY code */ -int amdgpu_atif_handler(struct amdgpu_device *adev, +static int amdgpu_atif_handler(struct amdgpu_device *adev, struct acpi_bus_event *event) { struct amdgpu_atif *atif = &adev->atif; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 37971d9402e3..5432af39a674 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -27,16 +27,15 @@ #include "amdgpu_gfx.h" #include <linux/module.h> -const struct kfd2kgd_calls *kfd2kgd; const struct kgd2kfd_calls *kgd2kfd; -bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); +bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**); int amdgpu_amdkfd_init(void) { int ret; #if defined(CONFIG_HSA_AMD_MODULE) - int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); + int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**); kgd2kfd_init_p = symbol_request(kgd2kfd_init); @@ -61,8 +60,21 @@ int amdgpu_amdkfd_init(void) return ret; } -bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev) +void amdgpu_amdkfd_fini(void) +{ + if (kgd2kfd) { + kgd2kfd->exit(); + symbol_put(kgd2kfd_init); + } +} + +void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) { + const struct kfd2kgd_calls *kfd2kgd; + + if (!kgd2kfd) + return; + switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_KAVERI: @@ -73,25 +85,12 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev) kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); break; default: - return false; + dev_info(adev->dev, "kfd not supported on this ASIC\n"); + return; } - return true; -} - -void amdgpu_amdkfd_fini(void) -{ - if (kgd2kfd) { - kgd2kfd->exit(); - symbol_put(kgd2kfd_init); - } -} - -void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) -{ - if (kgd2kfd) - adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev, - adev->pdev, kfd2kgd); + adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev, + adev->pdev, kfd2kgd); } void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) @@ -184,7 +183,8 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, return -ENOMEM; r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo); + AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0, + &(*mem)->bo); if (r) { dev_err(adev->dev, "failed to allocate BO for amdkfd (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 73f83a10ae14..8d689ab7e429 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -26,6 +26,7 @@ #define AMDGPU_AMDKFD_H_INCLUDED #include <linux/types.h> +#include <linux/mmu_context.h> #include <kgd_kfd_interface.h> struct amdgpu_device; @@ -39,8 +40,6 @@ struct kgd_mem { int amdgpu_amdkfd_init(void); void amdgpu_amdkfd_fini(void); -bool 
amdgpu_amdkfd_load_interface(struct amdgpu_device *adev); - void amdgpu_amdkfd_suspend(struct amdgpu_device *adev); int amdgpu_amdkfd_resume(struct amdgpu_device *adev); void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, @@ -62,4 +61,19 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd); uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd); +#define read_user_wptr(mmptr, wptr, dst) \ + ({ \ + bool valid = false; \ + if ((mmptr) && (wptr)) { \ + if ((mmptr) == current->mm) { \ + valid = !get_user((dst), (wptr)); \ + } else if (current->mm == NULL) { \ + use_mm(mmptr); \ + valid = !get_user((dst), (wptr)); \ + unuse_mm(mmptr); \ + } \ + } \ + valid; \ + }) + #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 5254562fd0f9..b9dbbf9cb8b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -39,6 +39,12 @@ #include "gmc/gmc_7_1_sh_mask.h" #include "cik_structs.h" +enum hqd_dequeue_request_type { + NO_ACTION = 0, + DRAIN_PIPE, + RESET_WAVES +}; + enum { MAX_TRAPID = 8, /* 3 bits in the bitfield. */ MAX_WATCH_ADDRESSES = 4 @@ -96,12 +102,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t hpd_size, uint64_t hpd_gpu_addr); static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr); + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm); static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd); static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); -static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id); static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); @@ -126,6 +135,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid); static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid); + +/* Because of REG_GET_FIELD() being used, we put this function in the + * asic specific file. 
+ */ +static int get_tile_config(struct kgd_dev *kgd, + struct tile_config *config) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + config->gb_addr_config = adev->gfx.config.gb_addr_config; + config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFBANK); + config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + + config->tile_config_ptr = adev->gfx.config.tile_mode_array; + config->num_tile_configs = + ARRAY_SIZE(adev->gfx.config.tile_mode_array); + config->macro_tile_config_ptr = + adev->gfx.config.macrotile_mode_array; + config->num_macro_tile_configs = + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + + return 0; +} static const struct kfd2kgd_calls kfd2kgd = { .init_gtt_mem_allocation = alloc_gtt_mem, @@ -150,7 +186,9 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid, .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, .write_vmid_invalidate_request = write_vmid_invalidate_request, - .get_fw_version = get_fw_version + .get_fw_version = get_fw_version, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = get_tile_config, }; struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) @@ -186,7 +224,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); lock_srbm(kgd, mec, pipe, queue_id, 0); @@ -290,20 +328,38 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) } static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr) + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t wptr_shadow, is_wptr_shadow_valid; struct cik_mqd *m; + uint32_t *mqd_hqd; + uint32_t reg, wptr_val, data; m = get_mqd(mqd); - is_wptr_shadow_valid = !get_user(wptr_shadow, wptr); - if (is_wptr_shadow_valid) - m->cp_hqd_pq_wptr = wptr_shadow; - acquire_queue(kgd, pipe_id, queue_id); - gfx_v7_0_mqd_commit(adev, m); + + /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */ + mqd_hqd = &m->cp_mqd_base_addr_lo; + + for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++) + WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]); + + /* Copy userspace write pointer value to register. + * Activate doorbell logic to monitor subsequent changes. 
+ */ + data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, + CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); + WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data); + + if (read_user_wptr(mm, wptr, wptr_val)) + WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); + + data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); + WREG32(mmCP_HQD_ACTIVE, data); + release_queue(kgd); return 0; @@ -382,30 +438,99 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t temp; - int timeout = utimeout; + enum hqd_dequeue_request_type type; + unsigned long flags, end_jiffies; + int retry; acquire_queue(kgd, pipe_id, queue_id); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0); - WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type); + switch (reset_type) { + case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: + type = DRAIN_PIPE; + break; + case KFD_PREEMPT_TYPE_WAVEFRONT_RESET: + type = RESET_WAVES; + break; + default: + type = DRAIN_PIPE; + break; + } + + /* Workaround: If IQ timer is active and the wait time is close to or + * equal to 0, dequeueing is not safe. Wait until either the wait time + * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is + * cleared before continuing. Also, ensure wait times are set to at + * least 0x3. + */ + local_irq_save(flags); + preempt_disable(); + retry = 5000; /* wait for 500 usecs at maximum */ + while (true) { + temp = RREG32(mmCP_HQD_IQ_TIMER); + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) { + pr_debug("HW is processing IQ\n"); + goto loop; + } + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) { + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE) + == 3) /* SEM-rearm is safe */ + break; + /* Wait time 3 is safe for CP, but our MMIO read/write + * time is close to 1 microsecond, so check for 10 to + * leave more buffer room + */ + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME) + >= 10) + break; + pr_debug("IQ timer is active\n"); + } else + break; +loop: + if (!retry) { + pr_err("CP HQD IQ timer status time out\n"); + break; + } + ndelay(100); + --retry; + } + retry = 1000; + while (true) { + temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST); + if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK)) + break; + pr_debug("Dequeue request is pending\n"); + if (!retry) { + pr_err("CP HQD dequeue request time out\n"); + break; + } + ndelay(100); + --retry; + } + local_irq_restore(flags); + preempt_enable(); + + WREG32(mmCP_HQD_DEQUEUE_REQUEST, type); + + end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { temp = RREG32(mmCP_HQD_ACTIVE); - if (temp & CP_HQD_ACTIVE__ACTIVE_MASK) + if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) break; - if (timeout <= 0) { - pr_err("kfd: cp queue preemption time out.\n"); + if (time_after(jiffies, end_jiffies)) { + pr_err("cp queue preemption time out\n"); release_queue(kgd); return -ETIME; } - msleep(20); - timeout -= 20; + usleep_range(500, 1000); } release_queue(kgd); @@ -556,6 +681,16 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid) WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); } +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid) +{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + + lock_srbm(kgd, 0, 0, 0, vmid); + 
WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); + unlock_srbm(kgd); +} + static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; @@ -566,42 +701,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) switch (type) { case KGD_ENGINE_PFP: hdr = (const union amdgpu_firmware_header *) - adev->gfx.pfp_fw->data; + adev->gfx.pfp_fw->data; break; case KGD_ENGINE_ME: hdr = (const union amdgpu_firmware_header *) - adev->gfx.me_fw->data; + adev->gfx.me_fw->data; break; case KGD_ENGINE_CE: hdr = (const union amdgpu_firmware_header *) - adev->gfx.ce_fw->data; + adev->gfx.ce_fw->data; break; case KGD_ENGINE_MEC1: hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec_fw->data; + adev->gfx.mec_fw->data; break; case KGD_ENGINE_MEC2: hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec2_fw->data; + adev->gfx.mec2_fw->data; break; case KGD_ENGINE_RLC: hdr = (const union amdgpu_firmware_header *) - adev->gfx.rlc_fw->data; + adev->gfx.rlc_fw->data; break; case KGD_ENGINE_SDMA1: hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[0].fw->data; + adev->sdma.instance[0].fw->data; break; case KGD_ENGINE_SDMA2: hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[1].fw->data; + adev->sdma.instance[1].fw->data; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 133d06671e46..fb6e5dbd5a03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -39,6 +39,12 @@ #include "vi_structs.h" #include "vid.h" +enum hqd_dequeue_request_type { + NO_ACTION = 0, + DRAIN_PIPE, + RESET_WAVES +}; + struct cik_sdma_rlc_registers; /* @@ -55,12 +61,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t hpd_size, uint64_t hpd_gpu_addr); static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr); + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm); static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd); static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id); static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, @@ -85,6 +94,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, uint8_t vmid); static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid); static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid); + +/* Because of REG_GET_FIELD() being used, we put this function in the + * asic specific file. 
+ */ +static int get_tile_config(struct kgd_dev *kgd, + struct tile_config *config) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + config->gb_addr_config = adev->gfx.config.gb_addr_config; + config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFBANK); + config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + + config->tile_config_ptr = adev->gfx.config.tile_mode_array; + config->num_tile_configs = + ARRAY_SIZE(adev->gfx.config.tile_mode_array); + config->macro_tile_config_ptr = + adev->gfx.config.macrotile_mode_array; + config->num_macro_tile_configs = + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + + return 0; +} static const struct kfd2kgd_calls kfd2kgd = { .init_gtt_mem_allocation = alloc_gtt_mem, @@ -111,12 +147,15 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, .write_vmid_invalidate_request = write_vmid_invalidate_request, - .get_fw_version = get_fw_version + .get_fw_version = get_fw_version, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = get_tile_config, }; struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) { return (struct kfd2kgd_calls *)&kfd2kgd; + return (struct kfd2kgd_calls *)&kfd2kgd; } static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) @@ -147,7 +186,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); lock_srbm(kgd, mec, pipe, queue_id, 0); @@ -216,7 +255,7 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) uint32_t mec; uint32_t pipe; - mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); lock_srbm(kgd, mec, pipe, 0, 0); @@ -244,20 +283,67 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) } static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr) + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm) { - struct vi_mqd *m; - uint32_t shadow_wptr, valid_wptr; struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct vi_mqd *m; + uint32_t *mqd_hqd; + uint32_t reg, wptr_val, data; m = get_mqd(mqd); - valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr)); - if (valid_wptr == 0) - m->cp_hqd_pq_wptr = shadow_wptr; - acquire_queue(kgd, pipe_id, queue_id); - gfx_v8_0_mqd_commit(adev, mqd); + + /* HIQ is set during driver init period with vmid set to 0*/ + if (m->cp_hqd_vmid == 0) { + uint32_t value, mec, pipe; + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", + mec, pipe, queue_id); + value = RREG32(mmRLC_CP_SCHEDULERS); + value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, + ((mec << 5) | (pipe << 3) | queue_id | 0x80)); + WREG32(mmRLC_CP_SCHEDULERS, value); + } + + /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. 
*/ + mqd_hqd = &m->cp_mqd_base_addr_lo; + + for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++) + WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]); + + /* Tonga errata: EOP RPTR/WPTR should be left unmodified. + * This is safe since EOP RPTR==WPTR for any inactive HQD + * on ASICs that do not support context-save. + * EOP writes/reads can start anywhere in the ring. + */ + if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) { + WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr); + WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr); + WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem); + } + + for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++) + WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]); + + /* Copy userspace write pointer value to register. + * Activate doorbell logic to monitor subsequent changes. + */ + data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, + CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); + WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data); + + if (read_user_wptr(mm, wptr, wptr_val)) + WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); + + data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); + WREG32(mmCP_HQD_ACTIVE, data); + release_queue(kgd); return 0; @@ -308,29 +394,102 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t temp; - int timeout = utimeout; + enum hqd_dequeue_request_type type; + unsigned long flags, end_jiffies; + int retry; + struct vi_mqd *m = get_mqd(mqd); acquire_queue(kgd, pipe_id, queue_id); - WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type); + if (m->cp_hqd_vmid == 0) + WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0); + + switch (reset_type) { + case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: + type = DRAIN_PIPE; + break; + case KFD_PREEMPT_TYPE_WAVEFRONT_RESET: + type = RESET_WAVES; + break; + default: + type = DRAIN_PIPE; + break; + } + /* Workaround: If IQ timer is active and the wait time is close to or + * equal to 0, dequeueing is not safe. Wait until either the wait time + * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is + * cleared before continuing. Also, ensure wait times are set to at + * least 0x3. 
+ */ + local_irq_save(flags); + preempt_disable(); + retry = 5000; /* wait for 500 usecs at maximum */ + while (true) { + temp = RREG32(mmCP_HQD_IQ_TIMER); + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) { + pr_debug("HW is processing IQ\n"); + goto loop; + } + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) { + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE) + == 3) /* SEM-rearm is safe */ + break; + /* Wait time 3 is safe for CP, but our MMIO read/write + * time is close to 1 microsecond, so check for 10 to + * leave more buffer room + */ + if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME) + >= 10) + break; + pr_debug("IQ timer is active\n"); + } else + break; +loop: + if (!retry) { + pr_err("CP HQD IQ timer status time out\n"); + break; + } + ndelay(100); + --retry; + } + retry = 1000; + while (true) { + temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST); + if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK)) + break; + pr_debug("Dequeue request is pending\n"); + + if (!retry) { + pr_err("CP HQD dequeue request time out\n"); + break; + } + ndelay(100); + --retry; + } + local_irq_restore(flags); + preempt_enable(); + + WREG32(mmCP_HQD_DEQUEUE_REQUEST, type); + + end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { temp = RREG32(mmCP_HQD_ACTIVE); - if (temp & CP_HQD_ACTIVE__ACTIVE_MASK) + if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) break; - if (timeout <= 0) { - pr_err("kfd: cp queue preemption time out.\n"); + if (time_after(jiffies, end_jiffies)) { + pr_err("cp queue preemption time out.\n"); release_queue(kgd); return -ETIME; } - msleep(20); - timeout -= 20; + usleep_range(500, 1000); } release_queue(kgd); @@ -444,6 +603,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, return 0; } +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid) +{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + + lock_srbm(kgd, 0, 0, 0, vmid); + WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); + unlock_srbm(kgd); +} + static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; @@ -454,42 +623,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) switch (type) { case KGD_ENGINE_PFP: hdr = (const union amdgpu_firmware_header *) - adev->gfx.pfp_fw->data; + adev->gfx.pfp_fw->data; break; case KGD_ENGINE_ME: hdr = (const union amdgpu_firmware_header *) - adev->gfx.me_fw->data; + adev->gfx.me_fw->data; break; case KGD_ENGINE_CE: hdr = (const union amdgpu_firmware_header *) - adev->gfx.ce_fw->data; + adev->gfx.ce_fw->data; break; case KGD_ENGINE_MEC1: hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec_fw->data; + adev->gfx.mec_fw->data; break; case KGD_ENGINE_MEC2: hdr = (const union amdgpu_firmware_header *) - adev->gfx.mec2_fw->data; + adev->gfx.mec2_fw->data; break; case KGD_ENGINE_RLC: hdr = (const union amdgpu_firmware_header *) - adev->gfx.rlc_fw->data; + adev->gfx.rlc_fw->data; break; case KGD_ENGINE_SDMA1: hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[0].fw->data; + adev->sdma.instance[0].fw->data; break; case KGD_ENGINE_SDMA2: hdr = (const union amdgpu_firmware_header *) - adev->sdma.instance[1].fw->data; + adev->sdma.instance[1].fw->data; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1e8e1123ddf4..ce443586a0c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1686,7 +1686,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock) { uint32_t bios_6_scratch; - bios_6_scratch = RREG32(mmBIOS_SCRATCH_6); + bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6); if (lock) { bios_6_scratch |= ATOM_S6_CRITICAL_STATE; @@ -1696,15 +1696,17 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock) bios_6_scratch |= ATOM_S6_ACC_MODE; } - WREG32(mmBIOS_SCRATCH_6, bios_6_scratch); + WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch); } void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev) { uint32_t bios_2_scratch, bios_6_scratch; - bios_2_scratch = RREG32(mmBIOS_SCRATCH_2); - bios_6_scratch = RREG32(mmBIOS_SCRATCH_6); + adev->bios_scratch_reg_offset = mmBIOS_SCRATCH_0; + + bios_2_scratch = RREG32(adev->bios_scratch_reg_offset + 2); + bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6); /* let the bios control the backlight */ bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; @@ -1715,8 +1717,8 @@ void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev) /* clear the vbios dpms state */ bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE; - WREG32(mmBIOS_SCRATCH_2, bios_2_scratch); - WREG32(mmBIOS_SCRATCH_6, bios_6_scratch); + WREG32(adev->bios_scratch_reg_offset + 2, bios_2_scratch); + WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch); } void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev) @@ -1724,7 +1726,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev) int i; for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i); + adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i); } void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev) @@ -1738,20 +1740,30 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev) adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK; for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]); + WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]); } void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung) { - u32 tmp = RREG32(mmBIOS_SCRATCH_3); + u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3); if (hung) tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; else tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; - WREG32(mmBIOS_SCRATCH_3, tmp); + WREG32(adev->bios_scratch_reg_offset + 3, tmp); +} + +bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7); + + if (tmp & ATOM_S7_ASIC_INIT_COMPLETE_MASK) + return false; + else + return true; } /* Atom needs data in little endian format diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 38d0fe32e5cd..b0d5d1d7fdba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -200,6 +200,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung); +bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev); void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 4bdda56fccee..f9ffe8ef0cd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -66,41 +66,6 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev) } } -void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i); -} - -void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev) -{ - int i; - - /* - * VBIOS will check ASIC_INIT_COMPLETE bit to decide if - * execute ASIC_Init posting via driver - */ - adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK; - - for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++) - WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]); -} - -void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev, - bool hung) -{ - u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3); - - if (hung) - tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; - else - tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; - - WREG32(adev->bios_scratch_reg_offset + 3, tmp); -} - int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) { struct atom_context *ctx = adev->mode_info.atom_context; @@ -130,3 +95,129 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) ctx->scratch_size_bytes = usage_bytes; return 0; } + +union igp_info { + struct atom_integrated_system_info_v1_11 v11; +}; + +/* + * Return vram width from integrated system info table, if available, + * or 0 if not. + */ +int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + integratedsysteminfo); + u16 data_offset, size; + union igp_info *igp_info; + u8 frev, crev; + + /* get any igp specific overrides */ + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, + &frev, &crev, &data_offset)) { + igp_info = (union igp_info *) + (mode_info->atom_context->bios + data_offset); + switch (crev) { + case 11: + return igp_info->v11.umachannelnumber * 64; + default: + return 0; + } + } + + return 0; +} + +union firmware_info { + struct atom_firmware_info_v3_1 v31; +}; + +union smu_info { + struct atom_smu_info_v3_1 v31; +}; + +union umc_info { + struct atom_umc_info_v3_1 v31; +}; + +int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct amdgpu_pll *spll = &adev->clock.spll; + struct amdgpu_pll *mpll = &adev->clock.mpll; + uint8_t frev, crev; + uint16_t data_offset; + int ret = -EINVAL, index; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union firmware_info *firmware_info = + (union firmware_info *)(mode_info->atom_context->bios + + data_offset); + + adev->clock.default_sclk = + le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz); + adev->clock.default_mclk = + le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz); + + adev->pm.current_sclk = adev->clock.default_sclk; + adev->pm.current_mclk = adev->clock.default_mclk; + + /* not technically a clock, but... 
*/ + adev->mode_info.firmware_flags = + le32_to_cpu(firmware_info->v31.firmware_capability); + + ret = 0; + } + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + smu_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union smu_info *smu_info = + (union smu_info *)(mode_info->atom_context->bios + + data_offset); + + /* system clock */ + spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz); + + spll->reference_div = 0; + spll->min_post_div = 1; + spll->max_post_div = 1; + spll->min_ref_div = 2; + spll->max_ref_div = 0xff; + spll->min_feedback_div = 4; + spll->max_feedback_div = 0xff; + spll->best_vco = 0; + + ret = 0; + } + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + umc_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union umc_info *umc_info = + (union umc_info *)(mode_info->atom_context->bios + + data_offset); + + /* memory clock */ + mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz); + + mpll->reference_div = 0; + mpll->min_post_div = 1; + mpll->max_post_div = 1; + mpll->min_ref_div = 2; + mpll->max_ref_div = 0xff; + mpll->min_feedback_div = 4; + mpll->max_feedback_div = 0xff; + mpll->best_vco = 0; + + ret = 0; + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index a2c3ebe22c71..288b97e54347 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -26,10 +26,8 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); -void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev); -void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev); -void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev, - bool hung); int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 1beae5b930d0..63ec1e1bb6aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, for (i = 0; i < n; i++) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence, - false); + false, false); if (r) goto exit_do_move; r = dma_fence_wait(fence, false); @@ -81,7 +81,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, n = AMDGPU_BENCHMARK_ITERATIONS; r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, - NULL, &sobj); + NULL, 0, &sobj); if (r) { goto out_cleanup; } @@ -94,7 +94,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, goto out_cleanup; } r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, - NULL, &dobj); + NULL, 0, &dobj); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 365e735f6647..c21adf60a7f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -86,19 +86,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size) return false; } -static bool is_atom_fw(uint8_t *bios) -{ - uint16_t bios_header_start = bios[0x48] | (bios[0x49] << 8); - uint8_t frev = bios[bios_header_start + 2]; - uint8_t crev = bios[bios_header_start + 3]; - - if ((frev < 3) || - ((frev == 3) && (crev < 3))) - return false; - - return true; -} - /* If you boot an IGP board with a discrete card as the primary, * the IGP rom is not accessible via the rom bar as the IGP rom is * part of the system bios. On boot, the system bios puts a @@ -117,7 +104,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) adev->bios = NULL; vram_base = pci_resource_start(adev->pdev, 0); - bios = ioremap(vram_base, size); + bios = ioremap_wc(vram_base, size); if (!bios) { return false; } @@ -455,6 +442,6 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) return false; success: - adev->is_atom_fw = is_atom_fw(adev->bios); + adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? true : false; return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 5e771bc11b00..59089e027f4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -83,7 +83,7 @@ static int amdgpu_bo_list_create(struct amdgpu_device *adev, r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL); mutex_unlock(&fpriv->bo_list_lock); if (r < 0) { - kfree(list); + amdgpu_bo_list_free(list); return r; } *id = r; @@ -136,7 +136,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, } bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); if (usermm) { @@ -156,11 +156,11 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, entry->tv.bo = &entry->robj->tbo; entry->tv.shared = !entry->robj->prime_shared_count; - if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) + if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS) gds_obj = entry->robj; - if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) + if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS) gws_obj = entry->robj; - if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) + if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA) oa_obj = entry->robj; total_size += amdgpu_bo_size(entry->robj); @@ -270,7 +270,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, struct amdgpu_fpriv *fpriv = filp->driver_priv; union drm_amdgpu_bo_list *args = data; uint32_t handle = args->in.list_handle; - const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr; + const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr); struct drm_amdgpu_bo_list_entry *info; struct amdgpu_bo_list *list; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index c0a806280257..fd435a96481c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -124,7 +124,7 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, true, domain, flags, NULL, &placement, NULL, - &obj); + 0, &obj); if (ret) { DRM_ERROR("(%d) bo create failed\n", ret); return ret; @@ -166,7 +166,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h r = amdgpu_bo_reserve(obj, 
true); if (unlikely(r != 0)) return r; - r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains, + r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains, min_offset, max_offset, mcaddr); amdgpu_bo_unreserve(obj); return r; @@ -240,6 +240,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device, return RREG32_DIDT(index); case CGS_IND_REG_GC_CAC: return RREG32_GC_CAC(index); + case CGS_IND_REG_SE_CAC: + return RREG32_SE_CAC(index); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return 0; @@ -266,6 +268,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, return WREG32_DIDT(index, value); case CGS_IND_REG_GC_CAC: return WREG32_GC_CAC(index, value); + case CGS_IND_REG_SE_CAC: + return WREG32_SE_CAC(index, value); case CGS_IND_REG__AUDIO_ENDPT: DRM_ERROR("audio endpt register access not implemented.\n"); return; @@ -610,6 +614,17 @@ static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device, return 0; } +static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device, + bool lock) +{ + CGS_FUNC_ADEV; + + if (lock) + mutex_lock(&adev->grbm_idx_mutex); + else + mutex_unlock(&adev->grbm_idx_mutex); +} + static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info) @@ -644,7 +659,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); if (CGS_UCODE_ID_CP_MEC == type) - info->image_size = (header->jt_offset) << 2; + info->image_size = le32_to_cpu(header->jt_offset) << 2; info->fw_version = amdgpu_get_firmware_version(cgs_device, type); info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); @@ -719,7 +734,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, strcpy(fw_name, "amdgpu/polaris12_smc.bin"); break; case CHIP_VEGA10: - strcpy(fw_name, "amdgpu/vega10_smc.bin"); + if ((adev->pdev->device == 0x687f) && + ((adev->pdev->revision == 0xc0) || + (adev->pdev->revision == 0xc1) || + (adev->pdev->revision == 0xc3))) + strcpy(fw_name, "amdgpu/vega10_acg_smc.bin"); + else + strcpy(fw_name, "amdgpu/vega10_smc.bin"); break; default: DRM_ERROR("SMC firmware not supported\n"); @@ -1117,6 +1138,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { .query_system_info = amdgpu_cgs_query_system_info, .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled, .enter_safe_mode = amdgpu_cgs_enter_safe_mode, + .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx, }; static const struct cgs_os_ops amdgpu_cgs_os_ops = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 5599c01b265d..269b835571eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -54,7 +54,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, *offset = data->offset; - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { amdgpu_bo_unref(&p->uf_entry.robj); @@ -90,7 +90,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } /* get chunks */ - chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks); + chunk_array_user = u64_to_user_ptr(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, sizeof(uint64_t)*cs->in.num_chunks)) { ret = -EFAULT; @@ -110,7 +110,7 @@ static int amdgpu_cs_parser_init(struct 
amdgpu_cs_parser *p, void *data) struct drm_amdgpu_cs_chunk user_chunk; uint32_t __user *cdata; - chunk_ptr = (void __user *)(uintptr_t)chunk_array[i]; + chunk_ptr = u64_to_user_ptr(chunk_array[i]); if (copy_from_user(&user_chunk, chunk_ptr, sizeof(struct drm_amdgpu_cs_chunk))) { ret = -EFAULT; @@ -121,7 +121,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) p->chunks[i].length_dw = user_chunk.length_dw; size = p->chunks[i].length_dw; - cdata = (void __user *)(uintptr_t)user_chunk.chunk_data; + cdata = u64_to_user_ptr(user_chunk.chunk_data); p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); if (p->chunks[i].kdata == NULL) { @@ -223,10 +223,11 @@ static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes) * ticks. The accumulated microseconds (us) are converted to bytes and * returned. */ -static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) +static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, + u64 *max_bytes, + u64 *max_vis_bytes) { s64 time_us, increment_us; - u64 max_bytes; u64 free_vram, total_vram, used_vram; /* Allow a maximum of 200 accumulated ms. This is basically per-IB @@ -238,11 +239,14 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) */ const s64 us_upper_bound = 200000; - if (!adev->mm_stats.log2_max_MBps) - return 0; + if (!adev->mm_stats.log2_max_MBps) { + *max_bytes = 0; + *max_vis_bytes = 0; + return; + } total_vram = adev->mc.real_vram_size - adev->vram_pin_size; - used_vram = atomic64_read(&adev->vram_usage); + used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram; spin_lock(&adev->mm_stats.lock); @@ -280,23 +284,46 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us); } - /* This returns 0 if the driver is in debt to disallow (optional) + /* This is set to 0 if the driver is in debt to disallow (optional) * buffer moves. */ - max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); + *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); + + /* Do the same for visible VRAM if half of it is free */ + if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { + u64 total_vis_vram = adev->mc.visible_vram_size; + u64 used_vis_vram = + amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); + + if (used_vis_vram < total_vis_vram) { + u64 free_vis_vram = total_vis_vram - used_vis_vram; + adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis + + increment_us, us_upper_bound); + + if (free_vis_vram >= total_vis_vram / 2) + adev->mm_stats.accum_us_vis = + max(bytes_to_us(adev, free_vis_vram / 2), + adev->mm_stats.accum_us_vis); + } + + *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis); + } else { + *max_vis_bytes = 0; + } spin_unlock(&adev->mm_stats.lock); - return max_bytes; } /* Report how many bytes have really been moved for the last command * submission. This can result in a debt that can stop buffer migrations * temporarily. 
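
The throttling hunk above keeps a per-device credit of accumulated microseconds, caps it at 200 ms, and converts it into a byte budget for buffer moves through the bytes_to_us()/us_to_bytes() helpers; the same credit/cap scheme is now applied separately to accum_us_vis for CPU-visible VRAM. The standalone C sketch below illustrates only that arithmetic: the bandwidth value, the state variables and the exact conversion are assumptions made for the example, not the driver's real implementation.

#include <stdint.h>
#include <stdio.h>

/*
 * Microseconds -> bytes: with copy bandwidth of 2^log2_max_MBps MB/s and
 * 1 MB taken as roughly 10^6 bytes, microseconds * MB/s is roughly bytes.
 * This mirrors the idea behind us_to_bytes(); treat it as illustrative.
 */
static int64_t us_to_bytes(int64_t us, unsigned log2_max_MBps)
{
	if (us <= 0 || !log2_max_MBps)
		return 0;
	return us << log2_max_MBps;
}

int main(void)
{
	const int64_t us_upper_bound = 200000;	/* 200 ms cap, as in the patch */
	unsigned log2_max_MBps = 13;		/* assumed ~8 GB/s of copy bandwidth */
	int64_t accum_us = 50000;		/* previously accumulated credit */
	int64_t increment_us = 10000;		/* credit earned since the last submission */

	accum_us += increment_us;
	if (accum_us > us_upper_bound)
		accum_us = us_upper_bound;

	printf("max_bytes for this submission: %lld\n",
	       (long long)us_to_bytes(accum_us, log2_max_MBps));
	return 0;
}
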
*/ -void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes) +void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, + u64 num_vis_bytes) { spin_lock(&adev->mm_stats.lock); adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes); + adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes); spin_unlock(&adev->mm_stats.lock); } @@ -304,7 +331,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - u64 initial_bytes_moved; + u64 initial_bytes_moved, bytes_moved; uint32_t domain; int r; @@ -314,17 +341,35 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, /* Don't move this buffer if we have depleted our allowance * to move it. Don't move anything if the threshold is zero. */ - if (p->bytes_moved < p->bytes_moved_threshold) - domain = bo->prefered_domains; - else + if (p->bytes_moved < p->bytes_moved_threshold) { + if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { + /* And don't move a CPU_ACCESS_REQUIRED BO to limited + * visible VRAM if we've depleted our allowance to do + * that. + */ + if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) + domain = bo->preferred_domains; + else + domain = bo->allowed_domains; + } else { + domain = bo->preferred_domains; + } + } else { domain = bo->allowed_domains; + } retry: amdgpu_ttm_placement_from_domain(bo, domain); initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - - initial_bytes_moved; + bytes_moved = atomic64_read(&adev->num_bytes_moved) - + initial_bytes_moved; + p->bytes_moved += bytes_moved; + if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + bo->tbo.mem.mem_type == TTM_PL_VRAM && + bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) + p->bytes_moved_vis += bytes_moved; if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { domain = bo->allowed_domains; @@ -350,7 +395,8 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, struct amdgpu_bo_list_entry *candidate = p->evictable; struct amdgpu_bo *bo = candidate->robj; struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - u64 initial_bytes_moved; + u64 initial_bytes_moved, bytes_moved; + bool update_bytes_moved_vis; uint32_t other; /* If we reached our current BO we can forget it */ @@ -370,10 +416,17 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, /* Good we can try to move this BO somewhere else */ amdgpu_ttm_placement_from_domain(bo, other); + update_bytes_moved_vis = + adev->mc.visible_vram_size < adev->mc.real_vram_size && + bo->tbo.mem.mem_type == TTM_PL_VRAM && + bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT; initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - + bytes_moved = atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved; + p->bytes_moved += bytes_moved; + if (update_bytes_moved_vis) + p->bytes_moved_vis += bytes_moved; if (unlikely(r)) break; @@ -554,8 +607,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, list_splice(&need_pages, &p->validated); } - p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); + amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, + 
&p->bytes_moved_vis_threshold); p->bytes_moved = 0; + p->bytes_moved_vis = 0; p->evictable = list_last_entry(&p->validated, struct amdgpu_bo_list_entry, tv.head); @@ -579,8 +634,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, goto error_validate; } - amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved); - + amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, + p->bytes_moved_vis); fpriv->vm.last_eviction_counter = atomic64_read(&p->adev->num_evictions); @@ -619,10 +674,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } error_validate: - if (r) { - amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm); + if (r) ttm_eu_backoff_reservation(&p->ticket, &p->validated); - } error_free_pages: @@ -670,21 +723,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) * If error is set than unvalidate buffer, otherwise just free memory * used by parsing context. **/ -static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) +static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, + bool backoff) { - struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; unsigned i; - if (!error) { - amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm); - + if (!error) ttm_eu_fence_buffer_objects(&parser->ticket, &parser->validated, parser->fence); - } else if (backoff) { + else if (backoff) ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); - } for (i = 0; i < parser->num_post_dep_syncobjs; i++) drm_syncobj_put(parser->post_dep_syncobjs[i]); @@ -737,7 +787,8 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) if (amdgpu_sriov_vf(adev)) { struct dma_fence *f; - bo_va = vm->csa_bo_va; + + bo_va = fpriv->csa_va; BUG_ON(!bo_va); r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) @@ -774,7 +825,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) } - r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync); + r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync); if (amdgpu_vm_debug && p->bo_list) { /* Invalidate all BOs to test for userspace bugs */ @@ -984,7 +1035,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, { int r; struct dma_fence *fence; - r = drm_syncobj_fence_get(p->filp, handle, &fence); + r = drm_syncobj_find_fence(p->filp, handle, &fence); if (r) return r; @@ -1383,7 +1434,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, if (fences == NULL) return -ENOMEM; - fences_user = (void __user *)(uintptr_t)(wait->in.fences); + fences_user = u64_to_user_ptr(wait->in.fences); if (copy_from_user(fences, fences_user, sizeof(struct drm_amdgpu_fence) * fence_count)) { r = -EFAULT; @@ -1436,7 +1487,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, addr > mapping->last) continue; - *bo = lobj->bo_va->bo; + *bo = lobj->bo_va->base.bo; return mapping; } @@ -1445,7 +1496,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, addr > mapping->last) continue; - *bo = lobj->bo_va->bo; + *bo = lobj->bo_va->base.bo; return mapping; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4a8fc15467cf..1a459ac63df4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -53,6 +53,9 @@ #include "bif/bif_4_1_d.h" #include <linux/pci.h> #include <linux/firmware.h> +#include "amdgpu_vf_error.h" + +#include "amdgpu_amdkfd.h" MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); @@ -128,6 
+131,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, { trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); + if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { + adev->last_mm_index = v; + } + if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) { BUG_ON(in_interrupt()); return amdgpu_virt_kiq_wreg(adev, reg, v); @@ -143,6 +150,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); } + + if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { + udelay(500); + } } u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) @@ -157,6 +168,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg) void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { + if (adev->asic_type >= CHIP_VEGA10 && reg == 0) { + adev->last_mm_index = v; + } if ((reg * 4) < adev->rio_mem_size) iowrite32(v, adev->rio_mem + (reg * 4)); @@ -164,6 +178,10 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v) iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4)); iowrite32(v, adev->rio_mem + (mmMM_DATA * 4)); } + + if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) { + udelay(500); + } } /** @@ -318,51 +336,16 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) { - int r; - - if (adev->vram_scratch.robj == NULL) { - r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, - PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &adev->vram_scratch.robj); - if (r) { - return r; - } - } - - r = amdgpu_bo_reserve(adev->vram_scratch.robj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(adev->vram_scratch.robj, - AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->vram_scratch.robj); - return r; - } - r = amdgpu_bo_kmap(adev->vram_scratch.robj, - (void **)&adev->vram_scratch.ptr); - if (r) - amdgpu_bo_unpin(adev->vram_scratch.robj); - amdgpu_bo_unreserve(adev->vram_scratch.robj); - - return r; + return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->vram_scratch.robj, + &adev->vram_scratch.gpu_addr, + (void **)&adev->vram_scratch.ptr); } static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) { - int r; - - if (adev->vram_scratch.robj == NULL) { - return; - } - r = amdgpu_bo_reserve(adev->vram_scratch.robj, true); - if (likely(r == 0)) { - amdgpu_bo_kunmap(adev->vram_scratch.robj); - amdgpu_bo_unpin(adev->vram_scratch.robj); - amdgpu_bo_unreserve(adev->vram_scratch.robj); - } - amdgpu_bo_unref(&adev->vram_scratch.robj); + amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); } /** @@ -521,7 +504,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) int r; if (adev->wb.wb_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t), + /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */ + r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->wb.wb_obj, &adev->wb.gpu_addr, (void **)&adev->wb.wb); @@ -552,32 +536,10 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) { unsigned long offset = find_first_zero_bit(adev->wb.used, 
adev->wb.num_wb); - if (offset < adev->wb.num_wb) { - __set_bit(offset, adev->wb.used); - *wb = offset; - return 0; - } else { - return -EINVAL; - } -} -/** - * amdgpu_wb_get_64bit - Allocate a wb entry - * - * @adev: amdgpu_device pointer - * @wb: wb index - * - * Allocate a wb slot for use by the driver (all asics). - * Returns 0 on success or -EINVAL on failure. - */ -int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb) -{ - unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used, - adev->wb.num_wb, 0, 2, 7, 0); - if ((offset + 1) < adev->wb.num_wb) { + if (offset < adev->wb.num_wb) { __set_bit(offset, adev->wb.used); - __set_bit(offset + 1, adev->wb.used); - *wb = offset; + *wb = offset * 8; /* convert to dw offset */ return 0; } else { return -EINVAL; @@ -599,22 +561,6 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) } /** - * amdgpu_wb_free_64bit - Free a wb entry - * - * @adev: amdgpu_device pointer - * @wb: wb index - * - * Free a wb slot allocated for use by the driver (all asics) - */ -void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb) -{ - if ((wb + 1) < adev->wb.num_wb) { - __clear_bit(wb, adev->wb.used); - __clear_bit(wb + 1, adev->wb.used); - } -} - -/** * amdgpu_vram_location - try to find VRAM location * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations @@ -665,7 +611,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 } /** - * amdgpu_gtt_location - try to find GTT location + * amdgpu_gart_location - try to find GTT location * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations * @@ -676,28 +622,28 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 * * FIXME: when reducing GTT size align new size on power of 2. 
*/ -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) +void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { u64 size_af, size_bf; - size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; - size_bf = mc->vram_start & ~mc->gtt_base_align; + size_af = adev->mc.mc_mask - mc->vram_end; + size_bf = mc->vram_start; if (size_bf > size_af) { - if (mc->gtt_size > size_bf) { + if (mc->gart_size > size_bf) { dev_warn(adev->dev, "limiting GTT\n"); - mc->gtt_size = size_bf; + mc->gart_size = size_bf; } - mc->gtt_start = 0; + mc->gart_start = 0; } else { - if (mc->gtt_size > size_af) { + if (mc->gart_size > size_af) { dev_warn(adev->dev, "limiting GTT\n"); - mc->gtt_size = size_af; + mc->gart_size = size_af; } - mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; + mc->gart_start = mc->vram_end + 1; } - mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; + mc->gart_end = mc->gart_start + mc->gart_size - 1; dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", - mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); + mc->gart_size >> 20, mc->gart_start, mc->gart_end); } /* @@ -720,7 +666,12 @@ bool amdgpu_need_post(struct amdgpu_device *adev) adev->has_hw_reset = false; return true; } - /* then check MEM_SIZE, in case the crtcs are off */ + + /* bios scratch used on CIK+ */ + if (adev->asic_type >= CHIP_BONAIRE) + return amdgpu_atombios_scratch_need_asic_init(adev); + + /* check MEM_SIZE for older asics */ reg = amdgpu_asic_get_config_memsize(adev); if ((reg != 0) && (reg != 0xffffffff)) @@ -1031,19 +982,6 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } -/** - * amdgpu_check_pot_argument - check that argument is a power of two - * - * @arg: value to check - * - * Validates that a certain argument is a power of two (all asics). - * Returns true if argument is valid. 
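
The amdgpu_gart_location() rework above drops the gtt_base_align handling and simply places the GART aperture in whichever gap of the MC address space, before or after VRAM, is larger, shrinking it when it does not fit. A minimal standalone illustration of that decision follows, using invented addresses and the 256 MB default gart size introduced by this series.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mc_mask    = 0xFFFFFFFFFFULL;		/* 40-bit MC address space (example) */
	uint64_t vram_start = 0x0000000000ULL;
	uint64_t vram_end   = 0x01FFFFFFFFULL;		/* 8 GB of VRAM (example) */
	uint64_t gart_size  = 256ULL << 20;		/* 256 MB, the new default */
	uint64_t gart_start, gart_end;

	uint64_t size_af = mc_mask - vram_end;		/* space after VRAM */
	uint64_t size_bf = vram_start;			/* space before VRAM */

	if (size_bf > size_af) {
		if (gart_size > size_bf)
			gart_size = size_bf;		/* "limiting GTT" */
		gart_start = 0;
	} else {
		if (gart_size > size_af)
			gart_size = size_af;
		gart_start = vram_end + 1;
	}
	gart_end = gart_start + gart_size - 1;

	printf("GART: %lluM 0x%016llX - 0x%016llX\n",
	       (unsigned long long)(gart_size >> 20),
	       (unsigned long long)gart_start,
	       (unsigned long long)gart_end);
	return 0;
}

With VRAM starting at address 0 in this example, size_bf is 0, so the GART always lands directly after VRAM.
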
- */ -static bool amdgpu_check_pot_argument(int arg) -{ - return (arg & (arg - 1)) == 0; -} - static void amdgpu_check_block_size(struct amdgpu_device *adev) { /* defines number of bits in page table versus page directory, @@ -1077,7 +1015,7 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev) if (amdgpu_vm_size == -1) return; - if (!amdgpu_check_pot_argument(amdgpu_vm_size)) { + if (!is_power_of_2(amdgpu_vm_size)) { dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", amdgpu_vm_size); goto def_value; @@ -1118,19 +1056,31 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", amdgpu_sched_jobs); amdgpu_sched_jobs = 4; - } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){ + } else if (!is_power_of_2(amdgpu_sched_jobs)){ dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", amdgpu_sched_jobs); amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); } - if (amdgpu_gart_size != -1) { + if (amdgpu_gart_size < 32) { + /* gart size must be greater or equal to 32M */ + dev_warn(adev->dev, "gart size (%d) too small\n", + amdgpu_gart_size); + amdgpu_gart_size = 32; + } + + if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { /* gtt size must be greater or equal to 32M */ - if (amdgpu_gart_size < 32) { - dev_warn(adev->dev, "gart size (%d) too small\n", - amdgpu_gart_size); - amdgpu_gart_size = -1; - } + dev_warn(adev->dev, "gtt size (%d) too small\n", + amdgpu_gtt_size); + amdgpu_gtt_size = -1; + } + + /* valid range is between 4 and 9 inclusive */ + if (amdgpu_vm_fragment_size != -1 && + (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) { + dev_warn(adev->dev, "valid range is between 4 and 9\n"); + amdgpu_vm_fragment_size = -1; } amdgpu_check_vm_size(adev); @@ -1138,7 +1088,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_check_block_size(adev); if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || - !amdgpu_check_pot_argument(amdgpu_vram_page_split))) { + !is_power_of_2(amdgpu_vram_page_split))) { dev_warn(adev->dev, "invalid VRAM page split (%d)\n", amdgpu_vram_page_split); amdgpu_vram_page_split = 1024; @@ -1901,7 +1851,8 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_DCE, AMD_IP_BLOCK_TYPE_GFX, AMD_IP_BLOCK_TYPE_SDMA, - AMD_IP_BLOCK_TYPE_VCE, + AMD_IP_BLOCK_TYPE_UVD, + AMD_IP_BLOCK_TYPE_VCE }; for (i = 0; i < ARRAY_SIZE(ip_order); i++) { @@ -2019,7 +1970,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->flags = flags; adev->asic_type = flags & AMD_ASIC_MASK; adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; - adev->mc.gtt_size = 512 * 1024 * 1024; + adev->mc.gart_size = 512 * 1024 * 1024; adev->accel_working = false; adev->num_rings = 0; adev->mman.buffer_funcs = NULL; @@ -2068,6 +2019,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->uvd_ctx_idx_lock); spin_lock_init(&adev->didt_idx_lock); spin_lock_init(&adev->gc_cac_idx_lock); + spin_lock_init(&adev->se_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); spin_lock_init(&adev->mm_stats.lock); @@ -2143,6 +2095,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_atombios_init(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); goto failed; } @@ -2153,6 +2106,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (amdgpu_vpost_needed(adev)) { if (!adev->bios) { dev_err(adev->dev, "no vBIOS found\n"); + 
amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); r = -EINVAL; goto failed; } @@ -2160,18 +2114,28 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_atom_asic_init(adev->mode_info.atom_context); if (r) { dev_err(adev->dev, "gpu post error!\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0); goto failed; } } else { DRM_INFO("GPU post is not needed\n"); } - if (!adev->is_atom_fw) { + if (adev->is_atom_fw) { + /* Initialize clocks */ + r = amdgpu_atomfirmware_get_clock_info(adev); + if (r) { + dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); + goto failed; + } + } else { /* Initialize clocks */ r = amdgpu_atombios_get_clock_info(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); - return r; + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); + goto failed; } /* init i2c buses */ amdgpu_atombios_i2c_init(adev); @@ -2181,6 +2145,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_fence_driver_init(adev); if (r) { dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); goto failed; } @@ -2190,6 +2155,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_init(adev); if (r) { dev_err(adev->dev, "amdgpu_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); amdgpu_fini(adev); goto failed; } @@ -2209,6 +2175,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_ib_pool_init(adev); if (r) { dev_err(adev->dev, "IB initialization failed (%d).\n", r); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); goto failed; } @@ -2253,12 +2220,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_late_init(adev); if (r) { dev_err(adev->dev, "amdgpu_late_init failed\n"); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); goto failed; } return 0; failed: + amdgpu_vf_error_trans_all(adev); if (runtime) vga_switcheroo_fini_domain_pm_ops(adev->dev); return r; @@ -2351,6 +2320,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) } drm_modeset_unlock_all(dev); + amdgpu_amdkfd_suspend(adev); + /* unpin the front buffers and cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); @@ -2392,10 +2363,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) */ amdgpu_bo_evict_vram(adev); - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_save(adev); - else - amdgpu_atombios_scratch_regs_save(adev); + amdgpu_atombios_scratch_regs_save(adev); pci_save_state(dev->pdev); if (suspend) { /* Shut down the device */ @@ -2444,10 +2412,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) if (r) goto unlock; } - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_restore(adev); - else - amdgpu_atombios_scratch_regs_restore(adev); + amdgpu_atombios_scratch_regs_restore(adev); /* post card */ if (amdgpu_need_post(adev)) { @@ -2490,6 +2455,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) } } } + r = amdgpu_amdkfd_resume(adev); + if (r) + return r; /* blat the mode back in */ if (fbcon) { @@ -2860,21 +2828,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) r = amdgpu_suspend(adev); retry: - /* Disable fb access */ - if (adev->mode_info.num_crtc) { - struct amdgpu_mode_mc_save save; - amdgpu_display_stop_mc_access(adev, 
&save); - amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); - } - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_save(adev); - else - amdgpu_atombios_scratch_regs_save(adev); + amdgpu_atombios_scratch_regs_save(adev); r = amdgpu_asic_reset(adev); - if (adev->is_atom_fw) - amdgpu_atomfirmware_scratch_regs_restore(adev); - else - amdgpu_atombios_scratch_regs_restore(adev); + amdgpu_atombios_scratch_regs_restore(adev); /* post card */ amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -2952,6 +2908,7 @@ out: } } else { dev_err(adev->dev, "asic resume failed (%d).\n", r); + amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { if (adev->rings[i] && adev->rings[i]->sched.thread) { kthread_unpark(adev->rings[i]->sched.thread); @@ -2962,12 +2919,16 @@ out: drm_helper_resume_force_mode(adev->ddev); ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); - if (r) + if (r) { /* bad news, how to tell it to userspace ? */ dev_info(adev->dev, "GPU reset failed\n"); - else + amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); + } + else { dev_info(adev->dev, "GPU reset successed!\n"); + } + amdgpu_vf_error_trans_all(adev); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index cdf2ab20166a..6ad243293a78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -482,7 +482,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); - drm_gem_object_unreference_unlocked(amdgpu_fb->obj); + drm_gem_object_put_unlocked(amdgpu_fb->obj); drm_framebuffer_cleanup(fb); kfree(amdgpu_fb); } @@ -542,14 +542,14 @@ amdgpu_user_framebuffer_create(struct drm_device *dev, amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); if (amdgpu_fb == NULL) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(-ENOMEM); } ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj); if (ret) { kfree(amdgpu_fb); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b59f37c83fa6..e39ec981b11c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -68,13 +68,16 @@ * - 3.16.0 - Add reserved vmid support * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS. 
* - 3.18.0 - Export gpu always on cu bitmap + * - 3.19.0 - Add support for UVD MJPEG decode */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 18 +#define KMS_DRIVER_MINOR 19 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; -int amdgpu_gart_size = -1; /* auto */ +int amdgpu_vis_vram_limit = 0; +unsigned amdgpu_gart_size = 256; +int amdgpu_gtt_size = -1; /* auto */ int amdgpu_moverate = -1; /* auto */ int amdgpu_benchmarking = 0; int amdgpu_testing = 0; @@ -92,6 +95,7 @@ unsigned amdgpu_ip_block_mask = 0xffffffff; int amdgpu_bapm = -1; int amdgpu_deep_color = 0; int amdgpu_vm_size = -1; +int amdgpu_vm_fragment_size = -1; int amdgpu_vm_block_size = -1; int amdgpu_vm_fault_stop = 0; int amdgpu_vm_debug = 0; @@ -106,6 +110,7 @@ unsigned amdgpu_pcie_gen_cap = 0; unsigned amdgpu_pcie_lane_cap = 0; unsigned amdgpu_cg_mask = 0xffffffff; unsigned amdgpu_pg_mask = 0xffffffff; +unsigned amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; unsigned amdgpu_pp_feature_mask = 0xffffffff; @@ -120,8 +125,14 @@ int amdgpu_lbpw = -1; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); -MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)"); -module_param_named(gartsize, amdgpu_gart_size, int, 0600); +MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes"); +module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444); + +MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)"); +module_param_named(gartsize, amdgpu_gart_size, uint, 0600); + +MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)"); +module_param_named(gttsize, amdgpu_gtt_size, int, 0600); MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)"); module_param_named(moverate, amdgpu_moverate, int, 0600); @@ -174,6 +185,9 @@ module_param_named(deep_color, amdgpu_deep_color, int, 0444); MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)"); module_param_named(vm_size, amdgpu_vm_size, int, 0444); +MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 
4 = 64K (default), Max 9 = 2M)"); +module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444); + MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)"); module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444); @@ -186,7 +200,7 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both"); module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444); -MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)"); +MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)"); module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444); MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); @@ -199,7 +213,7 @@ MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))"); -module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444); +module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444); MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))"); module_param_named(no_evict, amdgpu_no_evict, int, 0444); @@ -219,6 +233,9 @@ module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444); MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)"); module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); +MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))"); +module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444); + MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); @@ -803,7 +820,6 @@ static struct drm_driver kms_driver = { .open = amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms, .lastclose = amdgpu_driver_lastclose_kms, - .set_busid = drm_pci_set_busid, .unload = amdgpu_driver_unload_kms, .get_vblank_counter = amdgpu_get_vblank_counter_kms, .enable_vblank = amdgpu_enable_vblank_kms, @@ -823,7 +839,6 @@ static struct drm_driver kms_driver = { .gem_close_object = amdgpu_gem_object_close, .dumb_create = amdgpu_mode_dumb_create, .dumb_map_offset = amdgpu_mode_dumb_mmap, - .dumb_destroy = drm_gem_dumb_destroy, .fops = &amdgpu_driver_kms_fops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index c0d8c6ff6380..9afa9c097e1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -118,7 +118,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) amdgpu_bo_unpin(abo); amdgpu_bo_unreserve(abo); } - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); } static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, @@ -245,13 +245,12 @@ static int amdgpufb_create(struct drm_fb_helper *helper, drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); - info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &amdgpufb_ops; tmp = amdgpu_bo_gpu_offset(abo) - 
adev->mc.vram_start; info->fix.smem_start = adev->mc.aper_base + tmp; info->fix.smem_len = amdgpu_bo_size(abo); - info->screen_base = abo->kptr; + info->screen_base = amdgpu_bo_kptr(abo); info->screen_size = amdgpu_bo_size(abo); drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); @@ -281,7 +280,7 @@ out: } if (fb && ret) { - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); drm_framebuffer_unregister_private(fb); drm_framebuffer_cleanup(fb); kfree(fb); @@ -312,31 +311,7 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb return 0; } -/** Sets the color ramps on behalf of fbcon */ -static void amdgpu_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, - u16 blue, int regno) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - amdgpu_crtc->lut_r[regno] = red >> 6; - amdgpu_crtc->lut_g[regno] = green >> 6; - amdgpu_crtc->lut_b[regno] = blue >> 6; -} - -/** Gets the color ramps on behalf of fbcon */ -static void amdgpu_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, int regno) -{ - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - - *red = amdgpu_crtc->lut_r[regno] << 6; - *green = amdgpu_crtc->lut_g[regno] << 6; - *blue = amdgpu_crtc->lut_b[regno] << 6; -} - static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = { - .gamma_set = amdgpu_crtc_fb_gamma_set, - .gamma_get = amdgpu_crtc_fb_gamma_get, .fb_probe = amdgpufb_create, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index a57abc1a25fb..94c1e2e8e34c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -55,6 +55,19 @@ /* * Common GART table functions. */ + +/** + * amdgpu_gart_set_defaults - set the default gart_size + * + * @adev: amdgpu_device pointer + * + * Set the default gart_size based on parameters and available VRAM. + */ +void amdgpu_gart_set_defaults(struct amdgpu_device *adev) +{ + adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20; +} + /** * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table * @@ -131,7 +144,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &adev->gart.robj); + NULL, NULL, 0, &adev->gart.robj); if (r) { return r; } @@ -263,6 +276,41 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, } /** + * amdgpu_gart_map - map dma_addresses into GART entries + * + * @adev: amdgpu_device pointer + * @offset: offset into the GPU's gart aperture + * @pages: number of pages to bind + * @dma_addr: DMA addresses of pages + * + * Map the dma_addresses into GART entries (all asics). + * Returns 0 for success, -EINVAL for failure. 
+ */ +int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, + int pages, dma_addr_t *dma_addr, uint64_t flags, + void *dst) +{ + uint64_t page_base; + unsigned i, j, t; + + if (!adev->gart.ready) { + WARN(1, "trying to bind memory to uninitialized GART !\n"); + return -EINVAL; + } + + t = offset / AMDGPU_GPU_PAGE_SIZE; + + for (i = 0; i < pages; i++) { + page_base = dma_addr[i]; + for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { + amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags); + page_base += AMDGPU_GPU_PAGE_SIZE; + } + } + return 0; +} + +/** * amdgpu_gart_bind - bind pages into the gart page table * * @adev: amdgpu_device pointer @@ -279,31 +327,30 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, int pages, struct page **pagelist, dma_addr_t *dma_addr, uint64_t flags) { - unsigned t; - unsigned p; - uint64_t page_base; - int i, j; +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS + unsigned i,t,p; +#endif + int r; if (!adev->gart.ready) { WARN(1, "trying to bind memory to uninitialized GART !\n"); return -EINVAL; } +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS t = offset / AMDGPU_GPU_PAGE_SIZE; p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); - - for (i = 0; i < pages; i++, p++) { -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS + for (i = 0; i < pages; i++, p++) adev->gart.pages[p] = pagelist[i]; #endif - if (adev->gart.ptr) { - page_base = dma_addr[i]; - for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { - amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags); - page_base += AMDGPU_GPU_PAGE_SIZE; - } - } + + if (adev->gart.ptr) { + r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags, + adev->gart.ptr); + if (r) + return r; } + mb(); amdgpu_gart_flush_gpu_tlb(adev, 0); return 0; @@ -333,8 +380,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev) if (r) return r; /* Compute table size */ - adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE; - adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE; + adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE; + adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE; DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", adev->gart.num_cpu_pages, adev->gart.num_gpu_pages); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h new file mode 100644 index 000000000000..d4cce6936200 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -0,0 +1,77 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_GART_H__ +#define __AMDGPU_GART_H__ + +#include <linux/types.h> + +/* + * GART structures, functions & helpers + */ +struct amdgpu_device; +struct amdgpu_bo; +struct amdgpu_gart_funcs; + +#define AMDGPU_GPU_PAGE_SIZE 4096 +#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) +#define AMDGPU_GPU_PAGE_SHIFT 12 +#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK) + +struct amdgpu_gart { + dma_addr_t table_addr; + struct amdgpu_bo *robj; + void *ptr; + unsigned num_gpu_pages; + unsigned num_cpu_pages; + unsigned table_size; +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS + struct page **pages; +#endif + bool ready; + + /* Asic default pte flags */ + uint64_t gart_pte_flags; + + const struct amdgpu_gart_funcs *gart_funcs; +}; + +void amdgpu_gart_set_defaults(struct amdgpu_device *adev); +int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); +void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); +int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); +void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); +int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); +void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); +int amdgpu_gart_init(struct amdgpu_device *adev); +void amdgpu_gart_fini(struct amdgpu_device *adev); +int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, + int pages); +int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, + int pages, dma_addr_t *dma_addr, uint64_t flags, + void *dst); +int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, + int pages, struct page **pagelist, + dma_addr_t *dma_addr, uint64_t flags); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 621f739103a6..7171968f261e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -49,7 +49,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, struct drm_gem_object **obj) { struct amdgpu_bo *robj; - unsigned long max_size; int r; *obj = NULL; @@ -58,20 +57,9 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, alignment = PAGE_SIZE; } - if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) { - /* Maximum bo size is the unpinned gtt size since we use the gtt to - * handle vram to system pool migrations. 
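
The new amdgpu_gart.h above also carries the GPU page-size helpers. The snippet below, compiled on its own, simply demonstrates what AMDGPU_GPU_PAGE_ALIGN() evaluates to for a few sizes; the macro is copied from the header, the input values are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)

int main(void)
{
	uint64_t sizes[] = { 1, 4096, 4097, 10000 };
	unsigned i;

	/* Round each size up to the next 4 KB GPU page boundary. */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%llu -> %llu\n",
		       (unsigned long long)sizes[i],
		       (unsigned long long)AMDGPU_GPU_PAGE_ALIGN(sizes[i]));
	return 0;
}
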
- */ - max_size = adev->mc.gtt_size - adev->gart_pin_size; - if (size > max_size) { - DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n", - size >> 20, max_size >> 20); - return -ENOMEM; - } - } retry: r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, - flags, NULL, NULL, &robj); + flags, NULL, NULL, 0, &robj); if (r) { if (r != -ERESTARTSYS) { if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { @@ -103,7 +91,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev) spin_lock(&file->table_lock); idr_for_each_entry(&file->object_idr, gobj, handle) { WARN_ONCE(1, "And also active allocations!\n"); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); } idr_destroy(&file->object_idr); spin_unlock(&file->table_lock); @@ -237,9 +225,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_CPU_GTT_USWC | - AMDGPU_GEM_CREATE_VRAM_CLEARED| - AMDGPU_GEM_CREATE_SHADOW | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) + AMDGPU_GEM_CREATE_VRAM_CLEARED)) return -EINVAL; /* reject invalid gem domains */ @@ -275,7 +261,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) return r; @@ -318,7 +304,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, return r; bo = gem_to_amdgpu_bo(gobj); - bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; + bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); if (r) @@ -353,7 +339,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) return r; @@ -367,7 +353,7 @@ unlock_mmap_sem: up_read(¤t->mm->mmap_sem); release_object: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -386,11 +372,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, robj = gem_to_amdgpu_bo(gobj); if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return -EPERM; } *offset_p = amdgpu_bo_mmap_offset(robj); - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return 0; } @@ -460,7 +446,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, } else r = ret; - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -503,7 +489,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, unreserve: amdgpu_bo_unreserve(robj); out: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -635,7 +621,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, switch (args->operation) { case AMDGPU_VA_OP_MAP: - r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address, + r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address, args->map_size); if (r) goto error_backoff; @@ -655,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, args->map_size); break; case 
AMDGPU_VA_OP_REPLACE: - r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address, + r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address, args->map_size); if (r) goto error_backoff; @@ -676,7 +662,7 @@ error_backoff: ttm_eu_backoff_reservation(&ticket, &list); error_unref: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -701,11 +687,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, switch (args->op) { case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: { struct drm_amdgpu_gem_create_in info; - void __user *out = (void __user *)(uintptr_t)args->value; + void __user *out = u64_to_user_ptr(args->value); info.bo_size = robj->gem_base.size; info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; - info.domains = robj->prefered_domains; + info.domains = robj->preferred_domains; info.domain_flags = robj->flags; amdgpu_bo_unreserve(robj); if (copy_to_user(out, &info, sizeof(info))) @@ -723,10 +709,10 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, amdgpu_bo_unreserve(robj); break; } - robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | + robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_CPU); - robj->allowed_domains = robj->prefered_domains; + robj->allowed_domains = robj->preferred_domains; if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; @@ -738,7 +724,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, } out: - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); return r; } @@ -766,7 +752,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, r = drm_gem_handle_create(file_priv, gobj, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(gobj); + drm_gem_object_put_unlocked(gobj); if (r) { return r; } @@ -784,6 +770,7 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) unsigned domain; const char *placement; unsigned pin_count; + uint64_t offset; domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); switch (domain) { @@ -798,9 +785,12 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) placement = " CPU"; break; } - seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx", - id, amdgpu_bo_size(bo), placement, - amdgpu_bo_gpu_offset(bo)); + seq_printf(m, "\t0x%08x: %12ld byte %s", + id, amdgpu_bo_size(bo), placement); + + offset = ACCESS_ONCE(bo->tbo.mem.start); + if (offset != AMDGPU_BO_INVALID_OFFSET) + seq_printf(m, " @ 0x%010Lx", offset); pin_count = ACCESS_ONCE(bo->pin_count); if (pin_count) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index e26108aad3fe..4f6c68fc1dd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -125,7 +125,8 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) if (mec >= adev->gfx.mec.num_mec) break; - if (adev->gfx.mec.num_mec > 1) { + /* FIXME: spreading the queues across pipes causes perf regressions */ + if (0) { /* policy: amdgpu owns the first two queues of the first MEC */ if (mec == 0 && queue < 2) set_bit(i, adev->gfx.mec.queue_bitmap); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index f7d22c44034d..9e05e257729f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -28,7 +28,7 @@ struct 
amdgpu_gtt_mgr { struct drm_mm mm; spinlock_t lock; - uint64_t available; + atomic64_t available; }; /** @@ -42,15 +42,19 @@ struct amdgpu_gtt_mgr { static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, unsigned long p_size) { + struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_gtt_mgr *mgr; + uint64_t start, size; mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); if (!mgr) return -ENOMEM; - drm_mm_init(&mgr->mm, 0, p_size); + start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; + size = (adev->mc.gart_size >> PAGE_SHIFT) - start; + drm_mm_init(&mgr->mm, start, size); spin_lock_init(&mgr->lock); - mgr->available = p_size; + atomic64_set(&mgr->available, p_size); man->priv = mgr; return 0; } @@ -81,6 +85,20 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) } /** + * amdgpu_gtt_mgr_is_allocated - Check if mem has address space + * + * @mem: the mem object to check + * + * Check if a mem object has already address space allocated. + */ +bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem) +{ + struct drm_mm_node *node = mem->mm_node; + + return (node->start != AMDGPU_BO_INVALID_OFFSET); +} + +/** * amdgpu_gtt_mgr_alloc - allocate new ranges * * @man: TTM memory type manager @@ -95,13 +113,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, const struct ttm_place *place, struct ttm_mem_reg *mem) { + struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_gtt_mgr *mgr = man->priv; struct drm_mm_node *node = mem->mm_node; enum drm_mm_insert_mode mode; unsigned long fpfn, lpfn; int r; - if (node->start != AMDGPU_BO_INVALID_OFFSET) + if (amdgpu_gtt_mgr_is_allocated(mem)) return 0; if (place) @@ -112,7 +131,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, if (place && place->lpfn) lpfn = place->lpfn; else - lpfn = man->size; + lpfn = adev->gart.num_cpu_pages; mode = DRM_MM_INSERT_BEST; if (place && place->flags & TTM_PL_FLAG_TOPDOWN) @@ -134,15 +153,6 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, return r; } -void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager *man) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); - struct amdgpu_gtt_mgr *mgr = man->priv; - - seq_printf(m, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n", - man->size, mgr->available, (u64)atomic64_read(&adev->gtt_usage) >> 20); - -} /** * amdgpu_gtt_mgr_new - allocate a new node * @@ -163,11 +173,11 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, int r; spin_lock(&mgr->lock); - if (mgr->available < mem->num_pages) { + if (atomic64_read(&mgr->available) < mem->num_pages) { spin_unlock(&mgr->lock); return 0; } - mgr->available -= mem->num_pages; + atomic64_sub(mem->num_pages, &mgr->available); spin_unlock(&mgr->lock); node = kzalloc(sizeof(*node), GFP_KERNEL); @@ -194,9 +204,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, return 0; err_out: - spin_lock(&mgr->lock); - mgr->available += mem->num_pages; - spin_unlock(&mgr->lock); + atomic64_add(mem->num_pages, &mgr->available); return r; } @@ -223,30 +231,47 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, spin_lock(&mgr->lock); if (node->start != AMDGPU_BO_INVALID_OFFSET) drm_mm_remove_node(node); - mgr->available += mem->num_pages; spin_unlock(&mgr->lock); + atomic64_add(mem->num_pages, &mgr->available); kfree(node); mem->mm_node = NULL; } /** + * amdgpu_gtt_mgr_usage - return usage of GTT domain + * + * @man: TTM memory type manager + * + * Return how 
many bytes are used in the GTT domain + */ +uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) +{ + struct amdgpu_gtt_mgr *mgr = man->priv; + + return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE; +} + +/** * amdgpu_gtt_mgr_debug - dump VRAM table * * @man: TTM memory type manager - * @prefix: text prefix + * @printer: DRM printer to use * * Dump the table content using printk. */ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, - const char *prefix) + struct drm_printer *printer) { struct amdgpu_gtt_mgr *mgr = man->priv; - struct drm_printer p = drm_debug_printer(prefix); spin_lock(&mgr->lock); - drm_mm_print(&mgr->mm, &p); + drm_mm_print(&mgr->mm, printer); spin_unlock(&mgr->lock); + + drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n", + man->size, (u64)atomic64_read(&mgr->available), + amdgpu_gtt_mgr_usage(man) >> 20); } const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index f774b3f497d2..659997bfff30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -130,6 +130,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, unsigned i; int r = 0; + bool need_pipe_sync = false; if (num_ibs == 0) return -EINVAL; @@ -165,15 +166,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ring->funcs->emit_pipeline_sync && job && ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) || amdgpu_vm_need_pipeline_sync(ring, job))) { - amdgpu_ring_emit_pipeline_sync(ring); + need_pipe_sync = true; dma_fence_put(tmp); } if (ring->funcs->insert_start) ring->funcs->insert_start(ring); - if (vm) { - r = amdgpu_vm_flush(ring, job); + if (job) { + r = amdgpu_vm_flush(ring, job, need_pipe_sync); if (r) { amdgpu_ring_undo(ring); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 62da6c5c6095..4bdd851f56d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -220,6 +220,10 @@ int amdgpu_irq_init(struct amdgpu_device *adev) int r = 0; spin_lock_init(&adev->irq.lock); + + /* Disable vblank irqs aggressively for power-saving */ + adev->ddev->vblank_disable_immediate = true; + r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); if (r) { return r; @@ -263,7 +267,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) { unsigned i, j; - drm_vblank_cleanup(adev->ddev); if (adev->irq.installed) { drm_irq_uninstall(adev->ddev); adev->irq.installed = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 3d641e10e6b6..4510627ae83e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -81,6 +81,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]); if (r) kfree(*job); + else + (*job)->vm_pd_addr = adev->gart.table_addr; return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index b0b23101d1c8..e16229000a98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -158,7 +158,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) "Error during ACPI methods call\n"); } - amdgpu_amdkfd_load_interface(adev); amdgpu_amdkfd_device_probe(adev); 
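/*
 * The per-device usage atomics (adev->vram_usage, adev->vram_vis_usage,
 * adev->gtt_usage) are superseded by queries against the TTM memory type
 * managers; the AMDGPU_INFO_*_USAGE cases below now call
 * amdgpu_vram_mgr_usage()/amdgpu_vram_mgr_vis_usage()/amdgpu_gtt_mgr_usage().
 * A minimal sketch of the GTT side, assuming a valid adev:
 *
 *	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_TT];
 *	uint64_t used_bytes = amdgpu_gtt_mgr_usage(man);
 *	(internally: (man->size - atomic64_read(&mgr->available)) * PAGE_SIZE)
 */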
amdgpu_amdkfd_device_init(adev); @@ -456,13 +455,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file ui64 = atomic64_read(&adev->num_vram_cpu_page_faults); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VRAM_USAGE: - ui64 = atomic64_read(&adev->vram_usage); + ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VIS_VRAM_USAGE: - ui64 = atomic64_read(&adev->vram_vis_usage); + ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_GTT_USAGE: - ui64 = atomic64_read(&adev->gtt_usage); + ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_GDS_CONFIG: { struct drm_amdgpu_info_gds gds_info; @@ -485,7 +484,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file vram_gtt.vram_size -= adev->vram_pin_size; vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size); - vram_gtt.gtt_size = adev->mc.gtt_size; + vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; + vram_gtt.gtt_size *= PAGE_SIZE; vram_gtt.gtt_size -= adev->gart_pin_size; return copy_to_user(out, &vram_gtt, min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; @@ -497,7 +497,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file mem.vram.total_heap_size = adev->mc.real_vram_size; mem.vram.usable_heap_size = adev->mc.real_vram_size - adev->vram_pin_size; - mem.vram.heap_usage = atomic64_read(&adev->vram_usage); + mem.vram.heap_usage = + amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.cpu_accessible_vram.total_heap_size = @@ -506,14 +507,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file adev->mc.visible_vram_size - (adev->vram_pin_size - adev->invisible_pin_size); mem.cpu_accessible_vram.heap_usage = - atomic64_read(&adev->vram_vis_usage); + amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); mem.cpu_accessible_vram.max_allocation = mem.cpu_accessible_vram.usable_heap_size * 3 / 4; - mem.gtt.total_heap_size = adev->mc.gtt_size; - mem.gtt.usable_heap_size = - adev->mc.gtt_size - adev->gart_pin_size; - mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage); + mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size; + mem.gtt.total_heap_size *= PAGE_SIZE; + mem.gtt.usable_heap_size = mem.gtt.total_heap_size + - adev->gart_pin_size; + mem.gtt.heap_usage = + amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]); mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; return copy_to_user(out, &mem, @@ -571,8 +574,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; } else { - dev_info.max_engine_clock = adev->pm.default_sclk * 10; - dev_info.max_memory_clock = adev->pm.default_mclk * 10; + dev_info.max_engine_clock = adev->clock.default_sclk * 10; + dev_info.max_memory_clock = adev->clock.default_mclk * 10; } dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * @@ -587,10 +590,8 @@ static int 
amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); - dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) * - AMDGPU_GPU_PAGE_SIZE; + dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; - dev_info.cu_active_number = adev->gfx.cu_info.number; dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; dev_info.ce_ram_size = adev->gfx.ce_ram_size; @@ -839,7 +840,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) } if (amdgpu_sriov_vf(adev)) { - r = amdgpu_map_static_csa(adev, &fpriv->vm); + r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); if (r) goto out_suspend; } @@ -892,8 +893,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, if (amdgpu_sriov_vf(adev)) { /* TODO: how to handle reserve failure */ BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true)); - amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va); - fpriv->vm.csa_bo_va = NULL; + amdgpu_vm_bo_rmv(adev, fpriv->csa_va); + fpriv->csa_va = NULL; amdgpu_bo_unreserve(adev->virt.csa_obj); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 43a9d3aec6c4..2af2678ddaf6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -257,15 +257,7 @@ struct amdgpu_audio { int num_pins; }; -struct amdgpu_mode_mc_save { - u32 vga_render_control; - u32 vga_hdp_control; - bool crtc_enabled[AMDGPU_MAX_CRTCS]; -}; - struct amdgpu_display_funcs { - /* vga render */ - void (*set_vga_render_state)(struct amdgpu_device *adev, bool render); /* display watermarks */ void (*bandwidth_update)(struct amdgpu_device *adev); /* get frame count */ @@ -300,10 +292,6 @@ struct amdgpu_display_funcs { uint16_t connector_object_id, struct amdgpu_hpd *hpd, struct amdgpu_router *router); - void (*stop_mc_access)(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); - void (*resume_mc_access)(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); }; struct amdgpu_mode_info { @@ -369,7 +357,6 @@ struct amdgpu_atom_ss { struct amdgpu_crtc { struct drm_crtc base; int crtc_id; - u16 lut_r[256], lut_g[256], lut_b[256]; bool enabled; bool can_tile; uint32_t crtc_offset; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 8ee69652be8c..e7e899190bef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -37,55 +37,6 @@ #include "amdgpu.h" #include "amdgpu_trace.h" - - -static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev, - struct ttm_mem_reg *mem) -{ - if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size) - return 0; - - return ((mem->start << PAGE_SHIFT) + mem->size) > - adev->mc.visible_vram_size ? 
- adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) : - mem->size; -} - -static void amdgpu_update_memory_usage(struct amdgpu_device *adev, - struct ttm_mem_reg *old_mem, - struct ttm_mem_reg *new_mem) -{ - u64 vis_size; - if (!adev) - return; - - if (new_mem) { - switch (new_mem->mem_type) { - case TTM_PL_TT: - atomic64_add(new_mem->size, &adev->gtt_usage); - break; - case TTM_PL_VRAM: - atomic64_add(new_mem->size, &adev->vram_usage); - vis_size = amdgpu_get_vis_part_size(adev, new_mem); - atomic64_add(vis_size, &adev->vram_vis_usage); - break; - } - } - - if (old_mem) { - switch (old_mem->mem_type) { - case TTM_PL_TT: - atomic64_sub(old_mem->size, &adev->gtt_usage); - break; - case TTM_PL_VRAM: - atomic64_sub(old_mem->size, &adev->vram_usage); - vis_size = amdgpu_get_vis_part_size(adev, old_mem); - atomic64_sub(vis_size, &adev->vram_vis_usage); - break; - } - } -} - static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); @@ -93,7 +44,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) bo = container_of(tbo, struct amdgpu_bo, tbo); - amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL); + amdgpu_bo_kunmap(bo); drm_gem_object_release(&bo->gem_base); amdgpu_bo_unref(&bo->parent); @@ -219,7 +170,7 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, } /** - * amdgpu_bo_create_kernel - create BO for kernel use + * amdgpu_bo_create_reserved - create reserved BO for kernel use * * @adev: amdgpu device object * @size: size for the new BO @@ -229,24 +180,30 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, * @gpu_addr: GPU addr of the pinned BO * @cpu_addr: optional CPU address mapping * - * Allocates and pins a BO for kernel internal use. + * Allocates and pins a BO for kernel internal use, and returns it still + * reserved. * * Returns 0 on success, negative error code otherwise. */ -int amdgpu_bo_create_kernel(struct amdgpu_device *adev, - unsigned long size, int align, - u32 domain, struct amdgpu_bo **bo_ptr, - u64 *gpu_addr, void **cpu_addr) +int amdgpu_bo_create_reserved(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr) { + bool free = false; int r; - r = amdgpu_bo_create(adev, size, align, true, domain, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, bo_ptr); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); - return r; + if (!*bo_ptr) { + r = amdgpu_bo_create(adev, size, align, true, domain, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, + NULL, NULL, 0, bo_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", + r); + return r; + } + free = true; } r = amdgpu_bo_reserve(*bo_ptr, false); @@ -269,20 +226,52 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, } } - amdgpu_bo_unreserve(*bo_ptr); - return 0; error_unreserve: amdgpu_bo_unreserve(*bo_ptr); error_free: - amdgpu_bo_unref(bo_ptr); + if (free) + amdgpu_bo_unref(bo_ptr); return r; } /** + * amdgpu_bo_create_kernel - create BO for kernel use + * + * @adev: amdgpu device object + * @size: size for the new BO + * @align: alignment for the new BO + * @domain: where to place it + * @bo_ptr: resulting BO + * @gpu_addr: GPU addr of the pinned BO + * @cpu_addr: optional CPU address mapping + * + * Allocates and pins a BO for kernel internal use. + * + * Returns 0 on success, negative error code otherwise. 
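+ *
+ * A minimal usage sketch, assuming a valid @adev (the BO comes back pinned
+ * and unreserved; release it again with amdgpu_bo_free_kernel()):
+ *
+ *	struct amdgpu_bo *bo = NULL;
+ *	u64 gpu_addr;
+ *	void *cpu_ptr;
+ *	int r;
+ *
+ *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ *				    AMDGPU_GEM_DOMAIN_VRAM,
+ *				    &bo, &gpu_addr, &cpu_ptr);
+ *	if (!r)
+ *		amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);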
+ */ +int amdgpu_bo_create_kernel(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr) +{ + int r; + + r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr, + gpu_addr, cpu_addr); + + if (r) + return r; + + amdgpu_bo_unreserve(*bo_ptr); + + return 0; +} + +/** * amdgpu_bo_free_kernel - free BO for kernel use * * @bo: amdgpu BO to free @@ -317,12 +306,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct sg_table *sg, struct ttm_placement *placement, struct reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr) { struct amdgpu_bo *bo; enum ttm_bo_type type; unsigned long page_align; - u64 initial_bytes_moved; + u64 initial_bytes_moved, bytes_moved; size_t acc_size; int r; @@ -351,13 +341,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, } INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); - bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | + bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_CPU | AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA); - bo->allowed_domains = bo->prefered_domains; + bo->allowed_domains = bo->preferred_domains; if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; @@ -398,8 +388,14 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, acc_size, sg, resv, &amdgpu_ttm_bo_destroy); - amdgpu_cs_report_moved_bytes(adev, - atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved); + bytes_moved = atomic64_read(&adev->num_bytes_moved) - + initial_bytes_moved; + if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + bo->tbo.mem.mem_type == TTM_PL_VRAM && + bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) + amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved); + else + amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0); if (unlikely(r != 0)) return r; @@ -411,7 +407,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { struct dma_fence *fence; - r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); + r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence); if (unlikely(r)) goto fail_unreserve; @@ -426,6 +422,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, trace_amdgpu_bo_create(bo); + /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */ + if (type == ttm_bo_type_device) + bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + return 0; fail_unreserve: @@ -459,6 +459,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &placement, bo->tbo.resv, + 0, &bo->shadow); if (!r) { bo->shadow->parent = amdgpu_bo_ref(bo); @@ -470,11 +471,15 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, return r; } +/* init_value will only take effect when flags contains + * AMDGPU_GEM_CREATE_VRAM_CLEARED. 
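+ *
+ * For example (sketch, assuming a valid adev), a VRAM BO that should come
+ * back zero-filled could be created as:
+ *
+ *	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
+ *			     AMDGPU_GEM_DOMAIN_VRAM,
+ *			     AMDGPU_GEM_CREATE_VRAM_CLEARED,
+ *			     NULL, NULL, 0, &bo);
+ *
+ * Callers that do not pass the flag can simply keep init_value at 0.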
+ */ int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr) { struct ttm_placement placement = {0}; @@ -489,7 +494,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain, flags, sg, &placement, - resv, bo_ptr); + resv, init_value, bo_ptr); if (r) return r; @@ -535,7 +540,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr, amdgpu_bo_size(bo), resv, fence, - direct); + direct, false); if (!r) amdgpu_bo_fence(bo, *fence, true); @@ -551,7 +556,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo) if (bo->pin_count) return 0; - domain = bo->prefered_domains; + domain = bo->preferred_domains; retry: amdgpu_ttm_placement_from_domain(bo, domain); @@ -588,7 +593,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr, amdgpu_bo_size(bo), resv, fence, - direct); + direct, false); if (!r) amdgpu_bo_fence(bo, *fence, true); @@ -598,16 +603,16 @@ err: int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) { - bool is_iomem; + void *kptr; long r; if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) return -EPERM; - if (bo->kptr) { - if (ptr) { - *ptr = bo->kptr; - } + kptr = amdgpu_bo_kptr(bo); + if (kptr) { + if (ptr) + *ptr = kptr; return 0; } @@ -620,19 +625,23 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) if (r) return r; - bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); if (ptr) - *ptr = bo->kptr; + *ptr = amdgpu_bo_kptr(bo); return 0; } +void *amdgpu_bo_kptr(struct amdgpu_bo *bo) +{ + bool is_iomem; + + return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); +} + void amdgpu_bo_kunmap(struct amdgpu_bo *bo) { - if (bo->kptr == NULL) - return; - bo->kptr = NULL; - ttm_bo_kunmap(&bo->kmap); + if (bo->kmap.bo) + ttm_bo_kunmap(&bo->kmap); } struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo) @@ -724,15 +733,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, dev_err(adev->dev, "%p pin failed\n", bo); goto error; } - r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); - if (unlikely(r)) { - dev_err(adev->dev, "%p bind failed\n", bo); - goto error; - } bo->pin_count = 1; - if (gpu_addr != NULL) + if (gpu_addr != NULL) { + r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); + if (unlikely(r)) { + dev_err(adev->dev, "%p bind failed\n", bo); + goto error; + } *gpu_addr = amdgpu_bo_gpu_offset(bo); + } if (domain == AMDGPU_GEM_DOMAIN_VRAM) { adev->vram_pin_size += amdgpu_bo_size(bo); if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) @@ -921,6 +931,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, abo = container_of(bo, struct amdgpu_bo, tbo); amdgpu_vm_bo_invalidate(adev, abo); + amdgpu_bo_kunmap(abo); + /* remember the eviction */ if (evict) atomic64_inc(&adev->num_evictions); @@ -930,8 +942,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, return; /* move_notify is called before move happens */ - amdgpu_update_memory_usage(adev, &bo->mem, new_mem); - trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type); } @@ -939,19 +949,22 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo; - unsigned long offset, size, lpfn; - int i, r; + unsigned long offset, size; + int r; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) return 0; abo 
= container_of(bo, struct amdgpu_bo, tbo); + + /* Remember that this BO was accessed by the CPU */ + abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + if (bo->mem.mem_type != TTM_PL_VRAM) return 0; size = bo->mem.num_pages << PAGE_SHIFT; offset = bo->mem.start << PAGE_SHIFT; - /* TODO: figure out how to map scattered VRAM to the CPU */ if ((offset + size) <= adev->mc.visible_vram_size) return 0; @@ -961,26 +974,21 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* hurrah the memory is not visible ! */ atomic64_inc(&adev->num_vram_cpu_page_faults); - amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); - lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; - for (i = 0; i < abo->placement.num_placement; i++) { - /* Force into visible VRAM */ - if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) && - (!abo->placements[i].lpfn || - abo->placements[i].lpfn > lpfn)) - abo->placements[i].lpfn = lpfn; - } + amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT); + + /* Avoid costly evictions; only set GTT as a busy placement */ + abo->placement.num_busy_placement = 1; + abo->placement.busy_placement = &abo->placements[1]; + r = ttm_bo_validate(bo, &abo->placement, false, false); - if (unlikely(r == -ENOMEM)) { - amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); - return ttm_bo_validate(bo, &abo->placement, false, false); - } else if (unlikely(r != 0)) { + if (unlikely(r != 0)) return r; - } offset = bo->mem.start << PAGE_SHIFT; /* this should never happen */ - if ((offset + size) > adev->mc.visible_vram_size) + if (bo->mem.mem_type == TTM_PL_VRAM && + (offset + size) > adev->mc.visible_vram_size) return -EINVAL; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 382485115b06..a288fa6d72c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -33,6 +33,61 @@ #define AMDGPU_BO_INVALID_OFFSET LONG_MAX +/* bo virtual addresses in a vm */ +struct amdgpu_bo_va_mapping { + struct list_head list; + struct rb_node rb; + uint64_t start; + uint64_t last; + uint64_t __subtree_last; + uint64_t offset; + uint64_t flags; +}; + +/* User space allocated BO in a VM */ +struct amdgpu_bo_va { + struct amdgpu_vm_bo_base base; + + /* protected by bo being reserved */ + struct dma_fence *last_pt_update; + unsigned ref_count; + + /* mappings for this bo_va */ + struct list_head invalids; + struct list_head valids; +}; + +struct amdgpu_bo { + /* Protected by tbo.reserved */ + u32 preferred_domains; + u32 allowed_domains; + struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + struct ttm_placement placement; + struct ttm_buffer_object tbo; + struct ttm_bo_kmap_obj kmap; + u64 flags; + unsigned pin_count; + u64 tiling_flags; + u64 metadata_flags; + void *metadata; + u32 metadata_size; + unsigned prime_shared_count; + /* list of all virtual address to which this bo is associated to */ + struct list_head va; + /* Constant after initialization */ + struct drm_gem_object gem_base; + struct amdgpu_bo *parent; + struct amdgpu_bo *shadow; + + struct ttm_bo_kmap_obj dma_buf_vmap; + struct amdgpu_mn *mn; + + union { + struct list_head mn_list; + struct list_head shadow_list; + }; +}; + /** * amdgpu_mem_type_to_domain - return domain corresponding to mem_type * @mem_type: ttm memory type @@ -120,7 +175,11 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) */ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo) { 
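/*
 * GTT address space is now allocated on demand, so residency in TTM_PL_TT
 * alone no longer implies a valid GPU address: the BO only becomes GPU
 * accessible once its pages are bound into the GART, hence the
 * amdgpu_ttm_is_bound() check below.  VRAM is always GPU accessible and
 * SYSTEM/other placements never are.
 */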
- return bo->tbo.mem.mem_type != TTM_PL_SYSTEM; + switch (bo->tbo.mem.mem_type) { + case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm); + case TTM_PL_VRAM: return true; + default: return false; + } } int amdgpu_bo_create(struct amdgpu_device *adev, @@ -128,6 +187,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr); int amdgpu_bo_create_restricted(struct amdgpu_device *adev, unsigned long size, int byte_align, @@ -135,7 +195,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct sg_table *sg, struct ttm_placement *placement, struct reservation_object *resv, + uint64_t init_value, struct amdgpu_bo **bo_ptr); +int amdgpu_bo_create_reserved(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_create_kernel(struct amdgpu_device *adev, unsigned long size, int align, u32 domain, struct amdgpu_bo **bo_ptr, @@ -143,6 +208,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); +void *amdgpu_bo_kptr(struct amdgpu_bo *bo); void amdgpu_bo_kunmap(struct amdgpu_bo *bo); struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); void amdgpu_bo_unref(struct amdgpu_bo **bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h index c19c4d138751..f21a7716b90e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h @@ -30,6 +30,7 @@ struct cg_flag_name const char *name; }; +void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); int amdgpu_pm_sysfs_init(struct amdgpu_device *adev); void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev); void amdgpu_pm_print_power_states(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 6bdc866570ab..5b3f92891f89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -69,7 +69,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, ww_mutex_lock(&resv->lock, NULL); ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, - AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo); + AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo); ww_mutex_unlock(&resv->lock); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 4083be61b328..8c2204c7b384 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -63,8 +63,13 @@ static int psp_sw_init(void *handle) psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk; break; case CHIP_RAVEN: +#if 0 + psp->init_microcode = psp_v10_0_init_microcode; +#endif psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf; psp->ring_init = psp_v10_0_ring_init; + psp->ring_create = psp_v10_0_ring_create; + psp->ring_destroy = psp_v10_0_ring_destroy; psp->cmd_submit = psp_v10_0_cmd_submit; psp->compare_sram_data = psp_v10_0_compare_sram_data; break; @@ -95,9 +100,8 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index, int i; struct amdgpu_device *adev = psp->adev; - val = RREG32(reg_index); - for (i = 0; i < adev->usec_timeout; i++) { + val = RREG32(reg_index); if (check_changed) { if (val != reg_val) return 0; @@ -118,33 +122,18 @@ 
psp_cmd_submit_buf(struct psp_context *psp, int index) { int ret; - struct amdgpu_bo *cmd_buf_bo; - uint64_t cmd_buf_mc_addr; - struct psp_gfx_cmd_resp *cmd_buf_mem; - struct amdgpu_device *adev = psp->adev; - - ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &cmd_buf_bo, &cmd_buf_mc_addr, - (void **)&cmd_buf_mem); - if (ret) - return ret; - memset(cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); + memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); - memcpy(cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); + memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); - ret = psp_cmd_submit(psp, ucode, cmd_buf_mc_addr, + ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr, fence_mc_addr, index); while (*((unsigned int *)psp->fence_buf) != index) { msleep(1); } - amdgpu_bo_free_kernel(&cmd_buf_bo, - &cmd_buf_mc_addr, - (void **)&cmd_buf_mem); - return ret; } @@ -352,13 +341,20 @@ static int psp_load_fw(struct amdgpu_device *adev) &psp->fence_buf_mc_addr, &psp->fence_buf); if (ret) + goto failed_mem2; + + ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, + (void **)&psp->cmd_buf_mem); + if (ret) goto failed_mem1; memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); ret = psp_ring_init(psp, PSP_RING_TYPE__KM); if (ret) - goto failed_mem1; + goto failed_mem; ret = psp_tmr_init(psp); if (ret) @@ -379,9 +375,13 @@ static int psp_load_fw(struct amdgpu_device *adev) return 0; failed_mem: + amdgpu_bo_free_kernel(&psp->cmd_buf_bo, + &psp->cmd_buf_mc_addr, + (void **)&psp->cmd_buf_mem); +failed_mem1: amdgpu_bo_free_kernel(&psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); -failed_mem1: +failed_mem2: amdgpu_bo_free_kernel(&psp->fw_pri_bo, &psp->fw_pri_mc_addr, &psp->fw_pri_buf); failed: @@ -435,16 +435,15 @@ static int psp_hw_fini(void *handle) psp_ring_destroy(psp, PSP_RING_TYPE__KM); - if (psp->tmr_buf) - amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); - - if (psp->fw_pri_buf) - amdgpu_bo_free_kernel(&psp->fw_pri_bo, - &psp->fw_pri_mc_addr, &psp->fw_pri_buf); - - if (psp->fence_buf_bo) - amdgpu_bo_free_kernel(&psp->fence_buf_bo, - &psp->fence_buf_mc_addr, &psp->fence_buf); + amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); + amdgpu_bo_free_kernel(&psp->fw_pri_bo, + &psp->fw_pri_mc_addr, &psp->fw_pri_buf); + amdgpu_bo_free_kernel(&psp->fence_buf_bo, + &psp->fence_buf_mc_addr, &psp->fence_buf); + amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr, + &psp->asd_shared_buf); + amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, + (void **)&psp->cmd_buf_mem); kfree(psp->cmd); psp->cmd = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 1a1c8b469f93..538fa9dbfb21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -108,6 +108,11 @@ struct psp_context struct amdgpu_bo *fence_buf_bo; uint64_t fence_buf_mc_addr; void *fence_buf; + + /* cmd buffer */ + struct amdgpu_bo *cmd_buf_bo; + uint64_t cmd_buf_mc_addr; + struct psp_gfx_cmd_resp *cmd_buf_mem; }; struct amdgpu_psp_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 75165e07b1cd..6c5646b48d1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -184,32 +184,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring 
*ring, return r; } - if (ring->funcs->support_64bit_ptrs) { - r = amdgpu_wb_get_64bit(adev, &ring->rptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); - return r; - } - - r = amdgpu_wb_get_64bit(adev, &ring->wptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); - return r; - } - - } else { - r = amdgpu_wb_get(adev, &ring->rptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); - return r; - } - - r = amdgpu_wb_get(adev, &ring->wptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); - return r; - } + r = amdgpu_wb_get(adev, &ring->rptr_offs); + if (r) { + dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); + return r; + } + r = amdgpu_wb_get(adev, &ring->wptr_offs); + if (r) { + dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); + return r; } r = amdgpu_wb_get(adev, &ring->fence_offs); @@ -277,18 +261,15 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) { ring->ready = false; - if (ring->funcs->support_64bit_ptrs) { - amdgpu_wb_free_64bit(ring->adev, ring->cond_exe_offs); - amdgpu_wb_free_64bit(ring->adev, ring->fence_offs); - amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs); - amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs); - } else { - amdgpu_wb_free(ring->adev, ring->cond_exe_offs); - amdgpu_wb_free(ring->adev, ring->fence_offs); - amdgpu_wb_free(ring->adev, ring->rptr_offs); - amdgpu_wb_free(ring->adev, ring->wptr_offs); - } + /* Not to finish a ring which is not initialized */ + if (!(ring->adev) || !(ring->adev->rings[ring->idx])) + return; + + amdgpu_wb_free(ring->adev, ring->rptr_offs); + amdgpu_wb_free(ring->adev, ring->wptr_offs); + amdgpu_wb_free(ring->adev, ring->cond_exe_offs); + amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_bo_free_kernel(&ring->ring_obj, &ring->gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index bc8dec992f73..322d25299a00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -212,4 +212,44 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) } +static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) +{ + if (ring->count_dw <= 0) + DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); + ring->ring[ring->wptr++ & ring->buf_mask] = v; + ring->wptr &= ring->ptr_mask; + ring->count_dw--; +} + +static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, + void *src, int count_dw) +{ + unsigned occupied, chunk1, chunk2; + void *dst; + + if (unlikely(ring->count_dw < count_dw)) + DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); + + occupied = ring->wptr & ring->buf_mask; + dst = (void *)&ring->ring[occupied]; + chunk1 = ring->buf_mask + 1 - occupied; + chunk1 = (chunk1 >= count_dw) ? 
count_dw: chunk1; + chunk2 = count_dw - chunk1; + chunk1 <<= 2; + chunk2 <<= 2; + + if (chunk1) + memcpy(dst, src, chunk1); + + if (chunk2) { + src += chunk1; + dst = (void *)ring->ring; + memcpy(dst, src, chunk2); + } + + ring->wptr += count_dw; + ring->wptr &= ring->ptr_mask; + ring->count_dw -= count_dw; +} + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 5ca75a456ad2..3144400435b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -64,7 +64,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&sa_manager->flist[i]); r = amdgpu_bo_create(adev, size, align, true, domain, - 0, NULL, NULL, &sa_manager->bo); + 0, NULL, NULL, 0, &sa_manager->bo); if (r) { dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 15510dadde01..ed8c3739015b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -33,7 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_bo *vram_obj = NULL; struct amdgpu_bo **gtt_obj = NULL; - uint64_t gtt_addr, vram_addr; + uint64_t gart_addr, vram_addr; unsigned n, size; int i, r; @@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) /* Number of tests = * (Total GTT - IB pool - writeback page - ring buffers) / test size */ - n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024; + n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024; for (i = 0; i < AMDGPU_MAX_RINGS; ++i) if (adev->rings[i]) n -= adev->rings[i]->ring_size; @@ -61,7 +61,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, - NULL, NULL, &vram_obj); + NULL, NULL, 0, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); goto out_cleanup; @@ -76,13 +76,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } for (i = 0; i < n; i++) { void *gtt_map, *vram_map; - void **gtt_start, **gtt_end; + void **gart_start, **gart_end; void **vram_start, **vram_end; struct dma_fence *fence = NULL; r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, - NULL, gtt_obj + i); + NULL, 0, gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); goto out_lclean; @@ -91,7 +91,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) r = amdgpu_bo_reserve(gtt_obj[i], false); if (unlikely(r != 0)) goto out_lclean_unref; - r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, >t_addr); + r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr); if (r) { DRM_ERROR("Failed to pin GTT object %d\n", i); goto out_lclean_unres; @@ -103,15 +103,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - for (gtt_start = gtt_map, gtt_end = gtt_map + size; - gtt_start < gtt_end; - gtt_start++) - *gtt_start = gtt_start; + for (gart_start = gtt_map, gart_end = gtt_map + size; + gart_start < gart_end; + gart_start++) + *gart_start = gart_start; amdgpu_bo_kunmap(gtt_obj[i]); - r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr, - size, NULL, &fence, false); + r = amdgpu_copy_buffer(ring, gart_addr, vram_addr, + size, NULL, &fence, false, false); if (r) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); @@ -132,21 +132,21 @@ static void 
amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - for (gtt_start = gtt_map, gtt_end = gtt_map + size, + for (gart_start = gtt_map, gart_end = gtt_map + size, vram_start = vram_map, vram_end = vram_map + size; vram_start < vram_end; - gtt_start++, vram_start++) { - if (*vram_start != gtt_start) { + gart_start++, vram_start++) { + if (*vram_start != gart_start) { DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " "expected 0x%p (GTT/VRAM offset " "0x%16llx/0x%16llx)\n", - i, *vram_start, gtt_start, + i, *vram_start, gart_start, (unsigned long long) - (gtt_addr - adev->mc.gtt_start + - (void*)gtt_start - gtt_map), + (gart_addr - adev->mc.gart_start + + (void*)gart_start - gtt_map), (unsigned long long) (vram_addr - adev->mc.vram_start + - (void*)gtt_start - gtt_map)); + (void*)gart_start - gtt_map)); amdgpu_bo_kunmap(vram_obj); goto out_lclean_unpin; } @@ -155,8 +155,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(vram_obj); - r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr, - size, NULL, &fence, false); + r = amdgpu_copy_buffer(ring, vram_addr, gart_addr, + size, NULL, &fence, false, false); if (r) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); @@ -177,20 +177,20 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) goto out_lclean_unpin; } - for (gtt_start = gtt_map, gtt_end = gtt_map + size, + for (gart_start = gtt_map, gart_end = gtt_map + size, vram_start = vram_map, vram_end = vram_map + size; - gtt_start < gtt_end; - gtt_start++, vram_start++) { - if (*gtt_start != vram_start) { + gart_start < gart_end; + gart_start++, vram_start++) { + if (*gart_start != vram_start) { DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " "expected 0x%p (VRAM/GTT offset " "0x%16llx/0x%16llx)\n", - i, *gtt_start, vram_start, + i, *gart_start, vram_start, (unsigned long long) (vram_addr - adev->mc.vram_start + (void*)vram_start - vram_map), (unsigned long long) - (gtt_addr - adev->mc.gtt_start + + (gart_addr - adev->mc.gart_start + (void*)vram_start - vram_map)); amdgpu_bo_kunmap(gtt_obj[i]); goto out_lclean_unpin; @@ -200,7 +200,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(gtt_obj[i]); DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", - gtt_addr - adev->mc.gtt_start); + gart_addr - adev->mc.gart_start); continue; out_lclean_unpin: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 8601904e670a..1c88bd5e29ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -14,6 +14,62 @@ #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \ job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished) +TRACE_EVENT(amdgpu_ttm_tt_populate, + TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address), + TP_ARGS(adev, dma_address, phys_address), + TP_STRUCT__entry( + __field(uint16_t, domain) + __field(uint8_t, bus) + __field(uint8_t, slot) + __field(uint8_t, func) + __field(uint64_t, dma) + __field(uint64_t, phys) + ), + TP_fast_assign( + __entry->domain = pci_domain_nr(adev->pdev->bus); + __entry->bus = adev->pdev->bus->number; + __entry->slot = PCI_SLOT(adev->pdev->devfn); + __entry->func = PCI_FUNC(adev->pdev->devfn); + __entry->dma = dma_address; + __entry->phys = phys_address; + ), + TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx", + (unsigned)__entry->domain, + (unsigned)__entry->bus, + (unsigned)__entry->slot, + (unsigned)__entry->func, + (unsigned 
long long)__entry->dma, + (unsigned long long)__entry->phys) +); + +TRACE_EVENT(amdgpu_ttm_tt_unpopulate, + TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address), + TP_ARGS(adev, dma_address, phys_address), + TP_STRUCT__entry( + __field(uint16_t, domain) + __field(uint8_t, bus) + __field(uint8_t, slot) + __field(uint8_t, func) + __field(uint64_t, dma) + __field(uint64_t, phys) + ), + TP_fast_assign( + __entry->domain = pci_domain_nr(adev->pdev->bus); + __entry->bus = adev->pdev->bus->number; + __entry->slot = PCI_SLOT(adev->pdev->devfn); + __entry->func = PCI_FUNC(adev->pdev->devfn); + __entry->dma = dma_address; + __entry->phys = phys_address; + ), + TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx", + (unsigned)__entry->domain, + (unsigned)__entry->bus, + (unsigned)__entry->slot, + (unsigned)__entry->func, + (unsigned long long)__entry->dma, + (unsigned long long)__entry->phys) +); + TRACE_EVENT(amdgpu_mm_rreg, TP_PROTO(unsigned did, uint32_t reg, uint32_t value), TP_ARGS(did, reg, value), @@ -105,12 +161,12 @@ TRACE_EVENT(amdgpu_bo_create, __entry->bo = bo; __entry->pages = bo->tbo.num_pages; __entry->type = bo->tbo.mem.mem_type; - __entry->prefer = bo->prefered_domains; + __entry->prefer = bo->preferred_domains; __entry->allow = bo->allowed_domains; __entry->visible = bo->flags; ), - TP_printk("bo=%p, pages=%u, type=%d, prefered=%d, allowed=%d, visible=%d", + TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d", __entry->bo, __entry->pages, __entry->type, __entry->prefer, __entry->allow, __entry->visible) ); @@ -224,17 +280,17 @@ TRACE_EVENT(amdgpu_vm_bo_map, __field(long, start) __field(long, last) __field(u64, offset) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( - __entry->bo = bo_va ? bo_va->bo : NULL; + __entry->bo = bo_va ? 
bo_va->base.bo : NULL; __entry->start = mapping->start; __entry->last = mapping->last; __entry->offset = mapping->offset; __entry->flags = mapping->flags; ), - TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x", + TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx", __entry->bo, __entry->start, __entry->last, __entry->offset, __entry->flags) ); @@ -248,17 +304,17 @@ TRACE_EVENT(amdgpu_vm_bo_unmap, __field(long, start) __field(long, last) __field(u64, offset) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( - __entry->bo = bo_va->bo; + __entry->bo = bo_va->base.bo; __entry->start = mapping->start; __entry->last = mapping->last; __entry->offset = mapping->offset; __entry->flags = mapping->flags; ), - TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x", + TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx", __entry->bo, __entry->start, __entry->last, __entry->offset, __entry->flags) ); @@ -269,7 +325,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping, TP_STRUCT__entry( __field(u64, soffset) __field(u64, eoffset) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( @@ -277,7 +333,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping, __entry->eoffset = mapping->last + 1; __entry->flags = mapping->flags; ), - TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x", + TP_printk("soffs=%010llx, eoffs=%010llx, flags=%llx", __entry->soffset, __entry->eoffset, __entry->flags) ); @@ -293,14 +349,14 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping, TRACE_EVENT(amdgpu_vm_set_ptes, TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags), + uint32_t incr, uint64_t flags), TP_ARGS(pe, addr, count, incr, flags), TP_STRUCT__entry( __field(u64, pe) __field(u64, addr) __field(u32, count) __field(u32, incr) - __field(u32, flags) + __field(u64, flags) ), TP_fast_assign( @@ -310,7 +366,7 @@ TRACE_EVENT(amdgpu_vm_set_ptes, __entry->incr = incr; __entry->flags = flags; ), - TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u", + TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u", __entry->pe, __entry->addr, __entry->incr, __entry->flags, __entry->count) ); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c9b131b13ef7..8b2c294f6f79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -43,14 +43,20 @@ #include <linux/pagemap.h> #include <linux/debugfs.h> #include "amdgpu.h" +#include "amdgpu_trace.h" #include "bif/bif_4_1_d.h" #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) +static int amdgpu_map_buffer(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem, unsigned num_pages, + uint64_t offset, unsigned window, + struct amdgpu_ring *ring, + uint64_t *addr); + static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); - /* * Global memory. 
*/ @@ -97,6 +103,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) goto error_bo; } + mutex_init(&adev->mman.gtt_window_lock); + ring = adev->mman.buffer_funcs_ring; rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; r = amd_sched_entity_init(&ring->sched, &adev->mman.entity, @@ -123,6 +131,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) if (adev->mman.mem_global_referenced) { amd_sched_entity_fini(adev->mman.entity.sched, &adev->mman.entity); + mutex_destroy(&adev->mman.gtt_window_lock); drm_global_item_unref(&adev->mman.bo_global_ref.ref); drm_global_item_unref(&adev->mman.mem_global_ref); adev->mman.mem_global_referenced = false; @@ -150,7 +159,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_TT: man->func = &amdgpu_gtt_mgr_func; - man->gpu_offset = adev->mc.gtt_start; + man->gpu_offset = adev->mc.gart_start; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; @@ -186,12 +195,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo; - static struct ttm_place placements = { + static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM }; - unsigned i; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { placement->placement = &placements; @@ -207,22 +215,36 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, adev->mman.buffer_funcs_ring && adev->mman.buffer_funcs_ring->ready == false) { amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); + } else if (adev->mc.visible_vram_size < adev->mc.real_vram_size && + !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { + unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; + struct drm_mm_node *node = bo->mem.mm_node; + unsigned long pages_left; + + for (pages_left = bo->mem.num_pages; + pages_left; + pages_left -= node->size, node++) { + if (node->start < fpfn) + break; + } + + if (!pages_left) + goto gtt; + + /* Try evicting to the CPU inaccessible part of VRAM + * first, but only set GTT as busy placement, so this + * BO will be evicted to GTT rather than causing other + * BOs to be evicted from VRAM + */ + amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT); + abo->placements[0].fpfn = fpfn; + abo->placements[0].lpfn = 0; + abo->placement.busy_placement = &abo->placements[1]; + abo->placement.num_busy_placement = 1; } else { +gtt: amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); - for (i = 0; i < abo->placement.num_placement; ++i) { - if (!(abo->placements[i].flags & - TTM_PL_FLAG_TT)) - continue; - - if (abo->placements[i].lpfn) - continue; - - /* set an upper limit to force directly - * allocating address space for the BO. 
- */ - abo->placements[i].lpfn = - adev->mc.gtt_size >> PAGE_SHIFT; - } } break; case TTM_PL_TT: @@ -252,29 +274,18 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo, new_mem->mm_node = NULL; } -static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo, - struct drm_mm_node *mm_node, - struct ttm_mem_reg *mem, - uint64_t *addr) +static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, + struct drm_mm_node *mm_node, + struct ttm_mem_reg *mem) { - int r; + uint64_t addr = 0; - switch (mem->mem_type) { - case TTM_PL_TT: - r = amdgpu_ttm_bind(bo, mem); - if (r) - return r; - - case TTM_PL_VRAM: - *addr = mm_node->start << PAGE_SHIFT; - *addr += bo->bdev->man[mem->mem_type].gpu_offset; - break; - default: - DRM_ERROR("Unknown placement %d\n", mem->mem_type); - return -EINVAL; + if (mem->mem_type != TTM_PL_TT || + amdgpu_gtt_mgr_is_allocated(mem)) { + addr = mm_node->start << PAGE_SHIFT; + addr += bo->bdev->man[mem->mem_type].gpu_offset; } - - return 0; + return addr; } static int amdgpu_move_blit(struct ttm_buffer_object *bo, @@ -299,26 +310,40 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, } old_mm = old_mem->mm_node; - r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start); - if (r) - return r; old_size = old_mm->size; - + old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem); new_mm = new_mem->mm_node; - r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start); - if (r) - return r; new_size = new_mm->size; + new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem); num_pages = new_mem->num_pages; + mutex_lock(&adev->mman.gtt_window_lock); while (num_pages) { - unsigned long cur_pages = min(old_size, new_size); + unsigned long cur_pages = min(min(old_size, new_size), + (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE); + uint64_t from = old_start, to = new_start; struct dma_fence *next; - r = amdgpu_copy_buffer(ring, old_start, new_start, + if (old_mem->mem_type == TTM_PL_TT && + !amdgpu_gtt_mgr_is_allocated(old_mem)) { + r = amdgpu_map_buffer(bo, old_mem, cur_pages, + old_start, 0, ring, &from); + if (r) + goto error; + } + + if (new_mem->mem_type == TTM_PL_TT && + !amdgpu_gtt_mgr_is_allocated(new_mem)) { + r = amdgpu_map_buffer(bo, new_mem, cur_pages, + new_start, 1, ring, &to); + if (r) + goto error; + } + + r = amdgpu_copy_buffer(ring, from, to, cur_pages * PAGE_SIZE, - bo->resv, &next, false); + bo->resv, &next, false, true); if (r) goto error; @@ -331,10 +356,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, old_size -= cur_pages; if (!old_size) { - r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem, - &old_start); - if (r) - goto error; + old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem); old_size = old_mm->size; } else { old_start += cur_pages * PAGE_SIZE; @@ -342,22 +364,21 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, new_size -= cur_pages; if (!new_size) { - r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem, - &new_start); - if (r) - goto error; - + new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem); new_size = new_mm->size; } else { new_start += cur_pages * PAGE_SIZE; } } + mutex_unlock(&adev->mman.gtt_window_lock); r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); dma_fence_put(fence); return r; error: + mutex_unlock(&adev->mman.gtt_window_lock); + if (fence) dma_fence_wait(fence, false); dma_fence_put(fence); @@ -384,7 +405,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, placement.num_busy_placement = 1; placement.busy_placement = &placements; placements.fpfn = 0; - placements.lpfn = adev->mc.gtt_size >> 
PAGE_SHIFT; + placements.lpfn = 0; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); @@ -431,7 +452,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, placement.num_busy_placement = 1; placement.busy_placement = &placements; placements.fpfn = 0; - placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT; + placements.lpfn = 0; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); @@ -507,6 +528,15 @@ memcpy: } } + if (bo->type == ttm_bo_type_device && + new_mem->mem_type == TTM_PL_VRAM && + old_mem->mem_type != TTM_PL_VRAM) { + /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU + * accesses the BO after it's moved. + */ + abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + } + /* update statistics */ atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); return 0; @@ -633,6 +663,38 @@ release_pages: return r; } +static void amdgpu_trace_dma_map(struct ttm_tt *ttm) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_ttm_tt *gtt = (void *)ttm; + unsigned i; + + if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) { + for (i = 0; i < ttm->num_pages; i++) { + trace_amdgpu_ttm_tt_populate( + adev, + gtt->ttm.dma_address[i], + page_to_phys(ttm->pages[i])); + } + } +} + +static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); + struct amdgpu_ttm_tt *gtt = (void *)ttm; + unsigned i; + + if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) { + for (i = 0; i < ttm->num_pages; i++) { + trace_amdgpu_ttm_tt_unpopulate( + adev, + gtt->ttm.dma_address[i], + page_to_phys(ttm->pages[i])); + } + } +} + /* prepare the sg table with the user pages */ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) { @@ -659,6 +721,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); + amdgpu_trace_dma_map(ttm); + return 0; release_sg: @@ -692,14 +756,41 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) put_page(page); } + amdgpu_trace_dma_unmap(ttm); + sg_free_table(ttm->sg); } +static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + uint64_t flags; + int r; + + spin_lock(>t->adev->gtt_list_lock); + flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem); + gtt->offset = (u64)mem->start << PAGE_SHIFT; + r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, + ttm->pages, gtt->ttm.dma_address, flags); + + if (r) { + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + ttm->num_pages, gtt->offset); + goto error_gart_bind; + } + + list_add_tail(>t->list, >t->adev->gtt_list); +error_gart_bind: + spin_unlock(>t->adev->gtt_list_lock); + return r; + +} + static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct amdgpu_ttm_tt *gtt = (void*)ttm; - int r; + int r = 0; if (gtt->userptr) { r = amdgpu_ttm_tt_pin_userptr(ttm); @@ -718,7 +809,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, bo_mem->mem_type == AMDGPU_PL_OA) return -EINVAL; - return 0; + if (amdgpu_gtt_mgr_is_allocated(bo_mem)) + r = amdgpu_ttm_do_bind(ttm, bo_mem); + + return r; } bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) @@ -731,8 +825,6 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct 
ttm_mem_reg *bo_mem) { struct ttm_tt *ttm = bo->ttm; - struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; - uint64_t flags; int r; if (!ttm || amdgpu_ttm_is_bound(ttm)) @@ -745,22 +837,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) return r; } - spin_lock(>t->adev->gtt_list_lock); - flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); - gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; - r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); - - if (r) { - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", - ttm->num_pages, gtt->offset); - goto error_gart_bind; - } - - list_add_tail(>t->list, >t->adev->gtt_list); -error_gart_bind: - spin_unlock(>t->adev->gtt_list_lock); - return r; + return amdgpu_ttm_do_bind(ttm, bo_mem); } int amdgpu_ttm_recover_gart(struct amdgpu_device *adev) @@ -852,7 +929,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) { - struct amdgpu_device *adev; + struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; unsigned i; int r; @@ -875,14 +952,14 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); ttm->state = tt_unbound; - return 0; + r = 0; + goto trace_mappings; } - adev = amdgpu_ttm_adev(ttm->bdev); - #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { - return ttm_dma_populate(>t->ttm, adev->dev); + r = ttm_dma_populate(>t->ttm, adev->dev); + goto trace_mappings; } #endif @@ -905,7 +982,12 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) return -EFAULT; } } - return 0; + + r = 0; +trace_mappings: + if (likely(!r)) + amdgpu_trace_dma_map(ttm); + return r; } static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) @@ -926,6 +1008,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) adev = amdgpu_ttm_adev(ttm->bdev); + amdgpu_trace_dma_unmap(ttm); + #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { ttm_dma_unpopulate(>t->ttm, adev->dev); @@ -1075,6 +1159,67 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, return ttm_bo_eviction_valuable(bo, place); } +static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, + unsigned long offset, + void *buf, int len, int write) +{ + struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo); + struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); + struct drm_mm_node *nodes = abo->tbo.mem.mm_node; + uint32_t value = 0; + int ret = 0; + uint64_t pos; + unsigned long flags; + + if (bo->mem.mem_type != TTM_PL_VRAM) + return -EIO; + + while (offset >= (nodes->size << PAGE_SHIFT)) { + offset -= nodes->size << PAGE_SHIFT; + ++nodes; + } + pos = (nodes->start << PAGE_SHIFT) + offset; + + while (len && pos < adev->mc.mc_vram_size) { + uint64_t aligned_pos = pos & ~(uint64_t)3; + uint32_t bytes = 4 - (pos & 3); + uint32_t shift = (pos & 3) * 8; + uint32_t mask = 0xffffffff << shift; + + if (len < bytes) { + mask &= 0xffffffff >> (bytes - len) * 8; + bytes = len; + } + + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); + WREG32(mmMM_INDEX_HI, aligned_pos >> 31); + if (!write || mask != 0xffffffff) + value = RREG32(mmMM_DATA); + if (write) { + value &= ~mask; + value |= (*(uint32_t *)buf << shift) & mask; + WREG32(mmMM_DATA, value); + } + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + if (!write) { + value = (value & mask) 
>> shift; + memcpy(buf, &value, bytes); + } + + ret += bytes; + buf = (uint8_t *)buf + bytes; + pos += bytes; + len -= bytes; + if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) { + ++nodes; + pos = (nodes->start << PAGE_SHIFT); + } + } + + return ret; +} + static struct ttm_bo_driver amdgpu_bo_driver = { .ttm_tt_create = &amdgpu_ttm_tt_create, .ttm_tt_populate = &amdgpu_ttm_tt_populate, @@ -1090,11 +1235,14 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, .io_mem_free = &amdgpu_ttm_io_mem_free, .io_mem_pfn = amdgpu_ttm_io_mem_pfn, + .access_memory = &amdgpu_ttm_access_memory }; int amdgpu_ttm_init(struct amdgpu_device *adev) { + uint64_t gtt_size; int r; + u64 vis_vram_limit; r = amdgpu_ttm_global_init(adev); if (r) { @@ -1118,36 +1266,37 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } + + /* Reduce size of CPU-visible VRAM if requested */ + vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; + if (amdgpu_vis_vram_limit > 0 && + vis_vram_limit <= adev->mc.visible_vram_size) + adev->mc.visible_vram_size = vis_vram_limit; + /* Change the size here instead of the init above so only lpfn is affected */ amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); - r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &adev->stollen_vga_memory); - if (r) { - return r; - } - r = amdgpu_bo_reserve(adev->stollen_vga_memory, false); + r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->stolen_vga_memory, + NULL, NULL); if (r) return r; - r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL); - amdgpu_bo_unreserve(adev->stollen_vga_memory); - if (r) { - amdgpu_bo_unref(&adev->stollen_vga_memory); - return r; - } DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); - r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, - adev->mc.gtt_size >> PAGE_SHIFT); + + if (amdgpu_gtt_size == -1) + gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), + adev->mc.mc_vram_size); + else + gtt_size = (uint64_t)amdgpu_gtt_size << 20; + r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); return r; } DRM_INFO("amdgpu: %uM of GTT memory ready.\n", - (unsigned)(adev->mc.gtt_size / (1024 * 1024))); + (unsigned)(gtt_size / (1024 * 1024))); adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; @@ -1203,13 +1352,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) if (!adev->mman.initialized) return; amdgpu_ttm_debugfs_fini(adev); - if (adev->stollen_vga_memory) { - r = amdgpu_bo_reserve(adev->stollen_vga_memory, true); + if (adev->stolen_vga_memory) { + r = amdgpu_bo_reserve(adev->stolen_vga_memory, true); if (r == 0) { - amdgpu_bo_unpin(adev->stollen_vga_memory); - amdgpu_bo_unreserve(adev->stollen_vga_memory); + amdgpu_bo_unpin(adev->stolen_vga_memory); + amdgpu_bo_unreserve(adev->stolen_vga_memory); } - amdgpu_bo_unref(&adev->stollen_vga_memory); + amdgpu_bo_unref(&adev->stolen_vga_memory); } ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); @@ -1256,12 +1405,77 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) 
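The new access_memory callback above goes through the MM_INDEX/MM_DATA window one aligned dword at a time, so sub-dword reads and writes are done read-modify-write. A standalone sketch of just the aligned_pos/shift/mask arithmetic it uses (hypothetical helper name, no MMIO access):

#include <stdint.h>

/* Derive the dword-aligned position, usable byte count, bit shift and mask
 * for an access of up to 'len' bytes at byte offset 'pos', mirroring the
 * arithmetic in amdgpu_ttm_access_memory() above. */
void vram_window_params(uint64_t pos, int len,
			uint64_t *aligned_pos, uint32_t *bytes,
			uint32_t *shift, uint32_t *mask)
{
	*aligned_pos = pos & ~(uint64_t)3;	/* dword containing 'pos' */
	*bytes = 4 - (pos & 3);			/* bytes left in that dword */
	*shift = (pos & 3) * 8;			/* bit offset inside the dword */
	*mask = 0xffffffff << *shift;

	if (len < (int)*bytes) {		/* short final access */
		*mask &= 0xffffffff >> (*bytes - len) * 8;
		*bytes = len;
	}
}

For example, a one-byte access at pos = 5 yields aligned_pos = 4, shift = 8 and mask = 0x0000ff00, so only that byte of the dword is touched.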
return ttm_bo_mmap(filp, vma, &adev->mman.bdev); } -int amdgpu_copy_buffer(struct amdgpu_ring *ring, - uint64_t src_offset, - uint64_t dst_offset, - uint32_t byte_count, +static int amdgpu_map_buffer(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem, unsigned num_pages, + uint64_t offset, unsigned window, + struct amdgpu_ring *ring, + uint64_t *addr) +{ + struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; + struct amdgpu_device *adev = ring->adev; + struct ttm_tt *ttm = bo->ttm; + struct amdgpu_job *job; + unsigned num_dw, num_bytes; + dma_addr_t *dma_address; + struct dma_fence *fence; + uint64_t src_addr, dst_addr; + uint64_t flags; + int r; + + BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < + AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); + + *addr = adev->mc.gart_start; + *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * + AMDGPU_GPU_PAGE_SIZE; + + num_dw = adev->mman.buffer_funcs->copy_num_dw; + while (num_dw & 0x7) + num_dw++; + + num_bytes = num_pages * 8; + + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job); + if (r) + return r; + + src_addr = num_dw * 4; + src_addr += job->ibs[0].gpu_addr; + + dst_addr = adev->gart.table_addr; + dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8; + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, + dst_addr, num_bytes); + + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); + + dma_address = >t->ttm.dma_address[offset >> PAGE_SHIFT]; + flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem); + r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags, + &job->ibs[0].ptr[num_dw]); + if (r) + goto error_free; + + r = amdgpu_job_submit(job, ring, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, &fence); + if (r) + goto error_free; + + dma_fence_put(fence); + + return r; + +error_free: + amdgpu_job_free(job); + return r; +} + +int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, + uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct dma_fence **fence, bool direct_submit) + struct dma_fence **fence, bool direct_submit, + bool vm_needs_flush) { struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; @@ -1283,6 +1497,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, if (r) return r; + job->vm_needs_flush = vm_needs_flush; if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED); @@ -1327,11 +1542,12 @@ error_free: } int amdgpu_fill_buffer(struct amdgpu_bo *bo, - uint32_t src_data, + uint64_t src_data, struct reservation_object *resv, struct dma_fence **fence) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL*/ uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; @@ -1347,6 +1563,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, return -EINVAL; } + if (bo->tbo.mem.mem_type == TTM_PL_TT) { + r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); + if (r) + return r; + } + num_pages = bo->tbo.num_pages; mm_node = bo->tbo.mem.mm_node; num_loops = 0; @@ -1357,7 +1579,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, num_pages -= mm_node->size; ++mm_node; } - num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; + + /* 10 double words for each SDMA_OP_PTEPDE cmd */ + num_dw = num_loops * 10; /* for IB padding */ num_dw += 64; @@ -1382,16 +1606,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t byte_count = mm_node->size << PAGE_SHIFT; uint64_t dst_addr; - r = 
amdgpu_mm_node_addr(&bo->tbo, mm_node, - &bo->tbo.mem, &dst_addr); - if (r) - return r; + WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8"); + dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); while (byte_count) { uint32_t cur_size_in_bytes = min(byte_count, max_bytes); - amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, - dst_addr, cur_size_in_bytes); + amdgpu_vm_set_pte_pde(adev, &job->ibs[0], + dst_addr, 0, + cur_size_in_bytes >> 3, 0, + src_data); dst_addr += cur_size_in_bytes; byte_count -= cur_size_in_bytes; @@ -1417,32 +1641,16 @@ error_free: #if defined(CONFIG_DEBUG_FS) -extern void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager - *man); static int amdgpu_mm_dump_table(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; unsigned ttm_pl = *(int *)node->info_ent->data; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv; - struct ttm_bo_global *glob = adev->mman.bdev.glob; + struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl]; struct drm_printer p = drm_seq_file_printer(m); - spin_lock(&glob->lru_lock); - drm_mm_print(mm, &p); - spin_unlock(&glob->lru_lock); - switch (ttm_pl) { - case TTM_PL_VRAM: - seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", - adev->mman.bdev.man[ttm_pl].size, - (u64)atomic64_read(&adev->vram_usage) >> 20, - (u64)atomic64_read(&adev->vram_vis_usage) >> 20); - break; - case TTM_PL_TT: - amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]); - break; - } + man->func->debug(man, &p); return 0; } @@ -1574,7 +1782,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) adev, &amdgpu_ttm_gtt_fops); if (IS_ERR(ent)) return PTR_ERR(ent); - i_size_write(ent->d_inode, adev->mc.gtt_size); + i_size_write(ent->d_inode, adev->mc.gart_size); adev->mman.gtt = ent; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 6bdede8ff12b..f22a4758719d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -34,6 +34,9 @@ #define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1) #define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2) +#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 +#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 + struct amdgpu_mman { struct ttm_bo_global_ref bo_global_ref; struct drm_global_reference mem_global_ref; @@ -49,6 +52,8 @@ struct amdgpu_mman { /* buffer handling */ const struct amdgpu_buffer_funcs *buffer_funcs; struct amdgpu_ring *buffer_funcs_ring; + + struct mutex gtt_window_lock; /* Scheduler entity for buffer moves */ struct amd_sched_entity entity; }; @@ -56,24 +61,29 @@ struct amdgpu_mman { extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; +bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem); int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, struct ttm_mem_reg *mem); +uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); + +uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); -int amdgpu_copy_buffer(struct amdgpu_ring *ring, - uint64_t src_offset, - uint64_t dst_offset, - uint32_t byte_count, +int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, + 
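amdgpu_map_buffer() above stages page mappings through fixed GART "transfer windows", each AMDGPU_GTT_MAX_TRANSFER_SIZE pages wide, while amdgpu_fill_buffer() now clears memory with 8-byte PTE/PDE writes (hence the '>> 3' conversion and the multiple-of-8 size warning). A small sketch of the window address calculation, assuming the usual 4 KiB GPU page size:

#include <stdint.h>

#define GTT_MAX_TRANSFER_SIZE 512ULL	/* pages, mirrors AMDGPU_GTT_MAX_TRANSFER_SIZE */
#define GPU_PAGE_SIZE 4096ULL		/* assumed value of AMDGPU_GPU_PAGE_SIZE */

/* Window N of the staging area starts N * 512 GPU pages past gart_start,
 * matching the *addr computation in amdgpu_map_buffer() above. */
uint64_t gtt_window_addr(uint64_t gart_start, unsigned window)
{
	return gart_start + (uint64_t)window * GTT_MAX_TRANSFER_SIZE * GPU_PAGE_SIZE;
}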
uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct dma_fence **fence, bool direct_submit); + struct dma_fence **fence, bool direct_submit, + bool vm_needs_flush); int amdgpu_fill_buffer(struct amdgpu_bo *bo, - uint32_t src_data, + uint64_t src_data, struct reservation_object *resv, struct dma_fence **fence); int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem); +int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 4f50eeb65855..36c763310df5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -275,14 +275,10 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) else return AMDGPU_FW_LOAD_PSP; case CHIP_RAVEN: -#if 0 - if (!load_type) + if (load_type != 2) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; -#else - return AMDGPU_FW_LOAD_DIRECT; -#endif default: DRM_ERROR("Unknow firmware load type\n"); } @@ -362,8 +358,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, (le32_to_cpu(header->jt_offset) * 4); memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4); - ucode->ucode_size += le32_to_cpu(header->jt_size) * 4; - return 0; } @@ -377,10 +371,15 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) struct amdgpu_firmware_info *ucode = NULL; const struct common_firmware_header *header = NULL; + if (!adev->firmware.fw_size) { + dev_warn(adev->dev, "No ip firmware need to load\n"); + return 0; + } + err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, amdgpu_sriov_vf(adev) ? 
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, bo); + NULL, NULL, 0, bo); if (err) { dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); goto failed; @@ -459,6 +458,9 @@ int amdgpu_ucode_fini_bo(struct amdgpu_device *adev) int i; struct amdgpu_firmware_info *ucode = NULL; + if (!adev->firmware.fw_size) + return 0; + for (i = 0; i < adev->firmware.max_ucodes; i++) { ucode = &adev->firmware.ucode[i]; if (ucode->fw) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2ca09f111f08..e19928dae8e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -588,6 +588,10 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, } break; + case 8: /* MJPEG */ + min_dpb_size = 0; + break; + case 16: /* H265 */ image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2; image_size = ALIGN(image_size, 256); @@ -1051,7 +1055,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, NULL, 0, &bo); if (r) return r; @@ -1101,7 +1105,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, NULL, 0, &bo); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index b692ad402252..c855366521ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -937,9 +937,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r, timeout = adev->usec_timeout; - /* workaround VCE ring test slow issue for sriov*/ + /* skip ring test for sriov*/ if (amdgpu_sriov_vf(adev)) - timeout *= 10; + return 0; r = amdgpu_ring_alloc(ring, 16); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 09190fadd228..041e0121590c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -209,9 +209,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) if (fences == 0) { if (adev->pm.dpm_enabled) { + /* might be used when with pg/cg amdgpu_dpm_enable_uvd(adev, false); - } else { - amdgpu_asic_set_uvd_clocks(adev, 0, 0); + */ } } else { schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); @@ -223,12 +223,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); - if (set_clocks) { - if (adev->pm.dpm_enabled) { - amdgpu_dpm_enable_uvd(adev, true); - } else { - amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - } + if (set_clocks && adev->pm.dpm_enabled) { + /* might be used when with pg/cg + amdgpu_dpm_enable_uvd(adev, true); + */ } } @@ -361,7 +359,7 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, NULL, 0, &bo); if (r) return r; @@ -413,7 +411,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, &bo); + NULL, 
NULL, 0, &bo); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c new file mode 100644 index 000000000000..45ac91861965 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c @@ -0,0 +1,85 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_vf_error.h" +#include "mxgpu_ai.h" + +#define AMDGPU_VF_ERROR_ENTRY_SIZE 16 + +/* struct error_entry - amdgpu VF error information. */ +struct amdgpu_vf_error_buffer { + int read_count; + int write_count; + uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE]; + uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE]; + uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE]; +}; + +struct amdgpu_vf_error_buffer admgpu_vf_errors; + + +void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data) +{ + int index; + uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code); + + index = admgpu_vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE; + admgpu_vf_errors.code [index] = error_code; + admgpu_vf_errors.flags [index] = error_flags; + admgpu_vf_errors.data [index] = error_data; + admgpu_vf_errors.write_count ++; +} + + +void amdgpu_vf_error_trans_all(struct amdgpu_device *adev) +{ + /* u32 pf2vf_flags = 0; */ + u32 data1, data2, data3; + int index; + + if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) || (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) { + return; + } +/* + TODO: Enable these code when pv2vf_info is merged + AMDGPU_FW_VRAM_PF2VF_READ (adev, feature_flags, &pf2vf_flags); + if (!(pf2vf_flags & AMDGIM_FEATURE_ERROR_LOG_COLLECT)) { + return; + } +*/ + /* The errors are overlay of array, correct read_count as full. 
*/ + if (admgpu_vf_errors.write_count - admgpu_vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) { + admgpu_vf_errors.read_count = admgpu_vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE; + } + + while (admgpu_vf_errors.read_count < admgpu_vf_errors.write_count) { + index =admgpu_vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE; + data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX (admgpu_vf_errors.code[index], admgpu_vf_errors.flags[index]); + data2 = admgpu_vf_errors.data[index] & 0xFFFFFFFF; + data3 = (admgpu_vf_errors.data[index] >> 32) & 0xFFFFFFFF; + + adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3); + admgpu_vf_errors.read_count ++; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h new file mode 100644 index 000000000000..2a3278ec76ba --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h @@ -0,0 +1,62 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
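The new amdgpu_vf_error.c keeps only the last AMDGPU_VF_ERROR_ENTRY_SIZE (16) errors: the writer indexes with write_count modulo the buffer size, and the drain loop clamps read_count when the writer has lapped it. A standalone restatement of that indexing (hypothetical names, no hardware mailbox):

#include <stdint.h>

#define ENTRY_SIZE 16	/* mirrors AMDGPU_VF_ERROR_ENTRY_SIZE */

struct err_buf {
	int read_count;
	int write_count;
	uint16_t code[ENTRY_SIZE];
};

void err_put(struct err_buf *b, uint16_t code)
{
	b->code[b->write_count % ENTRY_SIZE] = code;
	b->write_count++;
}

/* Returns 1 and one error code per call until the buffer is drained. */
int err_drain_one(struct err_buf *b, uint16_t *code)
{
	/* Writer overran the reader: only the newest ENTRY_SIZE entries survive. */
	if (b->write_count - b->read_count > ENTRY_SIZE)
		b->read_count = b->write_count - ENTRY_SIZE;

	if (b->read_count >= b->write_count)
		return 0;

	*code = b->code[b->read_count % ENTRY_SIZE];
	b->read_count++;
	return 1;
}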
+ * + */ + +#ifndef __VF_ERROR_H__ +#define __VF_ERROR_H__ + +#define AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(c,f) (((c & 0xFFFF) << 16) | (f & 0xFFFF)) +#define AMDGIM_ERROR_CODE(t,c) (((t&0xF)<<12)|(c&0xFFF)) + +/* Please keep enum same as AMD GIM driver */ +enum AMDGIM_ERROR_VF { + AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL = 0, + AMDGIM_ERROR_VF_NO_VBIOS, + AMDGIM_ERROR_VF_GPU_POST_ERROR, + AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, + AMDGIM_ERROR_VF_FENCE_INIT_FAIL, + + AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, + AMDGIM_ERROR_VF_IB_INIT_FAIL, + AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, + AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, + AMDGIM_ERROR_VF_GPU_RESET_FAIL, + + AMDGIM_ERROR_VF_TEST, + AMDGIM_ERROR_VF_MAX +}; + +enum AMDGIM_ERROR_CATEGORY { + AMDGIM_ERROR_CATEGORY_NON_USED = 0, + AMDGIM_ERROR_CATEGORY_GIM, + AMDGIM_ERROR_CATEGORY_PF, + AMDGIM_ERROR_CATEGORY_VF, + AMDGIM_ERROR_CATEGORY_VBIOS, + AMDGIM_ERROR_CATEGORY_MONITOR, + + AMDGIM_ERROR_CATEGORY_MAX +}; + +void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data); +void amdgpu_vf_error_trans_all (struct amdgpu_device *adev); + +#endif /* __VF_ERROR_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 8a081e162d13..ab05121b9272 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -46,14 +46,14 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev) * address within META_DATA init package to support SRIOV gfx preemption. */ -int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm) +int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo_va **bo_va) { - int r; - struct amdgpu_bo_va *bo_va; struct ww_acquire_ctx ticket; struct list_head list; struct amdgpu_bo_list_entry pd; struct ttm_validate_buffer csa_tv; + int r; INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&csa_tv.head); @@ -69,34 +69,33 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm) return r; } - bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj); - if (!bo_va) { + *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj); + if (!*bo_va) { ttm_eu_backoff_reservation(&ticket, &list); DRM_ERROR("failed to create bo_va for static CSA\n"); return -ENOMEM; } - r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR, - AMDGPU_CSA_SIZE); + r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR, + AMDGPU_CSA_SIZE); if (r) { DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); - amdgpu_vm_bo_rmv(adev, bo_va); + amdgpu_vm_bo_rmv(adev, *bo_va); ttm_eu_backoff_reservation(&ticket, &list); return r; } - r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE, - AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | - AMDGPU_PTE_EXECUTABLE); + r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE, + AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | + AMDGPU_PTE_EXECUTABLE); if (r) { DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); - amdgpu_vm_bo_rmv(adev, bo_va); + amdgpu_vm_bo_rmv(adev, *bo_va); ttm_eu_backoff_reservation(&ticket, &list); return r; } - vm->csa_bo_va = bo_va; ttm_eu_backoff_reservation(&ticket, &list); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 9e1062edb76e..afcfb8bcfb65 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -43,6 +43,7 @@ struct amdgpu_virt_ops { int (*req_full_gpu)(struct amdgpu_device 
*adev, bool init); int (*rel_full_gpu)(struct amdgpu_device *adev, bool init); int (*reset_gpu)(struct amdgpu_device *adev); + void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); }; /* GPU virtualization */ @@ -89,7 +90,8 @@ static inline bool is_virtual_machine(void) struct amdgpu_vm; int amdgpu_allocate_static_csa(struct amdgpu_device *adev); -int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo_va **bo_va); void amdgpu_virt_init_setting(struct amdgpu_device *adev); uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 5795f81369f0..6b1343e5541d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -77,8 +77,6 @@ struct amdgpu_pte_update_params { void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags); - /* indicate update pt or its shadow */ - bool shadow; /* The next two are used during VM update by CPU * DMA addresses to use for mapping * Kernel pointer of PD/PT BO that needs to be updated @@ -161,11 +159,26 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, */ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, int (*validate)(void *, struct amdgpu_bo *), - void *param) + void *param, bool use_cpu_for_update, + struct ttm_bo_global *glob) { unsigned i; int r; + if (parent->bo->shadow) { + struct amdgpu_bo *shadow = parent->bo->shadow; + + r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); + if (r) + return r; + } + + if (use_cpu_for_update) { + r = amdgpu_bo_kmap(parent->bo, NULL); + if (r) + return r; + } + if (!parent->entries) return 0; @@ -179,11 +192,18 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent, if (r) return r; + spin_lock(&glob->lru_lock); + ttm_bo_move_to_lru_tail(&entry->bo->tbo); + if (entry->bo->shadow) + ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo); + spin_unlock(&glob->lru_lock); + /* * Recurse into the sub directory. This is harmless because we * have only a maximum of 5 layers. */ - r = amdgpu_vm_validate_level(entry, validate, param); + r = amdgpu_vm_validate_level(entry, validate, param, + use_cpu_for_update, glob); if (r) return r; } @@ -214,54 +234,12 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (num_evictions == vm->last_eviction_counter) return 0; - return amdgpu_vm_validate_level(&vm->root, validate, param); -} - -/** - * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail - * - * @adev: amdgpu device instance - * @vm: vm providing the BOs - * - * Move the PT BOs to the tail of the LRU. 
- */ -static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent) -{ - unsigned i; - - if (!parent->entries) - return; - - for (i = 0; i <= parent->last_entry_used; ++i) { - struct amdgpu_vm_pt *entry = &parent->entries[i]; - - if (!entry->bo) - continue; - - ttm_bo_move_to_lru_tail(&entry->bo->tbo); - amdgpu_vm_move_level_in_lru(entry); - } + return amdgpu_vm_validate_level(&vm->root, validate, param, + vm->use_cpu_for_update, + adev->mman.bdev.glob); } /** - * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail - * - * @adev: amdgpu device instance - * @vm: vm providing the BOs - * - * Move the PT BOs to the tail of the LRU. - */ -void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, - struct amdgpu_vm *vm) -{ - struct ttm_bo_global *glob = adev->mman.bdev.glob; - - spin_lock(&glob->lru_lock); - amdgpu_vm_move_level_in_lru(&vm->root); - spin_unlock(&glob->lru_lock); -} - - /** * amdgpu_vm_alloc_levels - allocate the PD/PT levels * * @adev: amdgpu_device pointer @@ -282,6 +260,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, unsigned pt_idx, from, to; int r; u64 flags; + uint64_t init_value = 0; if (!parent->entries) { unsigned num_entries = amdgpu_vm_num_entries(adev, level); @@ -315,6 +294,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_SHADOW); + if (vm->pte_support_ats) { + init_value = AMDGPU_PTE_SYSTEM; + if (level != adev->vm_manager.num_level - 1) + init_value |= AMDGPU_PDE_PTE; + } + /* walk over the address space and allocate the page tables */ for (pt_idx = from; pt_idx <= to; ++pt_idx) { struct reservation_object *resv = vm->root.bo->tbo.resv; @@ -327,10 +312,18 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, flags, - NULL, resv, &pt); + NULL, resv, init_value, &pt); if (r) return r; + if (vm->use_cpu_for_update) { + r = amdgpu_bo_kmap(pt, NULL); + if (r) { + amdgpu_bo_unref(&pt); + return r; + } + } + /* Keep a reference to the root directory to avoid * freeing them up in the wrong order. */ @@ -424,7 +417,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm, struct dma_fence *updates = sync->last_vm_update; int r = 0; struct dma_fence *flushed, *tmp; - bool needs_flush = false; + bool needs_flush = vm->use_cpu_for_update; flushed = id->flushed_updates; if ((amdgpu_vm_had_gpu_reset(adev, id)) || @@ -545,11 +538,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, } kfree(fences); - job->vm_needs_flush = false; + job->vm_needs_flush = vm->use_cpu_for_update; /* Check if we can use a VMID already assigned to this VM */ list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) { struct dma_fence *flushed; - bool needs_flush = false; + bool needs_flush = vm->use_cpu_for_update; /* Check all the prerequisites to using this VMID */ if (amdgpu_vm_had_gpu_reset(adev, id)) @@ -745,7 +738,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev) * * Emit a VM flush when it is necessary. 
*/ -int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; @@ -767,12 +760,15 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) vm_flush_needed = true; } - if (!vm_flush_needed && !gds_switch_needed) + if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) return 0; if (ring->funcs->init_cond_exec) patch_offset = amdgpu_ring_init_cond_exec(ring); + if (need_pipe_sync) + amdgpu_ring_emit_pipeline_sync(ring); + if (ring->funcs->emit_vm_flush && vm_flush_needed) { struct dma_fence *fence; @@ -874,8 +870,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, { struct amdgpu_bo_va *bo_va; - list_for_each_entry(bo_va, &bo->va, bo_list) { - if (bo_va->vm == vm) { + list_for_each_entry(bo_va, &bo->va, base.bo_list) { + if (bo_va->base.vm == vm) { return bo_va; } } @@ -981,6 +977,8 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, unsigned int i; uint64_t value; + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); + for (i = 0; i < count; i++) { value = params->pages_addr ? amdgpu_vm_map_gart(params->pages_addr, addr) : @@ -989,19 +987,16 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, i, value, flags); addr += incr; } - - /* Flush HDP */ - mb(); - amdgpu_gart_flush_gpu_tlb(params->adev, 0); } -static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo) +static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, + void *owner) { struct amdgpu_sync sync; int r; amdgpu_sync_create(&sync); - amdgpu_sync_resv(adev, &sync, bo->tbo.resv, AMDGPU_FENCE_OWNER_VM); + amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner); r = amdgpu_sync_wait(&sync, true); amdgpu_sync_free(&sync); @@ -1042,23 +1037,14 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, params.adev = adev; shadow = parent->bo->shadow; - WARN_ON(vm->use_cpu_for_update && shadow); - if (vm->use_cpu_for_update && !shadow) { - r = amdgpu_bo_kmap(parent->bo, (void **)&pd_addr); - if (r) - return r; - r = amdgpu_vm_bo_wait(adev, parent->bo); - if (unlikely(r)) { - amdgpu_bo_kunmap(parent->bo); + if (vm->use_cpu_for_update) { + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); + r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); + if (unlikely(r)) return r; - } + params.func = amdgpu_vm_cpu_set_ptes; } else { - if (shadow) { - r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); - if (r) - return r; - } ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); @@ -1094,21 +1080,14 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, if (bo == NULL) continue; - if (bo->shadow) { - struct amdgpu_bo *pt_shadow = bo->shadow; - - r = amdgpu_ttm_bind(&pt_shadow->tbo, - &pt_shadow->tbo.mem); - if (r) - return r; - } - pt = amdgpu_bo_gpu_offset(bo); pt = amdgpu_gart_get_vm_pde(adev, pt); - if (parent->entries[pt_idx].addr == pt) + /* Don't update huge pages here */ + if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) || + parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID)) continue; - parent->entries[pt_idx].addr = pt; + parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID; pde = pd_addr + pt_idx * 8; if (((last_pde + 8 * count) != pde) || @@ -1146,28 +1125,29 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, count, incr, AMDGPU_PTE_VALID); } - if (params.func == 
amdgpu_vm_cpu_set_ptes) - amdgpu_bo_kunmap(parent->bo); - else if (params.ib->length_dw == 0) { - amdgpu_job_free(job); - } else { - amdgpu_ring_pad_ib(ring, params.ib); - amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv, - AMDGPU_FENCE_OWNER_VM); - if (shadow) - amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv, + if (!vm->use_cpu_for_update) { + if (params.ib->length_dw == 0) { + amdgpu_job_free(job); + } else { + amdgpu_ring_pad_ib(ring, params.ib); + amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv, AMDGPU_FENCE_OWNER_VM); + if (shadow) + amdgpu_sync_resv(adev, &job->sync, + shadow->tbo.resv, + AMDGPU_FENCE_OWNER_VM); + + WARN_ON(params.ib->length_dw > ndw); + r = amdgpu_job_submit(job, ring, &vm->entity, + AMDGPU_FENCE_OWNER_VM, &fence); + if (r) + goto error_free; - WARN_ON(params.ib->length_dw > ndw); - r = amdgpu_job_submit(job, ring, &vm->entity, - AMDGPU_FENCE_OWNER_VM, &fence); - if (r) - goto error_free; - - amdgpu_bo_fence(parent->bo, fence, true); - dma_fence_put(vm->last_dir_update); - vm->last_dir_update = dma_fence_get(fence); - dma_fence_put(fence); + amdgpu_bo_fence(parent->bo, fence, true); + dma_fence_put(vm->last_dir_update); + vm->last_dir_update = dma_fence_get(fence); + dma_fence_put(fence); + } } /* * Recurse into the subdirectories. This recursion is harmless because @@ -1235,33 +1215,98 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, if (r) amdgpu_vm_invalidate_level(&vm->root); + if (vm->use_cpu_for_update) { + /* Flush HDP */ + mb(); + amdgpu_gart_flush_gpu_tlb(adev, 0); + } + return r; } /** - * amdgpu_vm_find_pt - find the page table for an address + * amdgpu_vm_find_entry - find the entry for an address * * @p: see amdgpu_pte_update_params definition * @addr: virtual address in question + * @entry: resulting entry or NULL + * @parent: parent entry * - * Find the page table BO for a virtual address, return NULL when none found. + * Find the vm_pt entry and it's parent for the given address. */ -static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p, - uint64_t addr) +void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr, + struct amdgpu_vm_pt **entry, + struct amdgpu_vm_pt **parent) { - struct amdgpu_vm_pt *entry = &p->vm->root; unsigned idx, level = p->adev->vm_manager.num_level; - while (entry->entries) { + *parent = NULL; + *entry = &p->vm->root; + while ((*entry)->entries) { idx = addr >> (p->adev->vm_manager.block_size * level--); - idx %= amdgpu_bo_size(entry->bo) / 8; - entry = &entry->entries[idx]; + idx %= amdgpu_bo_size((*entry)->bo) / 8; + *parent = *entry; + *entry = &(*entry)->entries[idx]; } if (level) - return NULL; + *entry = NULL; +} + +/** + * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages + * + * @p: see amdgpu_pte_update_params definition + * @entry: vm_pt entry to check + * @parent: parent entry + * @nptes: number of PTEs updated with this operation + * @dst: destination address where the PTEs should point to + * @flags: access flags fro the PTEs + * + * Check if we can update the PD with a huge page. 
+ */ +static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, + struct amdgpu_vm_pt *entry, + struct amdgpu_vm_pt *parent, + unsigned nptes, uint64_t dst, + uint64_t flags) +{ + bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes); + uint64_t pd_addr, pde; + + /* In the case of a mixed PT the PDE must point to it*/ + if (p->adev->asic_type < CHIP_VEGA10 || + nptes != AMDGPU_VM_PTE_COUNT(p->adev) || + p->func == amdgpu_vm_do_copy_ptes || + !(flags & AMDGPU_PTE_VALID)) { + + dst = amdgpu_bo_gpu_offset(entry->bo); + dst = amdgpu_gart_get_vm_pde(p->adev, dst); + flags = AMDGPU_PTE_VALID; + } else { + /* Set the huge page flag to stop scanning at this PDE */ + flags |= AMDGPU_PDE_PTE; + } + + if (entry->addr == (dst | flags)) + return; + + entry->addr = (dst | flags); - return entry->bo; + if (use_cpu_update) { + pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); + pde = pd_addr + (entry - parent->entries) * 8; + amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags); + } else { + if (parent->bo->shadow) { + pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow); + pde = pd_addr + (entry - parent->entries) * 8; + amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags); + } + pd_addr = amdgpu_bo_gpu_offset(parent->bo); + pde = pd_addr + (entry - parent->entries) * 8; + amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags); + } } /** @@ -1287,49 +1332,44 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, uint64_t addr, pe_start; struct amdgpu_bo *pt; unsigned nptes; - int r; bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes); - /* walk over the address space and update the page tables */ - for (addr = start; addr < end; addr += nptes) { - pt = amdgpu_vm_get_pt(params, addr); - if (!pt) { - pr_err("PT not found, aborting update_ptes\n"); - return -EINVAL; - } + for (addr = start; addr < end; addr += nptes, + dst += nptes * AMDGPU_GPU_PAGE_SIZE) { + struct amdgpu_vm_pt *entry, *parent; - if (params->shadow) { - if (WARN_ONCE(use_cpu_update, - "CPU VM update doesn't suuport shadow pages")) - return 0; - - if (!pt->shadow) - return 0; - pt = pt->shadow; - } + amdgpu_vm_get_entry(params, addr, &entry, &parent); + if (!entry) + return -ENOENT; if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); + amdgpu_vm_handle_huge_pages(params, entry, parent, + nptes, dst, flags); + /* We don't need to update PTEs for huge pages */ + if (entry->addr & AMDGPU_PDE_PTE) + continue; + + pt = entry->bo; if (use_cpu_update) { - r = amdgpu_bo_kmap(pt, (void *)&pe_start); - if (r) - return r; - } else + pe_start = (unsigned long)amdgpu_bo_kptr(pt); + } else { + if (pt->shadow) { + pe_start = amdgpu_bo_gpu_offset(pt->shadow); + pe_start += (addr & mask) * 8; + params->func(params, pe_start, dst, nptes, + AMDGPU_GPU_PAGE_SIZE, flags); + } pe_start = amdgpu_bo_gpu_offset(pt); + } pe_start += (addr & mask) * 8; - params->func(params, pe_start, dst, nptes, AMDGPU_GPU_PAGE_SIZE, flags); - - dst += nptes * AMDGPU_GPU_PAGE_SIZE; - - if (use_cpu_update) - amdgpu_bo_kunmap(pt); } return 0; @@ -1370,10 +1410,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, * Userspace can support this by aligning virtual base address and * allocation size to the fragment size. 
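amdgpu_vm_handle_huge_pages() above sets the AMDGPU_PDE_PTE flag in a PDE only when one contiguous, valid mapping covers an entire page table on Vega10 or newer and the update is not done via the copy path; otherwise the PDE keeps pointing at the page table. A minimal restatement of that predicate (standalone, hypothetical parameter names):

#include <stdbool.h>

/* pte_count_per_pt stands in for AMDGPU_VM_PTE_COUNT(adev). */
bool pde_can_be_huge(bool asic_is_vega10_or_newer,
		     unsigned nptes, unsigned pte_count_per_pt,
		     bool uses_copy_func, bool mapping_valid)
{
	return asic_is_vega10_or_newer &&
	       nptes == pte_count_per_pt &&
	       !uses_copy_func &&
	       mapping_valid;
}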
*/ - - /* SI and newer are optimized for 64KB */ - uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG); - uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG; + unsigned pages_per_frag = params->adev->vm_manager.fragment_size; + uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag); + uint64_t frag_align = 1 << pages_per_frag; uint64_t frag_start = ALIGN(start, frag_align); uint64_t frag_end = end & ~(frag_align - 1); @@ -1445,6 +1484,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, params.vm = vm; params.src = src; + /* sync to everything on unmapping */ + if (!(flags & AMDGPU_PTE_VALID)) + owner = AMDGPU_FENCE_OWNER_UNDEFINED; + if (vm->use_cpu_for_update) { /* params.src is used as flag to indicate system Memory */ if (pages_addr) @@ -1453,23 +1496,18 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* Wait for PT BOs to be free. PTs share the same resv. object * as the root PD BO */ - r = amdgpu_vm_bo_wait(adev, vm->root.bo); + r = amdgpu_vm_wait_pd(adev, vm, owner); if (unlikely(r)) return r; params.func = amdgpu_vm_cpu_set_ptes; params.pages_addr = pages_addr; - params.shadow = false; return amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); } ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - /* sync to everything on unmapping */ - if (!(flags & AMDGPU_PTE_VALID)) - owner = AMDGPU_FENCE_OWNER_UNDEFINED; - nptes = last - start + 1; /* @@ -1481,6 +1519,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc. */ ndw = 64; + /* one PDE write for each huge page */ + ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6; + if (src) { /* only copy commands needed */ ndw += ncmds * 7; @@ -1542,11 +1583,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - params.shadow = true; - r = amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); - if (r) - goto error_free; - params.shadow = false; r = amdgpu_vm_frag_ptes(¶ms, start, last + 1, addr, flags); if (r) goto error_free; @@ -1565,6 +1601,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, error_free: amdgpu_job_free(job); + amdgpu_vm_invalidate_level(&vm->root); return r; } @@ -1687,7 +1724,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear) { - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_bo *bo = bo_va->base.bo; + struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; uint64_t gtt_flags, flags; @@ -1696,27 +1734,27 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct dma_fence *exclusive; int r; - if (clear || !bo_va->bo) { + if (clear || !bo_va->base.bo) { mem = NULL; nodes = NULL; exclusive = NULL; } else { struct ttm_dma_tt *ttm; - mem = &bo_va->bo->tbo.mem; + mem = &bo_va->base.bo->tbo.mem; nodes = mem->mm_node; if (mem->mem_type == TTM_PL_TT) { - ttm = container_of(bo_va->bo->tbo.ttm, struct - ttm_dma_tt, ttm); + ttm = container_of(bo_va->base.bo->tbo.ttm, + struct ttm_dma_tt, ttm); pages_addr = ttm->dma_address; } - exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); + exclusive = reservation_object_get_excl(bo->tbo.resv); } - if (bo_va->bo) { - flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); - gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && - adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? 
+ if (bo) { + flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); + gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) && + adev == amdgpu_ttm_adev(bo->tbo.bdev)) ? flags : 0; } else { flags = 0x0; @@ -1724,7 +1762,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, } spin_lock(&vm->status_lock); - if (!list_empty(&bo_va->vm_status)) + if (!list_empty(&bo_va->base.vm_status)) list_splice_init(&bo_va->valids, &bo_va->invalids); spin_unlock(&vm->status_lock); @@ -1747,11 +1785,17 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_lock(&vm->status_lock); list_splice_init(&bo_va->invalids, &bo_va->valids); - list_del_init(&bo_va->vm_status); + list_del_init(&bo_va->base.vm_status); if (clear) - list_add(&bo_va->vm_status, &vm->cleared); + list_add(&bo_va->base.vm_status, &vm->cleared); spin_unlock(&vm->status_lock); + if (vm->use_cpu_for_update) { + /* Flush HDP */ + mb(); + amdgpu_gart_flush_gpu_tlb(adev, 0); + } + return 0; } @@ -1905,15 +1949,19 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; struct dma_fence *f = NULL; int r; + uint64_t init_pte_value = 0; while (!list_empty(&vm->freed)) { mapping = list_first_entry(&vm->freed, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); + if (vm->pte_support_ats) + init_pte_value = AMDGPU_PTE_SYSTEM; + r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm, mapping->start, mapping->last, - 0, 0, &f); + init_pte_value, 0, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); @@ -1933,26 +1981,26 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, } /** - * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT + * amdgpu_vm_clear_moved - clear moved BOs in the PT * * @adev: amdgpu_device pointer * @vm: requested vm * - * Make sure all invalidated BOs are cleared in the PT. + * Make sure all moved BOs are cleared in the PT. * Returns 0 for success. * * PTs have to be reserved and mutex must be locked! 
*/ -int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, - struct amdgpu_vm *vm, struct amdgpu_sync *sync) +int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = NULL; int r = 0; spin_lock(&vm->status_lock); - while (!list_empty(&vm->invalidated)) { - bo_va = list_first_entry(&vm->invalidated, - struct amdgpu_bo_va, vm_status); + while (!list_empty(&vm->moved)) { + bo_va = list_first_entry(&vm->moved, + struct amdgpu_bo_va, base.vm_status); spin_unlock(&vm->status_lock); r = amdgpu_vm_bo_update(adev, bo_va, true); @@ -1992,16 +2040,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, if (bo_va == NULL) { return NULL; } - bo_va->vm = vm; - bo_va->bo = bo; + bo_va->base.vm = vm; + bo_va->base.bo = bo; + INIT_LIST_HEAD(&bo_va->base.bo_list); + INIT_LIST_HEAD(&bo_va->base.vm_status); + bo_va->ref_count = 1; - INIT_LIST_HEAD(&bo_va->bo_list); INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); - INIT_LIST_HEAD(&bo_va->vm_status); if (bo) - list_add_tail(&bo_va->bo_list, &bo->va); + list_add_tail(&bo_va->base.bo_list, &bo->va); return bo_va; } @@ -2026,7 +2075,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, uint64_t size, uint64_t flags) { struct amdgpu_bo_va_mapping *mapping, *tmp; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_bo *bo = bo_va->base.bo; + struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; /* validate the parameters */ @@ -2037,7 +2087,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, /* make sure object fit at this offset */ eaddr = saddr + size - 1; if (saddr >= eaddr || - (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo))) + (bo && offset + size > amdgpu_bo_size(bo))) return -EINVAL; saddr /= AMDGPU_GPU_PAGE_SIZE; @@ -2047,7 +2097,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, if (tmp) { /* bo and tmp overlap, invalid addr */ dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " - "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr, + "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr, tmp->start, tmp->last + 1); return -EINVAL; } @@ -2092,7 +2142,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, uint64_t size, uint64_t flags) { struct amdgpu_bo_va_mapping *mapping; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_bo *bo = bo_va->base.bo; + struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; int r; @@ -2104,7 +2155,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, /* make sure object fit at this offset */ eaddr = saddr + size - 1; if (saddr >= eaddr || - (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo))) + (bo && offset + size > amdgpu_bo_size(bo))) return -EINVAL; /* Allocate all the needed memory */ @@ -2112,7 +2163,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, if (!mapping) return -ENOMEM; - r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size); + r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); if (r) { kfree(mapping); return r; @@ -2152,7 +2203,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, uint64_t saddr) { struct amdgpu_bo_va_mapping *mapping; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_vm *vm = bo_va->base.vm; bool valid = true; saddr /= AMDGPU_GPU_PAGE_SIZE; @@ -2300,12 +2351,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va) { struct amdgpu_bo_va_mapping *mapping, *next; - struct amdgpu_vm *vm = bo_va->vm; + struct amdgpu_vm *vm = bo_va->base.vm; - list_del(&bo_va->bo_list); + 
list_del(&bo_va->base.bo_list); spin_lock(&vm->status_lock); - list_del(&bo_va->vm_status); + list_del(&bo_va->base.vm_status); spin_unlock(&vm->status_lock); list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { @@ -2337,13 +2388,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo) { - struct amdgpu_bo_va *bo_va; + struct amdgpu_vm_bo_base *bo_base; - list_for_each_entry(bo_va, &bo->va, bo_list) { - spin_lock(&bo_va->vm->status_lock); - if (list_empty(&bo_va->vm_status)) - list_add(&bo_va->vm_status, &bo_va->vm->invalidated); - spin_unlock(&bo_va->vm->status_lock); + list_for_each_entry(bo_base, &bo->va, bo_list) { + spin_lock(&bo_base->vm->status_lock); + if (list_empty(&bo_base->vm_status)) + list_add(&bo_base->vm_status, + &bo_base->vm->moved); + spin_unlock(&bo_base->vm->status_lock); } } @@ -2361,12 +2413,26 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) } /** - * amdgpu_vm_adjust_size - adjust vm size and block size + * amdgpu_vm_set_fragment_size - adjust fragment size in PTE + * + * @adev: amdgpu_device pointer + * @fragment_size_default: the default fragment size if it's set auto + */ +void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default) +{ + if (amdgpu_vm_fragment_size == -1) + adev->vm_manager.fragment_size = fragment_size_default; + else + adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; +} + +/** + * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size * * @adev: amdgpu_device pointer * @vm_size: the default vm size if it's set auto */ -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size) +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default) { /* adjust vm size firstly */ if (amdgpu_vm_size == -1) @@ -2381,8 +2447,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size) else adev->vm_manager.block_size = amdgpu_vm_block_size; - DRM_INFO("vm size is %llu GB, block size is %u-bit\n", - adev->vm_manager.vm_size, adev->vm_manager.block_size); + amdgpu_vm_set_fragment_size(adev, fragment_size_default); + + DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n", + adev->vm_manager.vm_size, adev->vm_manager.block_size, + adev->vm_manager.fragment_size); } /** @@ -2404,13 +2473,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amd_sched_rq *rq; int r, i; u64 flags; + uint64_t init_pde_value = 0; vm->va = RB_ROOT; vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) vm->reserved_vmid[i] = NULL; spin_lock_init(&vm->status_lock); - INIT_LIST_HEAD(&vm->invalidated); + INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->cleared); INIT_LIST_HEAD(&vm->freed); @@ -2425,10 +2495,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) return r; - if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) + vm->pte_support_ats = false; + + if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE); - else + + if (adev->asic_type == CHIP_RAVEN) { + vm->pte_support_ats = true; + init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE; + } + } else vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX); DRM_DEBUG_DRIVER("VM update mode is %s\n", @@ -2448,7 +2525,7 @@ int amdgpu_vm_init(struct amdgpu_device 
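The fragment size handled by amdgpu_vm_set_fragment_size()/amdgpu_vm_adjust_size() above is the log2 number of contiguous pages per PTE fragment; the previously hard-coded AMDGPU_LOG2_PAGES_PER_FRAG value of 4 corresponds to 64 KiB with 4 KiB GPU pages. A tiny worked example (the default of 4 is assumed here for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t gpu_page_size = 4096;	/* assumed GPU page size */
	unsigned fragment_size = 4;		/* log2(pages per fragment) */

	uint64_t pages_per_frag = 1ULL << fragment_size;
	uint64_t frag_bytes = pages_per_frag * gpu_page_size;

	printf("fragment: %llu pages = %llu KiB\n",
	       (unsigned long long)pages_per_frag,
	       (unsigned long long)(frag_bytes / 1024));
	return 0;
}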
*adev, struct amdgpu_vm *vm, r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true, AMDGPU_GEM_DOMAIN_VRAM, flags, - NULL, NULL, &vm->root.bo); + NULL, NULL, init_pde_value, &vm->root.bo); if (r) goto error_free_sched_entity; @@ -2457,6 +2534,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, goto error_free_root; vm->last_eviction_counter = atomic64_read(&adev->num_evictions); + + if (vm->use_cpu_for_update) { + r = amdgpu_bo_kmap(vm->root.bo, NULL); + if (r) + goto error_free_root; + } + amdgpu_bo_unreserve(vm->root.bo); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 936f158bc5ec..ba6691b58ee7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -50,9 +50,6 @@ struct amdgpu_bo_list_entry; /* PTBs (Page Table Blocks) need to be aligned to 32K */ #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 -/* LOG2 number of continuous pages for the fragment field */ -#define AMDGPU_LOG2_PAGES_PER_FRAG 4 - #define AMDGPU_PTE_VALID (1ULL << 0) #define AMDGPU_PTE_SYSTEM (1ULL << 1) #define AMDGPU_PTE_SNOOPED (1ULL << 2) @@ -68,6 +65,9 @@ struct amdgpu_bo_list_entry; /* TILED for VEGA10, reserved for older ASICs */ #define AMDGPU_PTE_PRT (1ULL << 51) +/* PDE is handled as PTE for VEGA10 */ +#define AMDGPU_PDE_PTE (1ULL << 54) + /* VEGA10 only */ #define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57) #define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL) @@ -94,6 +94,18 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0) #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1) +/* base structure for tracking BO usage in a VM */ +struct amdgpu_vm_bo_base { + /* constant after initialization */ + struct amdgpu_vm *vm; + struct amdgpu_bo *bo; + + /* protected by bo being reserved */ + struct list_head bo_list; + + /* protected by spinlock */ + struct list_head vm_status; +}; struct amdgpu_vm_pt { struct amdgpu_bo *bo; @@ -112,7 +124,7 @@ struct amdgpu_vm { spinlock_t status_lock; /* BOs moved, but not yet updated in the PT */ - struct list_head invalidated; + struct list_head moved; /* BOs cleared in the PT because of a move */ struct list_head cleared; @@ -135,11 +147,12 @@ struct amdgpu_vm { u64 client_id; /* dedicated to vm */ struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS]; - /* each VM will map on CSA */ - struct amdgpu_bo_va *csa_bo_va; /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ bool use_cpu_for_update; + + /* Flag to indicate ATS support from PTE for GFX9 */ + bool pte_support_ats; }; struct amdgpu_vm_id { @@ -182,6 +195,7 @@ struct amdgpu_vm_manager { uint32_t num_level; uint64_t vm_size; uint32_t block_size; + uint32_t fragment_size; /* vram base address for page table entry */ u64 vram_base_offset; /* vm pte handling */ @@ -214,15 +228,13 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*callback)(void *p, struct amdgpu_bo *bo), void *param); -void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, - struct amdgpu_vm *vm); int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t saddr, uint64_t size); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct dma_fence *fence, struct amdgpu_job *job); -int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); void 
amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub, unsigned vmid); void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev); @@ -231,8 +243,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); -int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_sync *sync); +int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_sync *sync); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear); @@ -259,7 +271,10 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, uint64_t saddr, uint64_t size); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size); +void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, + uint32_t fragment_size_default); +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, + uint32_t fragment_size_default); int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_job *job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index a2c59a08b2bd..26e900627971 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -28,6 +28,8 @@ struct amdgpu_vram_mgr { struct drm_mm mm; spinlock_t lock; + atomic64_t usage; + atomic64_t vis_usage; }; /** @@ -79,6 +81,27 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man) } /** + * amdgpu_vram_mgr_vis_size - Calculate visible node size + * + * @adev: amdgpu device structure + * @node: MM node structure + * + * Calculate how many bytes of the MM node are inside visible VRAM + */ +static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, + struct drm_mm_node *node) +{ + uint64_t start = node->start << PAGE_SHIFT; + uint64_t end = (node->size + node->start) << PAGE_SHIFT; + + if (start >= adev->mc.visible_vram_size) + return 0; + + return (end > adev->mc.visible_vram_size ? + adev->mc.visible_vram_size : end) - start; +} + +/** * amdgpu_vram_mgr_new - allocate new ranges * * @man: TTM memory type manager @@ -93,11 +116,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, const struct ttm_place *place, struct ttm_mem_reg *mem) { + struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_vram_mgr *mgr = man->priv; struct drm_mm *mm = &mgr->mm; struct drm_mm_node *nodes; enum drm_mm_insert_mode mode; unsigned long lpfn, num_nodes, pages_per_node, pages_left; + uint64_t usage = 0, vis_usage = 0; unsigned i; int r; @@ -142,6 +167,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, if (unlikely(r)) goto error; + usage += nodes[i].size << PAGE_SHIFT; + vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); + /* Calculate a virtual BO start address to easily check if * everything is CPU accessible. 
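amdgpu_vram_mgr_vis_size() above clips each allocated MM node against the CPU-visible part of VRAM so the new usage counters can track visible-VRAM consumption separately. A standalone restatement working directly in bytes rather than pages (hypothetical helper name):

#include <stdint.h>

/* How many bytes of a [node_start, node_start + node_size) range fall
 * inside CPU-visible VRAM. */
uint64_t vis_bytes(uint64_t node_start, uint64_t node_size,
		   uint64_t visible_vram_size)
{
	uint64_t end = node_start + node_size;

	if (node_start >= visible_vram_size)
		return 0;

	return (end > visible_vram_size ? visible_vram_size : end) - node_start;
}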
*/ @@ -155,6 +183,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, } spin_unlock(&mgr->lock); + atomic64_add(usage, &mgr->usage); + atomic64_add(vis_usage, &mgr->vis_usage); + mem->mm_node = nodes; return 0; @@ -181,8 +212,10 @@ error: static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { + struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_vram_mgr *mgr = man->priv; struct drm_mm_node *nodes = mem->mm_node; + uint64_t usage = 0, vis_usage = 0; unsigned pages = mem->num_pages; if (!mem->mm_node) @@ -192,31 +225,67 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, while (pages) { pages -= nodes->size; drm_mm_remove_node(nodes); + usage += nodes->size << PAGE_SHIFT; + vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes); ++nodes; } spin_unlock(&mgr->lock); + atomic64_sub(usage, &mgr->usage); + atomic64_sub(vis_usage, &mgr->vis_usage); + kfree(mem->mm_node); mem->mm_node = NULL; } /** + * amdgpu_vram_mgr_usage - how many bytes are used in this domain + * + * @man: TTM memory type manager + * + * Returns how many bytes are used in this domain. + */ +uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man) +{ + struct amdgpu_vram_mgr *mgr = man->priv; + + return atomic64_read(&mgr->usage); +} + +/** + * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part + * + * @man: TTM memory type manager + * + * Returns how many bytes are used in the visible part of VRAM + */ +uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man) +{ + struct amdgpu_vram_mgr *mgr = man->priv; + + return atomic64_read(&mgr->vis_usage); +} + +/** * amdgpu_vram_mgr_debug - dump VRAM table * * @man: TTM memory type manager - * @prefix: text prefix + * @printer: DRM printer to use * * Dump the table content using printk. 
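Illustration only, not part of the patch: the atomic64 usage/vis_usage counters maintained in amdgpu_vram_mgr_new()/_del() above are exposed through the new amdgpu_vram_mgr_usage() and amdgpu_vram_mgr_vis_usage() accessors. A hypothetical consumer might report them as in the sketch below; it assumes the VRAM manager is reached via adev->mman.bdev.man[TTM_PL_VRAM], as elsewhere in the driver of this vintage, and sketch_report_vram() itself is a made-up name.

static void sketch_report_vram(struct amdgpu_device *adev)
{
	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];

	/* both accessors return bytes; shift to MiB for readability */
	dev_info(adev->dev,
		 "VRAM used: %llu MiB, visible: %llu MiB of %llu MiB\n",
		 amdgpu_vram_mgr_usage(man) >> 20,
		 amdgpu_vram_mgr_vis_usage(man) >> 20,
		 adev->mc.visible_vram_size >> 20);
}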
*/ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, - const char *prefix) + struct drm_printer *printer) { struct amdgpu_vram_mgr *mgr = man->priv; - struct drm_printer p = drm_debug_printer(prefix); spin_lock(&mgr->lock); - drm_mm_print(&mgr->mm, &p); + drm_mm_print(&mgr->mm, printer); spin_unlock(&mgr->lock); + + drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", + man->size, amdgpu_vram_mgr_usage(man) >> 20, + amdgpu_vram_mgr_vis_usage(man) >> 20); } const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 37a499ab30eb..567c4a5cf90c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1824,21 +1824,14 @@ static int cik_common_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_amdkfd_suspend(adev); - return cik_common_hw_fini(adev); } static int cik_common_resume(void *handle) { - int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = cik_common_hw_init(adev); - if (r) - return r; - - return amdgpu_amdkfd_resume(adev); + return cik_common_hw_init(adev); } static bool cik_common_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index c216e16826c9..f508f4d01e4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -342,6 +342,63 @@ static void cik_sdma_rlc_stop(struct amdgpu_device *adev) } /** + * cik_ctx_switch_enable - stop the async dma engines context switch + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs context switch. + * + * Halt or unhalt the async dma engines context switch (VI). 
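Illustration only, not part of the patch: the helper that follows encodes the amdgpu_sdma_phase_quantum module parameter into a (value, unit) pair, where the register field holds only a limited number of value bits and the unit acts as a power-of-two multiplier. A standalone sketch of that encoding; the field widths below are assumptions chosen for the example, the real ones come from the SDMA0_PHASE0_QUANTUM register masks:

#include <stdio.h>

#define SKETCH_VALUE_MAX 0x1fffu             /* assumed width of the VALUE field */
#define SKETCH_UNIT_MAX  0xfu                /* assumed width of the UNIT field */

static void encode_quantum(unsigned int quantum,
			   unsigned int *value, unsigned int *unit)
{
	*value = quantum;
	*unit = 0;
	/* halve (rounding up) until the value fits, bumping the unit each time */
	while (*value > SKETCH_VALUE_MAX) {
		*value = (*value + 1) >> 1;
		(*unit)++;
	}
	/* clamp when even the largest unit cannot represent the request,
	 * mirroring the WARN_ONCE() path in the driver code below */
	if (*unit > SKETCH_UNIT_MAX) {
		*value = SKETCH_VALUE_MAX;
		*unit = SKETCH_UNIT_MAX;
	}
}

int main(void)
{
	unsigned int v, u;

	encode_quantum(32, &v, &u);          /* sample input */
	printf("value=%u unit=%u\n", v, u);  /* fits directly: value=32 unit=0 */
	return 0;
}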
+ */ +static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl, phase_quantum = 0; + int i; + + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); + if (enable) { + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, 1); + if (amdgpu_sdma_phase_quantum) { + WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i], + phase_quantum); + WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i], + phase_quantum); + } + } else { + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, 0); + } + + WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl); + } +} + +/** * cik_sdma_enable - stop the async dma engines * * @adev: amdgpu_device pointer @@ -537,6 +594,8 @@ static int cik_sdma_start(struct amdgpu_device *adev) /* halt the engine before programing */ cik_sdma_enable(adev, false); + /* enable sdma ring preemption */ + cik_ctx_switch_enable(adev, true); /* start the gfx rings and rlc compute queues */ r = cik_sdma_gfx_resume(adev); @@ -984,6 +1043,7 @@ static int cik_sdma_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cik_ctx_switch_enable(adev, false); cik_sdma_enable(adev, false); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 9f78c03a2e31..4e519dc42916 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -484,134 +484,6 @@ static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev) return true; } -static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp; - int i; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - tmp = RREG32(mmVGA_RENDER_CONTROL); - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - WREG32(mmVGA_RENDER_CONTROL, tmp); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), - CRTC_CONTROL, CRTC_MASTER_EN); - if (crtc_enabled) { -#if 0 - u32 frame_count; - int j; - - save->crtc_enabled[i] = true; - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - amdgpu_display_vblank_wait(adev, i); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if 
(amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } -#else - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ -#endif - } else { - save->crtc_enabled[i] = false; - } - } -} - -static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp, frame_count; - int i, j; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - - if (save->crtc_enabled[i]) { - tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0); - WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) - break; - udelay(1); - } - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } - } - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); -} - static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -1867,7 +1739,7 @@ 
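Illustration only, not part of the patch: the crtc_load_lut hunks further below drop the driver-private lut_r/g/b copies and read the 16-bit values straight from crtc->gamma_store, packing the top 10 bits of each component into one DC_LUT_30_COLOR word (R in bits 29:20, G in 19:10, B in 9:0, the same layout as the code being replaced). A standalone sketch of that packing:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_lut_30(uint16_t r, uint16_t g, uint16_t b)
{
	/* keep the top 10 bits of each 16-bit component */
	return ((uint32_t)(r & 0xffc0) << 14) |   /* same as (r >> 6) << 20 */
	       ((uint32_t)(g & 0xffc0) << 4)  |   /* same as (g >> 6) << 10 */
	       ((uint32_t)b >> 6);
}

int main(void)
{
	/* full red, half green, no blue -> 0x3ff80000 */
	printf("0x%08x\n", pack_lut_30(0xffff, 0x8000, 0x0000));
	return 0;
}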
static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder, dce_v10_0_audio_write_sad_regs(encoder); dce_v10_0_audio_write_latency_fields(encoder, mode); - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; @@ -2267,6 +2139,7 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; + u16 *r, *g, *b; int i; u32 tmp; @@ -2304,11 +2177,14 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc) WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); + r = crtc->gamma_store; + g = r + crtc->gamma_size; + b = g + crtc->gamma_size; for (i = 0; i < 256; i++) { WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->lut_r[i] << 20) | - (amdgpu_crtc->lut_g[i] << 10) | - (amdgpu_crtc->lut_b[i] << 0)); + ((*r++ & 0xffc0) << 14) | + ((*g++ & 0xffc0) << 4) | + (*b++ >> 6)); } tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); @@ -2555,7 +2431,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2563,7 +2439,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2597,7 +2473,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; @@ -2624,15 +2500,6 @@ static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size, struct drm_modeset_acquire_ctx *ctx) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int i; - - /* userspace palettes are always correct as is */ - for (i = 0; i < size; i++) { - amdgpu_crtc->lut_r[i] = red[i] >> 6; - amdgpu_crtc->lut_g[i] = green[i] >> 6; - amdgpu_crtc->lut_b[i] = blue[i] >> 6; - } dce_v10_0_crtc_load_lut(crtc); return 0; @@ -2844,14 +2711,12 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = { .mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic, .prepare = dce_v10_0_crtc_prepare, .commit = dce_v10_0_crtc_commit, - .load_lut = dce_v10_0_crtc_load_lut, .disable = dce_v10_0_crtc_disable, }; static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) { struct amdgpu_crtc *amdgpu_crtc; - int i; amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); @@ -2869,12 +2734,6 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; - for (i = 0; i < 256; i++) { - amdgpu_crtc->lut_r[i] = i << 2; - amdgpu_crtc->lut_g[i] = i << 2; - amdgpu_crtc->lut_b[i] = i << 2; - } - switch (amdgpu_crtc->crtc_id) { case 0: default: @@ -3025,6 +2884,8 @@ static int dce_v10_0_hw_init(void *handle) dce_v10_0_init_golden_registers(adev); + /* disable 
vga render */ + dce_v10_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_encoder_init_dig(adev); amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); @@ -3737,7 +3598,6 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_v10_0_display_funcs = { - .set_vga_render_state = &dce_v10_0_set_vga_render_state, .bandwidth_update = &dce_v10_0_bandwidth_update, .vblank_get_counter = &dce_v10_0_vblank_get_counter, .vblank_wait = &dce_v10_0_vblank_wait, @@ -3750,8 +3610,6 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = { .page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos, .add_encoder = &dce_v10_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v10_0_stop_mc_access, - .resume_mc_access = &dce_v10_0_resume_mc_access, }; static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 4bcf01dc567a..11edc75edaa9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -499,79 +499,6 @@ static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev) return true; } -static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp; - int i; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - tmp = RREG32(mmVGA_RENDER_CONTROL); - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - WREG32(mmVGA_RENDER_CONTROL, tmp); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), - CRTC_CONTROL, CRTC_MASTER_EN); - if (crtc_enabled) { -#if 1 - save->crtc_enabled[i] = true; - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - /*it is correct only for RGB ; black is 0*/ - WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } -#else - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ -#endif - } else { - save->crtc_enabled[i] = false; - } - } -} - -static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp; - int i; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - - if (save->crtc_enabled[i]) { - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, 
lower_32_bits(adev->mc.vram_start)); - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); -} - static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -1851,7 +1778,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder, dce_v11_0_audio_write_sad_regs(encoder); dce_v11_0_audio_write_latency_fields(encoder, mode); - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; @@ -2251,6 +2178,7 @@ static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; + u16 *r, *g, *b; int i; u32 tmp; @@ -2282,11 +2210,14 @@ static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc) WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); + r = crtc->gamma_store; + g = r + crtc->gamma_size; + b = g + crtc->gamma_size; for (i = 0; i < 256; i++) { WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->lut_r[i] << 20) | - (amdgpu_crtc->lut_g[i] << 10) | - (amdgpu_crtc->lut_b[i] << 0)); + ((*r++ & 0xffc0) << 14) | + ((*g++ & 0xffc0) << 4) | + (*b++ >> 6)); } tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); @@ -2575,7 +2506,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2583,7 +2514,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2617,7 +2548,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; @@ -2644,15 +2575,6 @@ static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size, struct drm_modeset_acquire_ctx *ctx) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int i; - - /* userspace palettes are always correct as is */ - for (i = 0; i < size; i++) { - amdgpu_crtc->lut_r[i] = red[i] >> 6; - amdgpu_crtc->lut_g[i] = green[i] >> 6; - amdgpu_crtc->lut_b[i] = blue[i] >> 6; - } dce_v11_0_crtc_load_lut(crtc); return 0; @@ -2892,14 +2814,12 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = { .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic, .prepare = dce_v11_0_crtc_prepare, .commit = dce_v11_0_crtc_commit, - .load_lut = dce_v11_0_crtc_load_lut, .disable = dce_v11_0_crtc_disable, }; static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) { struct amdgpu_crtc *amdgpu_crtc; - int i; amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); @@ -2917,12 +2837,6 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; - for 
(i = 0; i < 256; i++) { - amdgpu_crtc->lut_r[i] = i << 2; - amdgpu_crtc->lut_g[i] = i << 2; - amdgpu_crtc->lut_b[i] = i << 2; - } - switch (amdgpu_crtc->crtc_id) { case 0: default: @@ -3086,6 +3000,8 @@ static int dce_v11_0_hw_init(void *handle) dce_v11_0_init_golden_registers(adev); + /* disable vga render */ + dce_v11_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_crtc_powergate_init(adev); amdgpu_atombios_encoder_init_dig(adev); @@ -3806,7 +3722,6 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { - .set_vga_render_state = &dce_v11_0_set_vga_render_state, .bandwidth_update = &dce_v11_0_bandwidth_update, .vblank_get_counter = &dce_v11_0_vblank_get_counter, .vblank_wait = &dce_v11_0_vblank_wait, @@ -3819,8 +3734,6 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos, .add_encoder = &dce_v11_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v11_0_stop_mc_access, - .resume_mc_access = &dce_v11_0_resume_mc_access, }; static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index fd134a4629d7..a51e35f824a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -42,6 +42,7 @@ #include "dce/dce_6_0_d.h" #include "dce/dce_6_0_sh_mask.h" #include "gca/gfx_7_2_enum.h" +#include "dce_v6_0.h" #include "si_enums.h" static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev); @@ -392,117 +393,6 @@ static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev) return mmDC_GPIO_HPD_A; } -static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc) -{ - if (crtc >= adev->mode_info.num_crtc) - return 0; - else - return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); -} - -static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp, frame_count; - int i, j; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - WREG32(mmVGA_RENDER_CONTROL, 0); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK; - if (crtc_enabled) { - save->crtc_enabled[i] = true; - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - - if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) { - dce_v6_0_vblank_wait(adev, i); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK; - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - /* wait for the next frame */ - frame_count = evergreen_get_vblank_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (evergreen_get_vblank_counter(adev, i) != frame_count) - break; - udelay(1); - } - - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK; - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ - } else { - 
save->crtc_enabled[i] = false; - } - } -} - -static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp; - int i, j; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start); - - /* unlock regs and wait for update */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (save->crtc_enabled[i]) { - tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x7) != 0) { - tmp &= ~0x7; - WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) { - tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK; - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (tmp & 1) { - tmp &= ~1; - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0) - break; - udelay(1); - } - } - } - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); - -} - static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -1597,7 +1487,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder, ssize_t err; u32 tmp; - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; @@ -2182,6 +2072,7 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; + u16 *r, *g, *b; int i; DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); @@ -2211,11 +2102,14 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc) WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); + r = crtc->gamma_store; + g = r + crtc->gamma_size; + b = g + crtc->gamma_size; for (i = 0; i < 256; i++) { WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->lut_r[i] << 20) | - (amdgpu_crtc->lut_g[i] << 10) | - (amdgpu_crtc->lut_b[i] << 0)); + ((*r++ & 0xffc0) << 14) | + ((*g++ & 0xffc0) << 4) | + (*b++ >> 6)); } WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, @@ -2428,7 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2436,7 +2330,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + 
drm_gem_object_put_unlocked(obj); return ret; } @@ -2470,7 +2364,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; @@ -2496,15 +2390,6 @@ static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size, struct drm_modeset_acquire_ctx *ctx) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int i; - - /* userspace palettes are always correct as is */ - for (i = 0; i < size; i++) { - amdgpu_crtc->lut_r[i] = red[i] >> 6; - amdgpu_crtc->lut_g[i] = green[i] >> 6; - amdgpu_crtc->lut_b[i] = blue[i] >> 6; - } dce_v6_0_crtc_load_lut(crtc); return 0; @@ -2712,14 +2597,12 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = { .mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic, .prepare = dce_v6_0_crtc_prepare, .commit = dce_v6_0_crtc_commit, - .load_lut = dce_v6_0_crtc_load_lut, .disable = dce_v6_0_crtc_disable, }; static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) { struct amdgpu_crtc *amdgpu_crtc; - int i; amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); @@ -2737,12 +2620,6 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; - for (i = 0; i < 256; i++) { - amdgpu_crtc->lut_r[i] = i << 2; - amdgpu_crtc->lut_g[i] = i << 2; - amdgpu_crtc->lut_b[i] = i << 2; - } - amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id]; amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; @@ -2873,6 +2750,8 @@ static int dce_v6_0_hw_init(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* disable vga render */ + dce_v6_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_encoder_init_dig(adev); amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); @@ -3525,7 +3404,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_v6_0_display_funcs = { - .set_vga_render_state = &dce_v6_0_set_vga_render_state, .bandwidth_update = &dce_v6_0_bandwidth_update, .vblank_get_counter = &dce_v6_0_vblank_get_counter, .vblank_wait = &dce_v6_0_vblank_wait, @@ -3538,8 +3416,6 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = { .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos, .add_encoder = &dce_v6_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v6_0_stop_mc_access, - .resume_mc_access = &dce_v6_0_resume_mc_access, }; static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index a9e869554627..9cf14b8b2db9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -419,81 +419,6 @@ static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev) return true; } -static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 crtc_enabled, tmp; - int i; - - save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); - - /* disable VGA render */ - tmp = RREG32(mmVGA_RENDER_CONTROL); - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, 
VGA_VSTATUS_CNTL, 0); - WREG32(mmVGA_RENDER_CONTROL, tmp); - - /* blank the display controllers */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), - CRTC_CONTROL, CRTC_MASTER_EN); - if (crtc_enabled) { -#if 1 - save->crtc_enabled[i] = true; - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - /*it is correct only for RGB ; black is 0*/ - WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } - mdelay(20); -#else - /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - save->crtc_enabled[i] = false; - /* ***** */ -#endif - } else { - save->crtc_enabled[i] = false; - } - } -} - -static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - u32 tmp; - int i; - - /* update crtc base addresses */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); - - if (save->crtc_enabled[i]) { - tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } - mdelay(20); - } - - WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); - WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); - - /* Unlock vga access */ - WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); -} - static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev, bool render) { @@ -1750,7 +1675,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, dce_v8_0_audio_write_sad_regs(encoder); dce_v8_0_audio_write_latency_fields(encoder, mode); - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; @@ -2124,6 +2049,7 @@ static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc) struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; + u16 *r, *g, *b; int i; DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); @@ -2153,11 +2079,14 @@ static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc) WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); + r = crtc->gamma_store; + g = r + crtc->gamma_size; + b = g + crtc->gamma_size; for (i = 0; i < 256; i++) { WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->lut_r[i] << 20) | - (amdgpu_crtc->lut_g[i] << 10) | - (amdgpu_crtc->lut_b[i] << 0)); + ((*r++ & 0xffc0) << 14) | + ((*g++ & 0xffc0) << 4) | + (*b++ >> 6)); } WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, @@ -2406,7 +2335,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, 
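Illustration only, not part of the patch: like the DCE 10/11 paths above, dce_v8_0_afmt_setmode() now calls the three-argument drm_hdmi_avi_infoframe_from_display_mode(); the added boolean tells the helper whether the sink is HDMI 2.0 capable, and these pre-DC paths pass false. A minimal sketch of the updated call pattern, with frame and mode as in the afmt_setmode functions:

	struct hdmi_avi_infoframe frame;
	ssize_t err;

	/* new third argument indicates an HDMI 2.0 sink; false keeps the
	 * HDMI 1.4 VIC range, which is what these DCE paths want */
	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}
	/* the frame is typically packed and written to the AFMT registers afterwards */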
aobj = gem_to_amdgpu_bo(obj); ret = amdgpu_bo_reserve(aobj, false); if (ret != 0) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2414,7 +2343,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, amdgpu_bo_unreserve(aobj); if (ret) { DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -2448,7 +2377,7 @@ unpin: amdgpu_bo_unpin(aobj); amdgpu_bo_unreserve(aobj); } - drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); } amdgpu_crtc->cursor_bo = obj; @@ -2475,15 +2404,6 @@ static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size, struct drm_modeset_acquire_ctx *ctx) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int i; - - /* userspace palettes are always correct as is */ - for (i = 0; i < size; i++) { - amdgpu_crtc->lut_r[i] = red[i] >> 6; - amdgpu_crtc->lut_g[i] = green[i] >> 6; - amdgpu_crtc->lut_b[i] = blue[i] >> 6; - } dce_v8_0_crtc_load_lut(crtc); return 0; @@ -2702,14 +2622,12 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = { .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic, .prepare = dce_v8_0_crtc_prepare, .commit = dce_v8_0_crtc_commit, - .load_lut = dce_v8_0_crtc_load_lut, .disable = dce_v8_0_crtc_disable, }; static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) { struct amdgpu_crtc *amdgpu_crtc; - int i; amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); @@ -2727,12 +2645,6 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; - for (i = 0; i < 256; i++) { - amdgpu_crtc->lut_r[i] = i << 2; - amdgpu_crtc->lut_g[i] = i << 2; - amdgpu_crtc->lut_b[i] = i << 2; - } - amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id]; amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; @@ -2870,6 +2782,8 @@ static int dce_v8_0_hw_init(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* disable vga render */ + dce_v8_0_set_vga_render_state(adev, false); /* init dig PHYs, disp eng pll */ amdgpu_atombios_encoder_init_dig(adev); amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); @@ -3574,7 +3488,6 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_v8_0_display_funcs = { - .set_vga_render_state = &dce_v8_0_set_vga_render_state, .bandwidth_update = &dce_v8_0_bandwidth_update, .vblank_get_counter = &dce_v8_0_vblank_get_counter, .vblank_wait = &dce_v8_0_vblank_wait, @@ -3587,8 +3500,6 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = { .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos, .add_encoder = &dce_v8_0_encoder_add, .add_connector = &amdgpu_connector_add, - .stop_mc_access = &dce_v8_0_stop_mc_access, - .resume_mc_access = &dce_v8_0_resume_mc_access, }; static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 90bb08309a53..b9ee9073cb0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -95,62 +95,6 @@ static u32 dce_virtual_hpd_get_gpio_reg(struct 
amdgpu_device *adev) return 0; } -static void dce_virtual_stop_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - switch (adev->asic_type) { -#ifdef CONFIG_DRM_AMDGPU_SI - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - dce_v6_0_disable_dce(adev); - break; -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KAVERI: - case CHIP_KABINI: - case CHIP_MULLINS: - dce_v8_0_disable_dce(adev); - break; -#endif - case CHIP_FIJI: - case CHIP_TONGA: - dce_v10_0_disable_dce(adev); - break; - case CHIP_CARRIZO: - case CHIP_STONEY: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - dce_v11_0_disable_dce(adev); - break; - case CHIP_TOPAZ: -#ifdef CONFIG_DRM_AMDGPU_SI - case CHIP_HAINAN: -#endif - /* no DCE */ - return; - default: - DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); - } - - return; -} -static void dce_virtual_resume_mc_access(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) -{ - return; -} - -static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev, - bool render) -{ - return; -} - /** * dce_virtual_bandwidth_update - program display watermarks * @@ -168,16 +112,6 @@ static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size, struct drm_modeset_acquire_ctx *ctx) { - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int i; - - /* userspace palettes are always correct as is */ - for (i = 0; i < size; i++) { - amdgpu_crtc->lut_r[i] = red[i] >> 6; - amdgpu_crtc->lut_g[i] = green[i] >> 6; - amdgpu_crtc->lut_b[i] = blue[i] >> 6; - } - return 0; } @@ -289,11 +223,6 @@ static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y, return 0; } -static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc) -{ - return; -} - static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y, enum mode_set_atomic state) @@ -309,14 +238,12 @@ static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = { .mode_set_base_atomic = dce_virtual_crtc_set_base_atomic, .prepare = dce_virtual_crtc_prepare, .commit = dce_virtual_crtc_commit, - .load_lut = dce_virtual_crtc_load_lut, .disable = dce_virtual_crtc_disable, }; static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index) { struct amdgpu_crtc *amdgpu_crtc; - int i; amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); @@ -329,12 +256,6 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index) amdgpu_crtc->crtc_id = index; adev->mode_info.crtcs[index] = amdgpu_crtc; - for (i = 0; i < 256; i++) { - amdgpu_crtc->lut_r[i] = i << 2; - amdgpu_crtc->lut_g[i] = i << 2; - amdgpu_crtc->lut_b[i] = i << 2; - } - amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; amdgpu_crtc->encoder = NULL; amdgpu_crtc->connector = NULL; @@ -522,6 +443,47 @@ static int dce_virtual_sw_fini(void *handle) static int dce_virtual_hw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + dce_v6_0_disable_dce(adev); + break; +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + dce_v8_0_disable_dce(adev); + break; +#endif + case CHIP_FIJI: + case CHIP_TONGA: + 
dce_v10_0_disable_dce(adev); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS11: + case CHIP_POLARIS10: + dce_v11_0_disable_dce(adev); + break; + case CHIP_TOPAZ: +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_HAINAN: +#endif + /* no DCE */ + break; + case CHIP_VEGA10: + break; + default: + DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); + } return 0; } @@ -677,7 +639,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, } static const struct amdgpu_display_funcs dce_virtual_display_funcs = { - .set_vga_render_state = &dce_virtual_set_vga_render_state, .bandwidth_update = &dce_virtual_bandwidth_update, .vblank_get_counter = &dce_virtual_vblank_get_counter, .vblank_wait = &dce_virtual_vblank_wait, @@ -690,8 +651,6 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = { .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, .add_encoder = NULL, .add_connector = NULL, - .stop_mc_access = &dce_virtual_stop_mc_access, - .resume_mc_access = &dce_virtual_resume_mc_access, }; static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) @@ -809,7 +768,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) { - adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1; adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 5173ca1fd159..d228f5a99044 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1573,7 +1573,7 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) static void gfx_v6_0_scratch_init(struct amdgpu_device *adev) { - adev->gfx.scratch.num_reg = 7; + adev->gfx.scratch.num_reg = 8; adev->gfx.scratch.reg_base = mmSCRATCH_REG0; adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } @@ -2217,40 +2217,9 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.rlc.save_restore_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj); - adev->gfx.rlc.save_restore_obj = NULL; - } - - if (adev->gfx.rlc.clear_state_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); - adev->gfx.rlc.clear_state_obj = NULL; - } - - if (adev->gfx.rlc.cp_table_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); - adev->gfx.rlc.cp_table_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); } static int 
gfx_v6_0_rlc_init(struct amdgpu_device *adev) @@ -2273,43 +2242,23 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) if (src_ptr) { /* save restore block */ - if (adev->gfx.rlc.save_restore_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, - &adev->gfx.rlc.save_restore_obj); - - if (r) { - dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); - if (unlikely(r != 0)) { - gfx_v6_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.save_restore_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.save_restore_obj, + &adev->gfx.rlc.save_restore_gpu_addr, + (void **)&adev->gfx.rlc.sr_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", + r); gfx_v6_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r); - gfx_v6_0_rlc_fini(adev); - return r; - } /* write the sr buffer */ dst_ptr = adev->gfx.rlc.sr_ptr; for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) dst_ptr[i] = cpu_to_le32(src_ptr[i]); + amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); } @@ -2319,39 +2268,17 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev); dws = adev->gfx.rlc.clear_state_size + (256 / 4); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, - &adev->gfx.rlc.clear_state_obj); - - if (r) { - dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v6_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - gfx_v6_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v6_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); - gfx_v6_0_rlc_fini(adev); - return r; - } /* set up the cs buffer */ dst_ptr = adev->gfx.rlc.cs_ptr; reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 37b45e4403d1..00868764a0dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1823,7 +1823,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) } /** - * gmc_v7_0_init_compute_vmid - gart enable + * gfx_v7_0_init_compute_vmid - gart enable * * @adev: amdgpu_device pointer * @@ -1833,7 +1833,7 @@ static 
void gfx_v7_0_setup_rb(struct amdgpu_device *adev) #define DEFAULT_SH_MEM_BASES (0x6000) #define FIRST_COMPUTE_VMID (8) #define LAST_COMPUTE_VMID (16) -static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev) +static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) { int i; uint32_t sh_mem_config; @@ -1921,6 +1921,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) ELEMENT_SIZE, 1); sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG, INDEX_STRIDE, 3); + WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg); mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) { @@ -1934,12 +1935,11 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) WREG32(mmSH_MEM_APE1_BASE, 1); WREG32(mmSH_MEM_APE1_LIMIT, 0); WREG32(mmSH_MEM_BASES, sh_mem_base); - WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg); } cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - gmc_v7_0_init_compute_vmid(adev); + gfx_v7_0_init_compute_vmid(adev); WREG32(mmSX_DEBUG_1, 0x20); @@ -2021,7 +2021,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) */ static void gfx_v7_0_scratch_init(struct amdgpu_device *adev) { - adev->gfx.scratch.num_reg = 7; + adev->gfx.scratch.num_reg = 8; adev->gfx.scratch.reg_base = mmSCRATCH_REG0; adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } @@ -2774,39 +2774,18 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) */ static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev) { - int i, r; + int i; for (i = 0; i < adev->gfx.num_compute_rings; i++) { struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; - if (ring->mqd_obj) { - r = amdgpu_bo_reserve(ring->mqd_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r); - - amdgpu_bo_unpin(ring->mqd_obj); - amdgpu_bo_unreserve(ring->mqd_obj); - - amdgpu_bo_unref(&ring->mqd_obj); - ring->mqd_obj = NULL; - } + amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); } } static void gfx_v7_0_mec_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.mec.hpd_eop_obj) { - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); - adev->gfx.mec.hpd_eop_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); } static int gfx_v7_0_mec_init(struct amdgpu_device *adev) @@ -2823,33 +2802,14 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) /* allocate space for ALL pipes (even the ones we don't own) */ mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec * GFX7_MEC_HPD_SIZE * 2; - if (adev->gfx.mec.hpd_eop_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hpd_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.hpd_eop_obj); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); - if (unlikely(r != 0)) { - gfx_v7_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.hpd_eop_gpu_addr); - if (r) { - dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); - gfx_v7_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); + r = amdgpu_bo_create_reserved(adev, 
mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); if (r) { - dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); + dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r); gfx_v7_0_mec_fini(adev); return r; } @@ -3108,32 +3068,12 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id) struct cik_mqd *mqd; struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; - if (ring->mqd_obj == NULL) { - r = amdgpu_bo_create(adev, - sizeof(struct cik_mqd), - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &ring->mqd_obj); - if (r) { - dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(ring->mqd_obj, false); - if (unlikely(r != 0)) - goto out; - - r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT, - &mqd_gpu_addr); - if (r) { - dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r); - goto out_unreserve; - } - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd); + r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, + &mqd_gpu_addr, (void **)&mqd); if (r) { - dev_warn(adev->dev, "(%d) map MQD bo failed\n", r); - goto out_unreserve; + dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); + return r; } mutex_lock(&adev->srbm_mutex); @@ -3147,9 +3087,7 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id) mutex_unlock(&adev->srbm_mutex); amdgpu_bo_kunmap(ring->mqd_obj); -out_unreserve: amdgpu_bo_unreserve(ring->mqd_obj); -out: return 0; } @@ -3361,43 +3299,9 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, */ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev) { - int r; - - /* save restore block */ - if (adev->gfx.rlc.save_restore_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj); - adev->gfx.rlc.save_restore_obj = NULL; - } - - /* clear state block */ - if (adev->gfx.rlc.clear_state_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); - adev->gfx.rlc.clear_state_obj = NULL; - } - - /* clear state block */ - if (adev->gfx.rlc.cp_table_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); - adev->gfx.rlc.cp_table_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); } static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) @@ -3432,39 +3336,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) if (src_ptr) { /* save restore block */ - if (adev->gfx.rlc.save_restore_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - 
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.save_restore_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); - if (unlikely(r != 0)) { - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.save_restore_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.save_restore_obj, + &adev->gfx.rlc.save_restore_gpu_addr, + (void **)&adev->gfx.rlc.sr_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); - dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r); + dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r); gfx_v7_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } /* write the sr buffer */ dst_ptr = adev->gfx.rlc.sr_ptr; for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) @@ -3477,39 +3359,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) /* clear state block */ adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.clear_state_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v7_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } /* set up the cs buffer */ dst_ptr = adev->gfx.rlc.cs_ptr; gfx_v7_0_get_csb_buffer(adev, dst_ptr); @@ -3518,37 +3378,14 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) } if (adev->gfx.rlc.cp_table_size) { - if (adev->gfx.rlc.cp_table_obj == NULL) { - r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.cp_table_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); - if (unlikely(r != 0)) { - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.cp_table_gpu_addr); - if (r) { - 
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); - gfx_v7_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); + r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); if (r) { - dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); gfx_v7_0_rlc_fini(adev); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index aa5a50f5eac8..832e592fcd07 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -193,8 +193,8 @@ static const u32 tonga_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF }; static const u32 tonga_mgcg_cgcg_init[] = @@ -303,8 +303,8 @@ static const u32 polaris11_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, }; static const u32 golden_settings_polaris10_a11[] = @@ -336,8 +336,8 @@ static const u32 polaris10_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, }; static const u32 fiji_golden_common_all[] = @@ -348,8 +348,8 @@ static const u32 fiji_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009, }; @@ -436,8 +436,8 @@ static const u32 iceland_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF }; static const u32 iceland_mgcg_cgcg_init[] = @@ -532,8 +532,8 @@ static const u32 cz_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 
0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF }; static const u32 cz_mgcg_cgcg_init[] = @@ -637,8 +637,8 @@ static const u32 stoney_golden_common_all[] = mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001, mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, - mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, - mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, }; static const u32 stoney_mgcg_cgcg_init[] = @@ -750,7 +750,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) static void gfx_v8_0_scratch_init(struct amdgpu_device *adev) { - adev->gfx.scratch.num_reg = 7; + adev->gfx.scratch.num_reg = 8; adev->gfx.scratch.reg_base = mmSCRATCH_REG0; adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } @@ -1238,29 +1238,8 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev) static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) { - int r; - - /* clear state block */ - if (adev->gfx.rlc.clear_state_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); - adev->gfx.rlc.clear_state_obj = NULL; - } - - /* jump table block */ - if (adev->gfx.rlc.cp_table_obj) { - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); - adev->gfx.rlc.cp_table_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); } static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) @@ -1278,39 +1257,17 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) /* clear state block */ adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.clear_state_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); - gfx_v8_0_rlc_fini(adev); - return r; - } - } - r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); - if (unlikely(r != 0)) { - gfx_v8_0_rlc_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_gpu_addr); + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); gfx_v8_0_rlc_fini(adev); return r; } - r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void 
**)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r); - gfx_v8_0_rlc_fini(adev); - return r; - } /* set up the cs buffer */ dst_ptr = adev->gfx.rlc.cs_ptr; gfx_v8_0_get_csb_buffer(adev, dst_ptr); @@ -1321,34 +1278,13 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) if ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY)) { adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ - if (adev->gfx.rlc.cp_table_obj == NULL) { - r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - NULL, NULL, - &adev->gfx.rlc.cp_table_obj); - if (r) { - dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); - if (unlikely(r != 0)) { - dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); - return r; - } - r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.cp_table_gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); + r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); if (r) { - dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); + dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); return r; } @@ -1363,17 +1299,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.mec.hpd_eop_obj) { - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); - adev->gfx.mec.hpd_eop_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); } static int gfx_v8_0_mec_init(struct amdgpu_device *adev) @@ -1389,34 +1315,13 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; - if (adev->gfx.mec.hpd_eop_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hpd_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.hpd_eop_obj); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); - if (unlikely(r != 0)) { - gfx_v8_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.hpd_eop_gpu_addr); + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); if (r) { - dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); - gfx_v8_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); - if (r) { - dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); - gfx_v8_0_mec_fini(adev); + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); return r; } @@ -3802,6 +3707,8 @@ static void 
gfx_v8_0_gpu_init(struct amdgpu_device *adev) ELEMENT_SIZE, 1); sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG, INDEX_STRIDE, 3); + WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg); + mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) { vi_srbm_select(adev, 0, 0, 0, i); @@ -3825,7 +3732,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) WREG32(mmSH_MEM_APE1_BASE, 1); WREG32(mmSH_MEM_APE1_LIMIT, 0); - WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg); } vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); @@ -4564,7 +4470,7 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) /* This situation may be hit in the future if a new HW * generation exposes more than 64 queues. If so, the * definition of queue_mask needs updating */ - if (WARN_ON(i > (sizeof(queue_mask)*8))) { + if (WARN_ON(i >= (sizeof(queue_mask)*8))) { DRM_ERROR("Invalid KCQ enabled: %d\n", i); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index c9b9c88231aa..69182eeca264 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -116,7 +116,9 @@ static const u32 golden_settings_gc_9_0[] = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080, SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080, SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080, + SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000, SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107, + SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000, SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000, SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68, SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197, @@ -211,7 +213,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) static void gfx_v9_0_scratch_init(struct amdgpu_device *adev) { - adev->gfx.scratch.num_reg = 7; + adev->gfx.scratch.num_reg = 8; adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } @@ -772,18 +774,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) if (cs_data) { /* clear state block */ adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev); - if (adev->gfx.rlc.clear_state_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.clear_state_obj, - &adev->gfx.rlc.clear_state_gpu_addr, - (void **)&adev->gfx.rlc.cs_ptr); - if (r) { - dev_err(adev->dev, - "(%d) failed to create rlc csb bo\n", r); - gfx_v9_0_rlc_fini(adev); - return r; - } + r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", + r); + gfx_v9_0_rlc_fini(adev); + return r; } /* set up the cs buffer */ dst_ptr = adev->gfx.rlc.cs_ptr; @@ -795,18 +795,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) if (adev->asic_type == CHIP_RAVEN) { /* TODO: double check the cp_table_size for RV */ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ - if (adev->gfx.rlc.cp_table_obj == NULL) { - r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size, - PAGE_SIZE, 
AMDGPU_GEM_DOMAIN_VRAM, - &adev->gfx.rlc.cp_table_obj, - &adev->gfx.rlc.cp_table_gpu_addr, - (void **)&adev->gfx.rlc.cp_table_ptr); - if (r) { - dev_err(adev->dev, - "(%d) failed to create cp table bo\n", r); - gfx_v9_0_rlc_fini(adev); - return r; - } + r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); + if (r) { + dev_err(adev->dev, + "(%d) failed to create cp table bo\n", r); + gfx_v9_0_rlc_fini(adev); + return r; } rv_init_cp_jump_table(adev); @@ -821,28 +819,8 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) { - int r; - - if (adev->gfx.mec.hpd_eop_obj) { - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); - adev->gfx.mec.hpd_eop_obj = NULL; - } - if (adev->gfx.mec.mec_fw_obj) { - r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true); - if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r); - amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj); - amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); - - amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj); - adev->gfx.mec.mec_fw_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); } static int gfx_v9_0_mec_init(struct amdgpu_device *adev) @@ -862,33 +840,13 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) amdgpu_gfx_compute_queue_acquire(adev); mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; - if (adev->gfx.mec.hpd_eop_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hpd_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.hpd_eop_obj); - if (r) { - dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); - if (unlikely(r != 0)) { - gfx_v9_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.hpd_eop_gpu_addr); + r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.hpd_eop_obj, + &adev->gfx.mec.hpd_eop_gpu_addr, + (void **)&hpd); if (r) { - dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); - gfx_v9_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); - if (r) { - dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); + dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); gfx_v9_0_mec_fini(adev); return r; } @@ -905,42 +863,22 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; - if (adev->gfx.mec.mec_fw_obj == NULL) { - r = amdgpu_bo_create(adev, - mec_hdr->header.ucode_size_bytes, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->gfx.mec.mec_fw_obj); - if (r) { - dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); - return r; - } - } - - r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false); - if (unlikely(r != 0)) { - gfx_v9_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, 
AMDGPU_GEM_DOMAIN_GTT, - &adev->gfx.mec.mec_fw_gpu_addr); - if (r) { - dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r); - gfx_v9_0_mec_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw); + r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->gfx.mec.mec_fw_obj, + &adev->gfx.mec.mec_fw_gpu_addr, + (void **)&fw); if (r) { - dev_warn(adev->dev, "(%d) map firmware bo failed\n", r); + dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); gfx_v9_0_mec_fini(adev); return r; } + memcpy(fw, fw_data, fw_size); amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); - return 0; } @@ -2219,7 +2157,7 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; const struct cs_section_def *sect = NULL; const struct cs_extent_def *ext = NULL; - int r, i; + int r, i, tmp; /* init the CP */ WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1); @@ -2227,7 +2165,7 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) gfx_v9_0_cp_gfx_enable(adev, true); - r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4); + r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); return r; @@ -2265,6 +2203,12 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, 0x8000); amdgpu_ring_write(ring, 0x8000); + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG,1)); + tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE | + (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START)); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0); + amdgpu_ring_commit(ring); return 0; @@ -2427,7 +2371,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) /* This situation may be hit in the future if a new HW * generation exposes more than 64 queues. 
If so, the * definition of queue_mask needs updating */ - if (WARN_ON(i > (sizeof(queue_mask)*8))) { + if (WARN_ON(i >= (sizeof(queue_mask)*8))) { DRM_ERROR("Invalid KCQ enabled: %d\n", i); break; } @@ -4158,7 +4102,7 @@ static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev, return 0; } -const struct amd_ip_funcs gfx_v9_0_ip_funcs = { +static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { .name = "gfx_v9_0", .early_init = gfx_v9_0_early_init, .late_init = gfx_v9_0_late_init, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h index 56ef652a575d..fa5a3fbaf6ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h @@ -24,7 +24,6 @@ #ifndef __GFX_V9_0_H__ #define __GFX_V9_0_H__ -extern const struct amd_ip_funcs gfx_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block; void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index a42f483767e7..4f2788b61a08 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) gfxhub_v1_0_init_gart_pt_regs(adev); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->mc.gtt_start >> 12)); + (u32)(adev->mc.gart_start >> 12)); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->mc.gtt_start >> 44)); + (u32)(adev->mc.gart_start >> 44)); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, - (u32)(adev->mc.gtt_end >> 12)); + (u32)(adev->mc.gart_end >> 12)); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, - (u32)(adev->mc.gtt_end >> 44)); + (u32)(adev->mc.gart_end >> 44)); } static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) @@ -124,12 +124,12 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, field; /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); /* XXX for emulation, Refer to closed source code.*/ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); @@ -143,7 +143,10 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp); + field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; @@ -206,6 +209,9 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, adev->vm_manager.block_size - 9); + /* Send no-retry XNACK on fault to suppress VM fault storm. 
*/ + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h index d2dbb085f480..206e29cad753 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h @@ -30,7 +30,5 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value); void gfxhub_v1_0_init(struct amdgpu_device *adev); u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev); -extern const struct amd_ip_funcs gfxhub_v1_0_ip_funcs; -extern const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index d0214d942bfc..12b0c4cd7a5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -66,14 +66,10 @@ static const u32 crtc_offsets[6] = SI_CRTC5_REGISTER_OFFSET }; -static void gmc_v6_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v6_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; - if (adev->mode_info.num_crtc) - amdgpu_display_stop_mc_access(adev, save); - gmc_v6_0_wait_for_idle((void *)adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); @@ -90,8 +86,7 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev, } -static void gmc_v6_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v6_0_mc_resume(struct amdgpu_device *adev) { u32 tmp; @@ -103,10 +98,6 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev, tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); WREG32(mmBIF_FB_EN, tmp); - - if (adev->mode_info.num_crtc) - amdgpu_display_resume_mc_access(adev, save); - } static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) @@ -228,20 +219,20 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev) static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { + u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + if (mc->mc_vram_size > 0xFFC0000000ULL) { dev_warn(adev->dev, "limiting VRAM\n"); mc->real_vram_size = 0xFFC0000000ULL; mc->mc_vram_size = 0xFFC0000000ULL; } - amdgpu_vram_location(adev, &adev->mc, 0); - adev->mc.gtt_base_align = 0; - amdgpu_gtt_location(adev, mc); + amdgpu_vram_location(adev, &adev->mc, base); + amdgpu_gart_location(adev, mc); } static void gmc_v6_0_mc_program(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; - u32 tmp; int i, j; /* Initialize HDP */ @@ -254,16 +245,23 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (adev->mode_info.num_crtc) - amdgpu_display_set_vga_render_state(adev, false); - - gmc_v6_0_mc_stop(adev, &save); - if (gmc_v6_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } - WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK); + if (adev->mode_info.num_crtc) { + u32 tmp; + + /* Lockout access through VGA aperture*/ + tmp = RREG32(mmVGA_HDP_CONTROL); + tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK; + WREG32(mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + tmp &= 
~VGA_VSTATUS_CNTL; + WREG32(mmVGA_RENDER_CONTROL, tmp); + } /* Update configuration */ WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->mc.vram_start >> 12); @@ -271,13 +269,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) adev->mc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, adev->vram_scratch.gpu_addr >> 12); - tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; - tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); - WREG32(mmMC_VM_FB_LOCATION, tmp); - /* XXX double check these! */ - WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); - WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); - WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); @@ -285,7 +276,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) if (gmc_v6_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } - gmc_v6_0_mc_resume(adev, &save); } static int gmc_v6_0_mc_init(struct amdgpu_device *adev) @@ -342,15 +332,7 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.visible_vram_size = adev->mc.aper_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. - */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v6_0_vram_gtt_location(adev, &adev->mc); return 0; @@ -479,6 +461,7 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) { int r, i; + u32 field; if (adev->gart.robj == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -506,13 +489,15 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) WREG32(mmVM_L2_CNTL2, VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK | VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK); + + field = adev->vm_manager.fragment_size; WREG32(mmVM_L2_CNTL3, VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK | - (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) | - (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT)); + (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) | + (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT)); /* setup context0 */ - WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); @@ -559,7 +544,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) gmc_v6_0_gart_flush_gpu_tlb(adev, 0); dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; @@ -829,7 +814,7 @@ static int gmc_v6_0_sw_init(void *handle) if (r) return r; - amdgpu_vm_adjust_size(adev, 64); + amdgpu_vm_adjust_size(adev, 64, 4); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; adev->mc.mc_mask = 0xffffffffffULL; @@ -987,7 +972,6 @@ static int gmc_v6_0_wait_for_idle(void *handle) static 
int gmc_v6_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1003,7 +987,7 @@ static int gmc_v6_0_soft_reset(void *handle) } if (srbm_soft_reset) { - gmc_v6_0_mc_stop(adev, &save); + gmc_v6_0_mc_stop(adev); if (gmc_v6_0_wait_for_idle(adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } @@ -1023,7 +1007,7 @@ static int gmc_v6_0_soft_reset(void *handle) udelay(50); - gmc_v6_0_mc_resume(adev, &save); + gmc_v6_0_mc_resume(adev); udelay(50); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 7e9ea53edf8b..e42c1ad3af5e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -37,6 +37,9 @@ #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + #include "amdgpu_atombios.h" static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); @@ -76,14 +79,10 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) } } -static void gmc_v7_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; - if (adev->mode_info.num_crtc) - amdgpu_display_stop_mc_access(adev, save); - gmc_v7_0_wait_for_idle((void *)adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); @@ -99,8 +98,7 @@ static void gmc_v7_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -static void gmc_v7_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_resume(struct amdgpu_device *adev) { u32 tmp; @@ -112,9 +110,6 @@ static void gmc_v7_0_mc_resume(struct amdgpu_device *adev, tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); WREG32(mmBIF_FB_EN, tmp); - - if (adev->mode_info.num_crtc) - amdgpu_display_resume_mc_access(adev, save); } /** @@ -242,15 +237,17 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { + u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + if (mc->mc_vram_size > 0xFFC0000000ULL) { /* leave room for at least 1024M GTT */ dev_warn(adev->dev, "limiting VRAM\n"); mc->real_vram_size = 0xFFC0000000ULL; mc->mc_vram_size = 0xFFC0000000ULL; } - amdgpu_vram_location(adev, &adev->mc, 0); - adev->mc.gtt_base_align = 0; - amdgpu_gtt_location(adev, mc); + amdgpu_vram_location(adev, &adev->mc, base); + amdgpu_gart_location(adev, mc); } /** @@ -263,7 +260,6 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, */ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; u32 tmp; int i, j; @@ -277,13 +273,20 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (adev->mode_info.num_crtc) - amdgpu_display_set_vga_render_state(adev, false); - - gmc_v7_0_mc_stop(adev, &save); if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } + if (adev->mode_info.num_crtc) { + /* Lockout access through VGA aperture*/ + tmp = RREG32(mmVGA_HDP_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + WREG32(mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, 
VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); + } /* Update configuration */ WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->mc.vram_start >> 12); @@ -291,20 +294,12 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) adev->mc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, adev->vram_scratch.gpu_addr >> 12); - tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; - tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); - WREG32(mmMC_VM_FB_LOCATION, tmp); - /* XXX double check these! */ - WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); - WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); - WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } - gmc_v7_0_mc_resume(adev, &save); WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); @@ -391,15 +386,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. - */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v7_0_vram_gtt_location(adev, &adev->mc); return 0; @@ -575,7 +562,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) { int r, i; - u32 tmp; + u32 tmp, field; if (adev->gart.robj == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -605,14 +592,16 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32(mmVM_L2_CNTL2, tmp); + + field = adev->vm_manager.fragment_size; tmp = RREG32(mmVM_L2_CNTL3); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field); WREG32(mmVM_L2_CNTL3, tmp); /* setup context0 */ - WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); @@ -666,7 +655,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) gmc_v7_0_gart_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; @@ -961,7 +950,7 @@ static int gmc_v7_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. 
*/ - amdgpu_vm_adjust_size(adev, 64); + amdgpu_vm_adjust_size(adev, 64, 4); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask @@ -1138,7 +1127,6 @@ static int gmc_v7_0_wait_for_idle(void *handle) static int gmc_v7_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1154,7 +1142,7 @@ static int gmc_v7_0_soft_reset(void *handle) } if (srbm_soft_reset) { - gmc_v7_0_mc_stop(adev, &save); + gmc_v7_0_mc_stop(adev); if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } @@ -1175,7 +1163,7 @@ static int gmc_v7_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - gmc_v7_0_mc_resume(adev, &save); + gmc_v7_0_mc_resume(adev); udelay(50); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index cc9f88057cd5..7ca2dae8237a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -35,6 +35,9 @@ #include "oss/oss_3_0_d.h" #include "oss/oss_3_0_sh_mask.h" +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + #include "vid.h" #include "vi.h" @@ -161,14 +164,10 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) } } -static void gmc_v8_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_stop(struct amdgpu_device *adev) { u32 blackout; - if (adev->mode_info.num_crtc) - amdgpu_display_stop_mc_access(adev, save); - gmc_v8_0_wait_for_idle(adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); @@ -184,8 +183,7 @@ static void gmc_v8_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -static void gmc_v8_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_resume(struct amdgpu_device *adev) { u32 tmp; @@ -197,9 +195,6 @@ static void gmc_v8_0_mc_resume(struct amdgpu_device *adev, tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); WREG32(mmBIF_FB_EN, tmp); - - if (adev->mode_info.num_crtc) - amdgpu_display_resume_mc_access(adev, save); } /** @@ -404,15 +399,20 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev) static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) { + u64 base = 0; + + if (!amdgpu_sriov_vf(adev)) + base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + if (mc->mc_vram_size > 0xFFC0000000ULL) { /* leave room for at least 1024M GTT */ dev_warn(adev->dev, "limiting VRAM\n"); mc->real_vram_size = 0xFFC0000000ULL; mc->mc_vram_size = 0xFFC0000000ULL; } - amdgpu_vram_location(adev, &adev->mc, 0); - adev->mc.gtt_base_align = 0; - amdgpu_gtt_location(adev, mc); + amdgpu_vram_location(adev, &adev->mc, base); + amdgpu_gart_location(adev, mc); } /** @@ -425,7 +425,6 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, */ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) { - struct amdgpu_mode_mc_save save; u32 tmp; int i, j; @@ -439,13 +438,20 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); - if (adev->mode_info.num_crtc) - amdgpu_display_set_vga_render_state(adev, false); - - gmc_v8_0_mc_stop(adev, &save); if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } + if (adev->mode_info.num_crtc) { + /* 
Lockout access through VGA aperture*/ + tmp = RREG32(mmVGA_HDP_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + WREG32(mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32(mmVGA_RENDER_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32(mmVGA_RENDER_CONTROL, tmp); + } /* Update configuration */ WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->mc.vram_start >> 12); @@ -453,20 +459,23 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) adev->mc.vram_end >> 12); WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, adev->vram_scratch.gpu_addr >> 12); - tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; - tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); - WREG32(mmMC_VM_FB_LOCATION, tmp); - /* XXX double check these! */ - WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); - WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); - WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); + + if (amdgpu_sriov_vf(adev)) { + tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; + tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); + WREG32(mmMC_VM_FB_LOCATION, tmp); + /* XXX double check these! */ + WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); + WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); + WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); + } + WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } - gmc_v8_0_mc_resume(adev, &save); WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); @@ -553,15 +562,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. 
- */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v8_0_vram_gtt_location(adev, &adev->mc); return 0; @@ -761,7 +762,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) { int r, i; - u32 tmp; + u32 tmp, field; if (adev->gart.robj == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -792,10 +793,12 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32(mmVM_L2_CNTL2, tmp); + + field = adev->vm_manager.fragment_size; tmp = RREG32(mmVM_L2_CNTL3); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field); WREG32(mmVM_L2_CNTL3, tmp); /* XXX: set to enable PTE/PDE in system memory */ tmp = RREG32(mmVM_L2_CNTL4); @@ -813,8 +816,8 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0); WREG32(mmVM_L2_CNTL4, tmp); /* setup context0 */ - WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); - WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12); + WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12); WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(adev->dummy_page.addr >> 12)); @@ -869,7 +872,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) gmc_v8_0_gart_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; @@ -1045,7 +1048,7 @@ static int gmc_v8_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. 
*/ - amdgpu_vm_adjust_size(adev, 64); + amdgpu_vm_adjust_size(adev, 64, 4); adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask @@ -1260,7 +1263,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle) if (!adev->mc.srbm_soft_reset) return 0; - gmc_v8_0_mc_stop(adev, &adev->mc.save); + gmc_v8_0_mc_stop(adev); if (gmc_v8_0_wait_for_idle(adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } @@ -1306,7 +1309,7 @@ static int gmc_v8_0_post_soft_reset(void *handle) if (!adev->mc.srbm_soft_reset) return 0; - gmc_v8_0_mc_resume(adev, &adev->mc.save); + gmc_v8_0_mc_resume(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 175ba5f9691c..2769c2b3b56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -23,11 +23,14 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "gmc_v9_0.h" +#include "amdgpu_atomfirmware.h" #include "vega10/soc15ip.h" #include "vega10/HDP/hdp_4_0_offset.h" #include "vega10/HDP/hdp_4_0_sh_mask.h" #include "vega10/GC/gc_9_0_sh_mask.h" +#include "vega10/DC/dce_12_0_offset.h" +#include "vega10/DC/dce_12_0_sh_mask.h" #include "vega10/vega10_enum.h" #include "soc15_common.h" @@ -419,8 +422,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, if (!amdgpu_sriov_vf(adev)) base = mmhub_v1_0_get_fb_location(adev); amdgpu_vram_location(adev, &adev->mc, base); - adev->mc.gtt_base_align = 0; - amdgpu_gtt_location(adev, mc); + amdgpu_gart_location(adev, mc); /* base offset of vram pages */ if (adev->flags & AMD_IS_APU) adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); @@ -442,43 +444,46 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) u32 tmp; int chansize, numchan; - /* hbm memory channel size */ - chansize = 128; - - tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); - tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; - tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; - switch (tmp) { - case 0: - default: - numchan = 1; - break; - case 1: - numchan = 2; - break; - case 2: - numchan = 0; - break; - case 3: - numchan = 4; - break; - case 4: - numchan = 0; - break; - case 5: - numchan = 8; - break; - case 6: - numchan = 0; - break; - case 7: - numchan = 16; - break; - case 8: - numchan = 2; - break; + adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); + if (!adev->mc.vram_width) { + /* hbm memory channel size */ + chansize = 128; + + tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); + tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; + tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; + switch (tmp) { + case 0: + default: + numchan = 1; + break; + case 1: + numchan = 2; + break; + case 2: + numchan = 0; + break; + case 3: + numchan = 4; + break; + case 4: + numchan = 0; + break; + case 5: + numchan = 8; + break; + case 6: + numchan = 0; + break; + case 7: + numchan = 16; + break; + case 8: + numchan = 2; + break; + } + adev->mc.vram_width = numchan * chansize; } - adev->mc.vram_width = numchan * chansize; /* Could aper size report 0 ? */ adev->mc.aper_base = pci_resource_start(adev->pdev, 0); @@ -494,15 +499,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; - /* unless the user had overridden it, set the gart - * size equal to the 1024 or vram, whichever is larger. 
- */ - if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->mc.mc_vram_size); - else - adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; - + amdgpu_gart_set_defaults(adev); gmc_v9_0_vram_gtt_location(adev, &adev->mc); return 0; @@ -537,10 +534,21 @@ static int gmc_v9_0_sw_init(void *handle) spin_lock_init(&adev->mc.invalidate_lock); - if (adev->flags & AMD_IS_APU) { + switch (adev->asic_type) { + case CHIP_RAVEN: adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; - amdgpu_vm_adjust_size(adev, 64); - } else { + if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { + adev->vm_manager.vm_size = 1U << 18; + adev->vm_manager.block_size = 9; + adev->vm_manager.num_level = 3; + amdgpu_vm_set_fragment_size(adev, 9); + } else { + /* vm_size is 64GB for legacy 2-level page support */ + amdgpu_vm_adjust_size(adev, 64, 9); + adev->vm_manager.num_level = 1; + } + break; + case CHIP_VEGA10: /* XXX Don't know how to get VRAM type yet. */ adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM; /* @@ -550,11 +558,18 @@ static int gmc_v9_0_sw_init(void *handle) */ adev->vm_manager.vm_size = 1U << 18; adev->vm_manager.block_size = 9; - DRM_INFO("vm size is %llu GB, block size is %u-bit\n", - adev->vm_manager.vm_size, - adev->vm_manager.block_size); + adev->vm_manager.num_level = 3; + amdgpu_vm_set_fragment_size(adev, 9); + break; + default: + break; } + DRM_INFO("vm size is %llu GB, block size is %u-bit,fragment size is %u-bit\n", + adev->vm_manager.vm_size, + adev->vm_manager.block_size, + adev->vm_manager.fragment_size); + /* This interrupt is VMC page fault.*/ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0, &adev->mc.vm_fault); @@ -619,11 +634,6 @@ static int gmc_v9_0_sw_init(void *handle) adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS; adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS; - /* TODO: fix num_level for APU when updating vm size and block size */ - if (adev->flags & AMD_IS_APU) - adev->vm_manager.num_level = 1; - else - adev->vm_manager.num_level = 3; amdgpu_vm_manager_init(adev); return 0; @@ -731,7 +741,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) gmc_v9_0_gart_flush_gpu_tlb(adev, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->mc.gtt_size >> 20), + (unsigned)(adev->mc.gart_size >> 20), (unsigned long long)adev->gart.table_addr); adev->gart.ready = true; return 0; @@ -745,6 +755,20 @@ static int gmc_v9_0_hw_init(void *handle) /* The sequence of these two function calls matters.*/ gmc_v9_0_init_golden_registers(adev); + if (adev->mode_info.num_crtc) { + u32 tmp; + + /* Lockout access through VGA aperture*/ + tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp); + + /* disable VGA render */ + tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL); + tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp); + } + r = gmc_v9_0_gart_enable(adev); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 9804318f3488..4395a4f12149 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) mmhub_v1_0_init_gart_pt_regs(adev); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->mc.gtt_start >> 12)); + 
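
Across the GMC blocks the GART bring-up switches from the mc.gtt_* fields to mc.gart_*, and the default-size computation that each gmc_v*_0_mc_init() duplicated is replaced by a single amdgpu_gart_set_defaults() call. For reference, a sketch of the default each version used to open-code, with the field renamed to match the new naming; the new helper centralizes this and its exact policy may differ:

	/* previously open-coded in each gmc_v*_0_mc_init() */
	if (amdgpu_gart_size == -1)
		/* no override: at least AMDGPU_DEFAULT_GTT_SIZE_MB, or all of VRAM */
		adev->mc.gart_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
					 adev->mc.mc_vram_size);
	else
		adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20;
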
(u32)(adev->mc.gart_start >> 12)); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->mc.gtt_start >> 44)); + (u32)(adev->mc.gart_start >> 44)); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, - (u32)(adev->mc.gtt_end >> 12)); + (u32)(adev->mc.gart_end >> 12)); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, - (u32)(adev->mc.gtt_end >> 44)); + (u32)(adev->mc.gart_end >> 44)); } static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) @@ -138,12 +138,12 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { - uint32_t tmp; + uint32_t tmp, field; /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); /* XXX for emulation, Refer to closed source code.*/ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); @@ -157,7 +157,10 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); + field = adev->vm_manager.fragment_size; tmp = mmVM_L2_CNTL3_DEFAULT; + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; @@ -222,6 +225,9 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, adev->vm_manager.block_size - 9); + /* Send no-retry XNACK on fault to suppress VM fault storm. 
*/ + tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp); WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0); WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0); @@ -245,28 +251,28 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) } struct pctl_data { - uint32_t index; - uint32_t data; + uint32_t index; + uint32_t data; }; -const struct pctl_data pctl0_data[] = { - {0x0, 0x7a640}, - {0x9, 0x2a64a}, - {0xd, 0x2a680}, - {0x11, 0x6a684}, - {0x19, 0xea68e}, - {0x29, 0xa69e}, - {0x2b, 0x34a6c0}, - {0x61, 0x83a707}, - {0xe6, 0x8a7a4}, - {0xf0, 0x1a7b8}, - {0xf3, 0xfa7cc}, - {0x104, 0x17a7dd}, - {0x11d, 0xa7dc}, - {0x11f, 0x12a7f5}, - {0x133, 0xa808}, - {0x135, 0x12a810}, - {0x149, 0x7a82c} +static const struct pctl_data pctl0_data[] = { + {0x0, 0x7a640}, + {0x9, 0x2a64a}, + {0xd, 0x2a680}, + {0x11, 0x6a684}, + {0x19, 0xea68e}, + {0x29, 0xa69e}, + {0x2b, 0x34a6c0}, + {0x61, 0x83a707}, + {0xe6, 0x8a7a4}, + {0xf0, 0x1a7b8}, + {0xf3, 0xfa7cc}, + {0x104, 0x17a7dd}, + {0x11d, 0xa7dc}, + {0x11f, 0x12a7f5}, + {0x133, 0xa808}, + {0x135, 0x12a810}, + {0x149, 0x7a82c} }; #define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0])) @@ -274,32 +280,39 @@ const struct pctl_data pctl0_data[] = { #define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640 #define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833 -const struct pctl_data pctl1_data[] = { - {0x0, 0x39a000}, - {0x3b, 0x44a040}, - {0x81, 0x2a08d}, - {0x85, 0x6ba094}, - {0xf2, 0x18a100}, - {0x10c, 0x4a132}, - {0x112, 0xca141}, - {0x120, 0x2fa158}, - {0x151, 0x17a1d0}, - {0x16a, 0x1a1e9}, - {0x16d, 0x13a1ec}, - {0x182, 0x7a201}, - {0x18b, 0x3a20a}, - {0x190, 0x7a580}, - {0x199, 0xa590}, - {0x19b, 0x4a594}, - {0x1a1, 0x1a59c}, - {0x1a4, 0x7a82c}, - {0x1ad, 0xfa7cc}, - {0x1be, 0x17a7dd}, - {0x1d7, 0x12a810} +static const struct pctl_data pctl1_data[] = { + {0x0, 0x39a000}, + {0x3b, 0x44a040}, + {0x81, 0x2a08d}, + {0x85, 0x6ba094}, + {0xf2, 0x18a100}, + {0x10c, 0x4a132}, + {0x112, 0xca141}, + {0x120, 0x2fa158}, + {0x151, 0x17a1d0}, + {0x16a, 0x1a1e9}, + {0x16d, 0x13a1ec}, + {0x182, 0x7a201}, + {0x18b, 0x3a20a}, + {0x190, 0x7a580}, + {0x199, 0xa590}, + {0x19b, 0x4a594}, + {0x1a1, 0x1a59c}, + {0x1a4, 0x7a82c}, + {0x1ad, 0xfa7cc}, + {0x1be, 0x17a7dd}, + {0x1d7, 0x12a810}, + {0x1eb, 0x4000a7e1}, + {0x1ec, 0x5000a7f5}, + {0x1ed, 0x4000a7e2}, + {0x1ee, 0x5000a7dc}, + {0x1ef, 0x4000a7e3}, + {0x1f0, 0x5000a7f6}, + {0x1f1, 0x5000a7e4} }; #define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0])) -#define PCTL1_RENG_EXEC_END_PTR 0x1ea +#define PCTL1_RENG_EXEC_END_PTR 0x1f1 #define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000 #define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d #define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE 0xa580 diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h index 57bb940c0ecd..5d38229baf69 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h @@ -36,7 +36,4 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev); void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, bool enable); -extern const struct amd_ip_funcs mmhub_v1_0_ip_funcs; -extern const struct amdgpu_ip_block_version mmhub_v1_0_ip_block; - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index bde3ca3c21c1..2812d88a8bdd 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -72,21 +72,6 @@ static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val) reg); } -static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev, - enum idh_request req) -{ - u32 reg; - - reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, - mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0)); - reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0, - MSGBUF_DATA, req); - WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0), - reg); - - xgpu_ai_mailbox_set_valid(adev, true); -} - static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev, enum idh_event event) { @@ -154,13 +139,25 @@ static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event) return r; } - -static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, - enum idh_request req) -{ +static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, + enum idh_request req, u32 data1, u32 data2, u32 data3) { + u32 reg; int r; - xgpu_ai_mailbox_trans_msg(adev, req); + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0)); + reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0, + MSGBUF_DATA, req); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0), + reg); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1), + data1); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2), + data2); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3), + data3); + + xgpu_ai_mailbox_set_valid(adev, true); /* start to poll ack */ r = xgpu_ai_poll_ack(adev); @@ -168,6 +165,14 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, pr_err("Doesn't get ack from pf, continue\n"); xgpu_ai_mailbox_set_valid(adev, false); +} + +static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, + enum idh_request req) +{ + int r; + + xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0); /* start to check msg if request is idh_req_gpu_init_access */ if (req == IDH_REQ_GPU_INIT_ACCESS || @@ -342,4 +347,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .req_full_gpu = xgpu_ai_request_full_gpu_access, .rel_full_gpu = xgpu_ai_release_full_gpu_access, .reset_gpu = xgpu_ai_request_reset, + .trans_msg = xgpu_ai_mailbox_trans_msg, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index 9aefc44d2c34..1e91b9a1c591 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -31,7 +31,9 @@ enum idh_request { IDH_REL_GPU_INIT_ACCESS, IDH_REQ_GPU_FINI_ACCESS, IDH_REL_GPU_FINI_ACCESS, - IDH_REQ_GPU_RESET_ACCESS + IDH_REQ_GPU_RESET_ACCESS, + + IDH_LOG_VF_ERROR = 200, }; enum idh_event { diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 171a658135b5..c25a831f94ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -613,4 +613,5 @@ const struct amdgpu_virt_ops xgpu_vi_virt_ops = { .req_full_gpu = xgpu_vi_request_full_gpu_access, .rel_full_gpu = xgpu_vi_release_full_gpu_access, .reset_gpu = xgpu_vi_request_reset, + .trans_msg = NULL, /* Does not need to trans VF errors to host. 
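The reworked xgpu_ai_mailbox_trans_msg() above folds the ack polling into the send path: write the request plus three data words into the TRN_DW registers, raise the VALID bit, poll for the host's ack, then drop VALID again. Here is a minimal userspace sketch of that request/valid/ack handshake; the struct layout and the simulated "host" are illustrative only, not the real VF/PF mailbox.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the VF->PF mailbox: message words, a VALID flag raised by
 * the guest, and an ACK flag normally raised by the host. */
struct mailbox {
	uint32_t msg;
	uint32_t data[3];
	bool valid;
	bool ack;
};

/* Stand-in for the host side: it acks as soon as it sees VALID. */
static void fake_host_poll(struct mailbox *mb)
{
	if (mb->valid)
		mb->ack = true;
}

static int mailbox_trans_msg(struct mailbox *mb, uint32_t req,
			     uint32_t d1, uint32_t d2, uint32_t d3)
{
	int timeout = 100;

	mb->msg = req;
	mb->data[0] = d1;
	mb->data[1] = d2;
	mb->data[2] = d3;
	mb->valid = true;		/* tell the host a message is pending */

	while (!mb->ack && timeout--)	/* bounded poll for the ack */
		fake_host_poll(mb);

	mb->valid = false;		/* always drop VALID again */
	return mb->ack ? 0 : -1;
}

int main(void)
{
	struct mailbox mb = {0};

	if (mailbox_trans_msg(&mb, /* e.g. IDH_LOG_VF_ERROR */ 200, 1, 2, 3))
		fprintf(stderr, "no ack from host\n");
	else
		printf("request 200 acked\n");
	return 0;
}

Exporting the transaction through the new .trans_msg virt op is what lets a VF pass the extra data words (for example an error log entry) to the host in one message.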
*/ }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h index 2db741131bc6..c791d73d2d54 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h @@ -32,7 +32,9 @@ enum idh_request { IDH_REL_GPU_INIT_ACCESS, IDH_REQ_GPU_FINI_ACCESS, IDH_REL_GPU_FINI_ACCESS, - IDH_REQ_GPU_RESET_ACCESS + IDH_REQ_GPU_RESET_ACCESS, + + IDH_LOG_VF_ERROR = 200, }; /* VI mailbox messages data */ diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 1e272f785def..045988b18bc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -32,6 +32,7 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 +#define smnPCIE_CONFIG_CNTL 0x11180044 u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev) { @@ -67,7 +68,7 @@ void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable) void nbio_v6_1_hdp_flush(struct amdgpu_device *adev) { - WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0); + WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0); } u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev) @@ -256,3 +257,15 @@ void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } } + +void nbio_v6_1_init_registers(struct amdgpu_device *adev) +{ + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); + + if (def != data) + WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); +} diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h index f6f8bc045518..686e4b4d296a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h @@ -50,5 +50,6 @@ void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable); void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags); void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev); +void nbio_v6_1_init_registers(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index aa04632523fa..11b70d601922 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -65,7 +65,7 @@ void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable) void nbio_v7_0_hdp_flush(struct amdgpu_device *adev) { - WREG32_SOC15(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); + WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); } u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 2258323a3c26..f7cf994b1da2 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -86,6 +86,52 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type * return 0; } +int psp_v10_0_init_microcode(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + const char *chip_name; + char fw_name[30]; + int err = 0; + const struct psp_firmware_header_v1_0 *hdr; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_RAVEN: + chip_name = "raven"; + break; + default: BUG(); + } + + 
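The new nbio_v6_1_init_registers() above uses the common def/data read-modify-write idiom: read the register once, adjust the fields of interest, and only issue the write when something actually changed. A minimal userspace sketch of that idiom follows; the register, masks and shifts are invented for illustration and are not the real PCIE_CONFIG_CNTL layout.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0x00000040;	/* stand-in for an MMIO register */

static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v)
{
	fake_reg = v;
	printf("write 0x%08x\n", (unsigned)v);
}

/* Generic helper mirroring REG_SET_FIELD(tmp, REG, FIELD, val). */
static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

#define EXAMPLE_FIELD_A_MASK  0x00000003u
#define EXAMPLE_FIELD_A_SHIFT 0
#define EXAMPLE_FIELD_B_MASK  0x00000030u
#define EXAMPLE_FIELD_B_SHIFT 4

int main(void)
{
	uint32_t def, data;

	def = data = reg_read();
	data = set_field(data, EXAMPLE_FIELD_A_MASK, EXAMPLE_FIELD_A_SHIFT, 1);
	data = set_field(data, EXAMPLE_FIELD_B_MASK, EXAMPLE_FIELD_B_SHIFT, 1);

	if (def != data)		/* skip the MMIO write when nothing changed */
		reg_write(data);
	else
		printf("no change, write skipped\n");
	return 0;
}

Skipping the write when def == data avoids redundant bus traffic and keeps the function safe to call repeatedly, which is why the pattern shows up throughout these register-init helpers.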
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); + err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->psp.asd_fw); + if (err) + goto out; + + hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; + adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version); + adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); + adev->psp.asd_start_addr = (uint8_t *)hdr + + le32_to_cpu(hdr->header.ucode_array_offset_bytes); + + return 0; +out: + if (err) { + dev_err(adev->dev, + "psp v10.0: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->psp.asd_fw); + adev->psp.asd_fw = NULL; + } + + return err; +} + int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd) { int ret; @@ -110,7 +156,6 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) { int ret = 0; - unsigned int psp_ring_reg = 0; struct psp_ring *ring; struct amdgpu_device *adev = psp->adev; @@ -130,6 +175,16 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) return ret; } + return 0; +} + +int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) +{ + int ret = 0; + unsigned int psp_ring_reg = 0; + struct psp_ring *ring = &psp->km_ring; + struct amdgpu_device *adev = psp->adev; + /* Write low address of the ring to C2PMSG_69 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg); @@ -143,13 +198,42 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) psp_ring_reg = ring_type; psp_ring_reg = psp_ring_reg << 16; WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); + + /* There might be handshake issue with hardware which needs delay */ + mdelay(20); + /* Wait for response flag (bit 31) in C2PMSG_64 */ - psp_ring_reg = 0; - while ((psp_ring_reg & 0x80000000) == 0) { - psp_ring_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64); - } + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x8000FFFF, false); - return 0; + return ret; +} + +int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) +{ + int ret = 0; + struct psp_ring *ring; + unsigned int psp_ring_reg = 0; + struct amdgpu_device *adev = psp->adev; + + ring = &psp->km_ring; + + /* Write the ring destroy command to C2PMSG_64 */ + psp_ring_reg = 3 << 16; + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg); + + /* There might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) in C2PMSG_64 */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + + amdgpu_bo_free_kernel(&adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + + return ret; } int psp_v10_0_cmd_submit(struct psp_context *psp, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h index 2022b7b7151e..e76cde2f01f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h @@ -27,10 +27,15 @@ #include "amdgpu_psp.h" +extern int psp_v10_0_init_microcode(struct psp_context *psp); extern int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct 
psp_gfx_cmd_resp *cmd); extern int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type); +extern int psp_v10_0_ring_create(struct psp_context *psp, + enum psp_ring_type ring_type); +extern int psp_v10_0_ring_destroy(struct psp_context *psp, + enum psp_ring_type ring_type); extern int psp_v10_0_cmd_submit(struct psp_context *psp, struct amdgpu_firmware_info *ucode, uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index c98d77d0c8f8..2a535a4b8d5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -237,11 +237,9 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); -#if 0 ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, true); -#endif return ret; } @@ -341,10 +339,10 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), 0x80000000, 0x80000000, false); - if (ring->ring_mem) - amdgpu_bo_free_kernel(&adev->firmware.rbuf, - &ring->ring_mem_mc_addr, - (void **)&ring->ring_mem); + amdgpu_bo_free_kernel(&adev->firmware.rbuf, + &ring->ring_mem_mc_addr, + (void **)&ring->ring_mem); + return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 1d766ae98dc8..b1de44f22824 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -551,17 +551,53 @@ static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev) */ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { - u32 f32_cntl; + u32 f32_cntl, phase_quantum = 0; int i; + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; + } + for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); - if (enable) + if (enable) { f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, 1); - else + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + ATC_L1_ENABLE, 1); + if (amdgpu_sdma_phase_quantum) { + WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i], + phase_quantum); + WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i], + phase_quantum); + } + } else { f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, 0); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + ATC_L1_ENABLE, 1); + } + WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl); } } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4a65697ccc94..fd7c72aaafa6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -291,6 +291,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) 
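The amdgpu_sdma_phase_quantum handling in sdma_v3_0_ctx_switch_enable() above packs a requested cycle count into a small VALUE/UNIT (mantissa/exponent) register field: halve the value, rounding up, until it fits in VALUE, bump UNIT each time, and clamp with a warning if even the largest UNIT overflows. A standalone sketch of that encoding follows; the field widths here are made up for illustration, the real masks come from the SDMA register headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: an 8-bit VALUE mantissa and a 4-bit UNIT exponent,
 * where the programmed quantum is roughly VALUE << UNIT. */
#define PHASE_QUANTUM_VALUE_MASK  0x000000ffu
#define PHASE_QUANTUM_VALUE_SHIFT 0
#define PHASE_QUANTUM_UNIT_MASK   0x00000f00u
#define PHASE_QUANTUM_UNIT_SHIFT  8

static uint32_t encode_phase_quantum(unsigned int requested)
{
	unsigned int value = requested;
	unsigned int unit = 0;
	unsigned int value_max = PHASE_QUANTUM_VALUE_MASK >> PHASE_QUANTUM_VALUE_SHIFT;
	unsigned int unit_max  = PHASE_QUANTUM_UNIT_MASK >> PHASE_QUANTUM_UNIT_SHIFT;

	/* Halve the mantissa (rounding up) until it fits in VALUE,
	 * bumping the exponent each time. */
	while (value > value_max) {
		value = (value + 1) >> 1;
		unit++;
	}

	/* If even the largest exponent cannot represent it, clamp to the
	 * maximum encodable quantum, as the driver does under WARN_ONCE(). */
	if (unit > unit_max) {
		value = value_max;
		unit = unit_max;
		fprintf(stderr, "clamping quantum to %u\n", value << unit);
	}

	return (value << PHASE_QUANTUM_VALUE_SHIFT) |
	       (unit << PHASE_QUANTUM_UNIT_SHIFT);
}

int main(void)
{
	unsigned int requested = 300000;	/* example requested quantum */
	uint32_t reg = encode_phase_quantum(requested);
	unsigned int value = (reg & PHASE_QUANTUM_VALUE_MASK) >> PHASE_QUANTUM_VALUE_SHIFT;
	unsigned int unit  = (reg & PHASE_QUANTUM_UNIT_MASK) >> PHASE_QUANTUM_UNIT_SHIFT;

	printf("requested=%u -> reg=0x%08x (value=%u, unit=%u, ~%u)\n",
	       requested, (unsigned)reg, value, unit, value << unit);
	return 0;
}

Rounding up while halving means the encoded quantum never undershoots the request by more than the rounding loss, which is why the same loop is reused for the v4.0 engine below.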
DRM_DEBUG("Setting write pointer\n"); if (ring->use_doorbell) { + u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs]; + DRM_DEBUG("Using doorbell -- " "wptr_offs == 0x%08x " "lower_32_bits(ring->wptr) << 2 == 0x%08x " @@ -299,8 +301,7 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) lower_32_bits(ring->wptr << 2), upper_32_bits(ring->wptr << 2)); /* XXX check if swapping is necessary on BE */ - adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2); - adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2); + WRITE_ONCE(*wb, (ring->wptr << 2)); DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", ring->doorbell_index, ring->wptr << 2); WDOORBELL64(ring->doorbell_index, ring->wptr << 2); @@ -493,13 +494,45 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev) */ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { - u32 f32_cntl; + u32 f32_cntl, phase_quantum = 0; int i; + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >> + SDMA0_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >> + SDMA0_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; + } + for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL)); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, enable ? 
1 : 0); + if (enable && amdgpu_sdma_phase_quantum) { + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE0_QUANTUM), + phase_quantum); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE1_QUANTUM), + phase_quantum); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE2_QUANTUM), + phase_quantum); + } WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), f32_cntl); } @@ -541,12 +574,13 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - u32 rb_cntl, ib_cntl; + u32 rb_cntl, ib_cntl, wptr_poll_cntl; u32 rb_bufsz; u32 wb_offset; u32 doorbell; u32 doorbell_offset; u32 temp; + u64 wptr_gpu_addr; int i, r; for (i = 0; i < adev->sdma.num_instances; i++) { @@ -628,6 +662,19 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp); } + /* setup the wptr shadow polling */ + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + lower_32_bits(wptr_gpu_addr)); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); + if (amdgpu_sriov_vf(adev)) + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); + else + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0); + WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl); + /* enable DMA RB */ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl); @@ -655,6 +702,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) if (adev->mman.buffer_funcs_ring == ring) amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); + } return 0; @@ -751,15 +799,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev) const struct sdma_firmware_header_v1_0 *hdr; const __le32 *fw_data; u32 fw_size; - u32 digest_size = 0; int i, j; /* halt the MEs */ sdma_v4_0_enable(adev, false); for (i = 0; i < adev->sdma.num_instances; i++) { - uint16_t version_major; - uint16_t version_minor; if (!adev->sdma.instance[i].fw) return -EINVAL; @@ -767,23 +812,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - version_major = le16_to_cpu(hdr->header.header_version_major); - version_minor = le16_to_cpu(hdr->header.header_version_minor); - - if (version_major == 1 && version_minor >= 1) { - const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr = (const struct sdma_firmware_header_v1_1 *) hdr; - digest_size = le32_to_cpu(sdma_v1_1_hdr->digest_size); - } - - fw_size -= digest_size; - fw_data = (const __le32 *) (adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0); - for (j = 0; j < fw_size; j++) WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++)); diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 4267fa417997..8284d5dbfc30 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1150,6 +1150,33 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev) return r; } +#define mmROM_INDEX 0x2A +#define 
mmROM_DATA 0x2B + +static bool si_read_bios_from_rom(struct amdgpu_device *adev, + u8 *bios, u32 length_bytes) +{ + u32 *dw_ptr; + u32 i, length_dw; + + if (bios == NULL) + return false; + if (length_bytes == 0) + return false; + /* APU vbios image is part of sbios image */ + if (adev->flags & AMD_IS_APU) + return false; + + dw_ptr = (u32 *)bios; + length_dw = ALIGN(length_bytes, 4) / 4; + /* set rom index to 0 */ + WREG32(mmROM_INDEX, 0); + for (i = 0; i < length_dw; i++) + dw_ptr[i] = RREG32(mmROM_DATA); + + return true; +} + //xxx: not implemented static int si_asic_reset(struct amdgpu_device *adev) { @@ -1206,6 +1233,7 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev) static const struct amdgpu_asic_funcs si_asic_funcs = { .read_disabled_bios = &si_read_disabled_bios, + .read_bios_from_rom = &si_read_bios_from_rom, .read_register = &si_read_register, .reset = &si_asic_reset, .set_vga_state = &si_vga_set_state, diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index a7ad8390981c..d63873f3f574 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -2055,6 +2055,7 @@ static void si_initialize_powertune_defaults(struct amdgpu_device *adev) case 0x682C: si_pi->cac_weights = cac_weights_cape_verde_pro; si_pi->dte_data = dte_data_sun_xt; + update_dte_from_pl2 = true; break; case 0x6825: case 0x6827: diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index a7341d88a320..f2c3a49f73a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -25,7 +25,7 @@ #include <linux/module.h> #include <drm/drmP.h> #include "amdgpu.h" -#include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" #include "amdgpu_ih.h" #include "amdgpu_uvd.h" #include "amdgpu_vce.h" @@ -62,8 +62,6 @@ #include "dce_virtual.h" #include "mxgpu_ai.h" -MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); - #define mmFabricConfigAccessControl 0x0410 #define mmFabricConfigAccessControl_BASE_IDX 0 #define mmFabricConfigAccessControl_DEFAULT 0x00000000 @@ -198,6 +196,50 @@ static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->didt_idx_lock, flags); } +static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg)); + r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); + return r; +} + +static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg)); + WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); +} + +static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->se_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg)); + r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA); + spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags); + return r; +} + +static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->se_cac_idx_lock, flags); + WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg)); + WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags); +} + static u32 
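The new soc15_gc_cac_rreg()/_wreg() and se_cac helpers above follow the usual indexed-register pattern: write the target offset into an INDEX register, then read or write the paired DATA register, all under one lock so the index/data sequence cannot be interleaved with a concurrent access. A userspace sketch with a mutex standing in for the irq-save spinlock; the register names and backing array are made up.

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

/* Toy indexed register block: the INDEX register selects which slot of a
 * hidden bank the DATA register maps to. */
static uint32_t ind_index;
static uint32_t ind_bank[16];
static pthread_mutex_t ind_lock = PTHREAD_MUTEX_INITIALIZER;

static void write_index(uint32_t reg)  { ind_index = reg & 0xf; }
static uint32_t read_data(void)        { return ind_bank[ind_index]; }
static void write_data(uint32_t v)     { ind_bank[ind_index] = v; }

/* Shape of soc15_gc_cac_rreg(): lock, select, read, unlock. */
static uint32_t cac_rreg(uint32_t reg)
{
	uint32_t r;

	pthread_mutex_lock(&ind_lock);	/* spin_lock_irqsave() in the driver */
	write_index(reg);
	r = read_data();
	pthread_mutex_unlock(&ind_lock);
	return r;
}

/* Shape of soc15_gc_cac_wreg(): lock, select, write, unlock. */
static void cac_wreg(uint32_t reg, uint32_t v)
{
	pthread_mutex_lock(&ind_lock);
	write_index(reg);
	write_data(v);
	pthread_mutex_unlock(&ind_lock);
}

int main(void)
{
	cac_wreg(3, 0xdeadbeef);
	printf("reg 3 = 0x%08x\n", (unsigned)cac_rreg(3));
	return 0;
}

The per-block index locks (gc_cac_idx_lock, se_cac_idx_lock) exist precisely because two CPUs racing on the shared INDEX register would otherwise read or write the wrong slot.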
soc15_get_config_memsize(struct amdgpu_device *adev) { if (adev->flags & AMD_IS_APU) @@ -392,11 +434,11 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev) { - amdgpu_atomfirmware_scratch_regs_engine_hung(adev, true); + amdgpu_atombios_scratch_regs_engine_hung(adev, true); soc15_gpu_pci_config_reset(adev); - amdgpu_atomfirmware_scratch_regs_engine_hung(adev, false); + amdgpu_atombios_scratch_regs_engine_hung(adev, false); return 0; } @@ -524,13 +566,6 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) return nbio_v6_1_get_rev_id(adev); } - -int gmc_v9_0_mc_wait_for_idle(struct amdgpu_device *adev) -{ - /* to be implemented in MC IP*/ - return 0; -} - static const struct amdgpu_asic_funcs soc15_asic_funcs = { .read_disabled_bios = &soc15_read_disabled_bios, @@ -557,6 +592,10 @@ static int soc15_common_early_init(void *handle) adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg; adev->didt_rreg = &soc15_didt_rreg; adev->didt_wreg = &soc15_didt_wreg; + adev->gc_cac_rreg = &soc15_gc_cac_rreg; + adev->gc_cac_wreg = &soc15_gc_cac_wreg; + adev->se_cac_rreg = &soc15_se_cac_rreg; + adev->se_cac_wreg = &soc15_se_cac_wreg; adev->asic_funcs = &soc15_asic_funcs; @@ -681,6 +720,9 @@ static int soc15_common_hw_init(void *handle) soc15_pcie_gen3_enable(adev); /* enable aspm */ soc15_program_aspm(adev); + /* setup nbio registers */ + if (!(adev->flags & AMD_IS_APU)) + nbio_v6_1_init_registers(adev); /* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index e2d330eed952..7a8e4e28abb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -77,6 +77,13 @@ struct nbio_pcie_index_data { (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \ (ip##_BASE__INST##inst##_SEG4 + reg))))), value) +#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \ + WREG32_NO_KIQ( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \ + (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \ + (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \ + (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \ + (ip##_BASE__INST##inst##_SEG4 + reg))))), value) + #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \ WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \ (1 == reg##_BASE_IDX ? 
ip##_BASE__INST##inst##_SEG1 + reg : \ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h index e79befd80eed..7f408f85fdb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15d.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h @@ -250,6 +250,7 @@ #define PACKET3_SET_UCONFIG_REG 0x79 #define PACKET3_SET_UCONFIG_REG_START 0x0000c000 #define PACKET3_SET_UCONFIG_REG_END 0x0000c400 +#define PACKET3_SET_UCONFIG_REG_INDEX_TYPE (2 << 28) #define PACKET3_SCRATCH_RAM_WRITE 0x7D #define PACKET3_SCRATCH_RAM_READ 0x7E #define PACKET3_LOAD_CONST_RAM 0x80 diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 987b958368ac..23a85750edd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -165,6 +165,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; + if (amdgpu_sriov_vf(adev)) + return 0; + r = amdgpu_ring_alloc(ring, 16); if (r) { DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n", @@ -432,13 +435,19 @@ static int uvd_v7_0_sw_init(void *handle) return r; } - for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.ring_enc[i]; sprintf(ring->name, "uvd_enc%d", i); if (amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; - ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; + + /* currently only use the first enconding ring for + * sriov, so set unused location for other unused rings. + */ + if (i == 0) + ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; + else + ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; } r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); if (r) @@ -685,6 +694,11 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, /* 4, set resp to zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); + WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0); + adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0; + adev->uvd.ring_enc[0].wptr = 0; + adev->uvd.ring_enc[0].wptr_old = 0; + /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); @@ -702,7 +716,6 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data); return -EBUSY; } - WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0); return 0; } @@ -736,11 +749,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) init_table += header->uvd_table_offset; ring = &adev->uvd.ring; + ring->wptr = 0; size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); - /* disable clock gating */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), - ~UVD_POWER_STATUS__UVD_PG_MODE_MASK, 0); MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0xFFFFFFFF, 0x00000004); /* mc resume*/ @@ -777,12 +788,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG), - adev->gfx.config.gb_addr_config); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG), - adev->gfx.config.gb_addr_config); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG), - adev->gfx.config.gb_addr_config); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, 
mmUVD_GP_SCRATCH4), adev->uvd.max_handles); /* mc resume end*/ @@ -819,17 +824,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) UVD_LMI_CTRL__REQ_MODE_MASK | 0x00100000L)); - /* disable byte swapping */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), 0); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), 0); - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88); - /* take all subblocks out of reset, except VCPU */ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); @@ -838,15 +832,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), UVD_VCPU_CNTL__CLK_EN_MASK); - /* enable UMC */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); - - /* boot up the VCPU */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0); - - MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02); - /* enable master interrupt */ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), @@ -859,40 +844,31 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) /* force RBC into idle state */ size = order_base_2(ring->ring_size); tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp); - /* set the write pointer delay */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0); - - /* set the wb address */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR), - (upper_32_bits(ring->gpu_addr) >> 2)); - - /* programm the RB_BASE for ring buffer */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), - lower_32_bits(ring->gpu_addr)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), - upper_32_bits(ring->gpu_addr)); - - ring->wptr = 0; ring = &adev->uvd.ring_enc[0]; + ring->wptr = 0; MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4); + /* boot up the VCPU */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0); + + /* enable UMC */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); + + MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, 
mmUVD_STATUS), 0x02, 0x02); + /* add end packet */ memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; header->uvd_table_size = table_size; - return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table); } - return -EINVAL; /* already initializaed ? */ + return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 1ecd6bb90c1f..11134d5f7443 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -173,6 +173,11 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev, /* 4, set resp to zero */ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0); + WDOORBELL32(adev->vce.ring[0].doorbell_index, 0); + adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0; + adev->vce.ring[0].wptr = 0; + adev->vce.ring[0].wptr_old = 0; + /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001); @@ -190,7 +195,6 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev, dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data); return -EBUSY; } - WDOORBELL32(adev->vce.ring[0].doorbell_index, 0); return 0; } @@ -274,7 +278,8 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev) MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0); MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), - 0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, + VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); /* end of MC_RESUME */ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), @@ -296,11 +301,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev) memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; header->vce_table_size = table_size; - - return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table); } - return -EINVAL; /* already initializaed ? */ + return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table); } /** @@ -443,12 +446,14 @@ static int vce_v4_0_sw_init(void *handle) if (amdgpu_sriov_vf(adev)) { /* DOORBELL only works under SRIOV */ ring->use_doorbell = true; + + /* currently only use the first encoding ring for sriov, + * so set unused location for other unused rings. 
+ */ if (i == 0) - ring->doorbell_index = AMDGPU_DOORBELL64_RING0_1 * 2; - else if (i == 1) - ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2; + ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2; else - ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2 + 1; + ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1; } r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); if (r) @@ -990,11 +995,13 @@ static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev, { uint32_t val = 0; - if (state == AMDGPU_IRQ_STATE_ENABLE) - val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK; + if (!amdgpu_sriov_vf(adev)) { + if (state == AMDGPU_IRQ_STATE_ENABLE) + val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK; - WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val, - ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val, + ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 6cac291c96da..9ff69b90df36 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1028,8 +1028,7 @@ static int vi_common_early_init(void *handle) /* rev0 hardware requires workarounds to support PG */ adev->pg_flags = 0; if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) { - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_GFX_SMG | + adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_PIPELINE | AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_UVD | |