author     Linus Torvalds <torvalds@linux-foundation.org>	2016-10-12 04:12:22 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>	2016-10-12 04:12:22 +0300
commit     6b25e21fa6f26d0f0d45f161d169029411c84286
tree       fdff805ecd81ec46951f49577efe450ddb7d060a /drivers/gpu/drm/amd/amdgpu
parent     a379f71a30dddbd2e7393624e455ce53c87965d1
parent     69405d3da98b48633b78a49403e4f9cdb7c6a0f5
download   linux-6b25e21fa6f26d0f0d45f161d169029411c84286.tar.xz
Merge tag 'drm-for-v4.9' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"Core:
- Fence destaging work
- DRIVER_LEGACY to split off legacy drm drivers
- drm_mm refactoring
- Splitting drm_crtc.c into chunks and documenting better
- Display info fixes
- rbtree support for prime buffer lookup
- Simple VGA DAC driver
Panel:
- Add Nexus 7 panel
- More simple panels
i915:
- Refactoring GEM naming
- Refactored vma/active tracking
- Lockless request lookups
- Better stolen memory support
- FBC fixes
- SKL watermark fixes
- VGPU improvements
- dma-buf fencing support
- Better DP dongle support
amdgpu:
- Powerplay for Iceland asics
- Improved GPU reset support
- UVD/VCE powergating support for CZ/ST
- Preinitialised VRAM buffer support
- Virtual display support
- Initial SI support
- GTT rework
- PCI shutdown callback support
- HPD IRQ storm fixes
amdkfd:
- bugfixes
tilcdc:
- Atomic modesetting support
mediatek:
- AAL + GAMMA engine support
- Hook up gamma LUT
- Temporal dithering support
imx:
- Pixel clock from devicetree
- drm bridge support for LVDS bridges
- active plane reconfiguration
- VDIC deinterlacer support
- Frame synchronisation unit support
- Color space conversion support
analogix:
- PSR support
- Better panel on/off support
rockchip:
- rk3399 vop/crtc support
- PSR support
vc4:
- Interlaced vblank timing
- 3D rendering CPU overhead reduction
- HDMI output fixes
tda998x:
- HDMI audio ASoC support
sunxi:
- Allwinner A33 support
- better TCON support
msm:
- DT binding cleanups
- Explicit fence-fd support
sti:
- remove sti415/416 support
etnaviv:
- MMUv2 refactoring
- GC3000 support
exynos:
- Refactoring HDMI DDC/PHY
- G2D pm regression fix
- Page fault issues with wait for vblank
There is no nouveau work in this tree, as Ben didn't get a pull
request in, and he was fighting moving to atomic and adding mst
support, so maybe best it waits for a cycle"
* tag 'drm-for-v4.9' of git://people.freedesktop.org/~airlied/linux: (1412 commits)
drm/crtc: constify drm_crtc_index parameter
drm/i915: Fix conflict resolution from backmerge of v4.8-rc8 to drm-next
drm/i915/guc: Unwind GuC workqueue reservation if request construction fails
drm/i915: Reset the breadcrumbs IRQ more carefully
drm/i915: Force relocations via cpu if we run out of idle aperture
drm/i915: Distinguish last emitted request from last submitted request
drm/i915: Allow DP to work w/o EDID
drm/i915: Move long hpd handling into the hotplug work
drm/i915/execlists: Reinitialise context image after GPU hang
drm/i915: Use correct index for backtracking HUNG semaphores
drm/i915: Unalias obj->phys_handle and obj->userptr
drm/i915: Just clear the mmiodebug before a register access
drm/i915/gen9: only add the planes actually affected by ddb changes
drm/i915: Allow PCH DPLL sharing regardless of DPLL_SDVO_HIGH_SPEED
drm/i915/bxt: Fix HDMI DPLL configuration
drm/i915/gen9: fix the watermark res_blocks value
drm/i915/gen9: fix plane_blocks_per_line on watermarks calculations
drm/i915/gen9: minimum scanlines for Y tile is not always 4
drm/i915/gen9: fix the WaWmMemoryReadLatency implementation
drm/i915/kbl: KBL also needs to run the SAGV code
...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
105 files changed, 27769 insertions, 5343 deletions
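
Among the amdgpu.h changes below, the fixed AMDGPU_PTE_FRAG_4KB/AMDGPU_PTE_FRAG_64KB defines are replaced by a parameterised AMDGPU_PTE_FRAG(x) macro that encodes the log2 number of contiguous pages directly in the PTE fragment field. The following minimal standalone sketch shows that encoding; the OLD_* names and the main() check are purely illustrative and not driver code, only the bit layout and AMDGPU_LOG2_PAGES_PER_FRAG value are taken from the diff.

#include <assert.h>
#include <stdio.h>

/* Taken from the amdgpu.h hunk in this diff (parentheses around x added here). */
#define AMDGPU_LOG2_PAGES_PER_FRAG 4                   /* 2^4 pages => 64KB fragments */
#define AMDGPU_PTE_FRAG(x)         (((x) & 0x1f) << 7) /* log2 pages-per-fragment in bits 7..11 */

/* The fixed defines removed by this diff, kept here only for comparison. */
#define OLD_AMDGPU_PTE_FRAG_4KB    (0 << 7)
#define OLD_AMDGPU_PTE_FRAG_64KB   (4 << 7)

int main(void)
{
	/* The parameterised macro reproduces both of the old fixed encodings. */
	assert(AMDGPU_PTE_FRAG(0) == OLD_AMDGPU_PTE_FRAG_4KB);
	assert(AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG) == OLD_AMDGPU_PTE_FRAG_64KB);

	printf("64KB fragment field value: 0x%x\n",
	       (unsigned)AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG));
	return 0;
}

Because AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG) reproduces the value of the removed 64KB define, the replacement is behaviour-preserving for the existing 4KB/64KB cases while allowing other fragment sizes.
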
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 7335c0420c70..61360e27715f 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -1,3 +1,10 @@ +config DRM_AMDGPU_SI + bool "Enable amdgpu support for SI parts" + depends on DRM_AMDGPU + help + Choose this option if you want to enable experimental support + for SI asics. + config DRM_AMDGPU_CIK bool "Enable amdgpu support for CIK parts" depends on DRM_AMDGPU @@ -25,3 +32,4 @@ config DRM_AMDGPU_GART_DEBUGFS Selecting this option creates a debugfs file to inspect the mapped pages. Uses more memory for housekeeping, enable only for debugging. +source "drivers/gpu/drm/amd/acp/Kconfig" diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index c7fcdcedaadb..248a05d02917 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -23,13 +23,16 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \ atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ - amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o + amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ + amdgpu_gtt_mgr.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \ amdgpu_amdkfd_gfx_v7.o +amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o + amdgpu-y += \ vi.o @@ -50,15 +53,13 @@ amdgpu-y += \ amdgpu-y += \ amdgpu_dpm.o \ amdgpu_powerplay.o \ - cz_smc.o cz_dpm.o \ - tonga_smc.o tonga_dpm.o \ - fiji_smc.o fiji_dpm.o \ - iceland_smc.o iceland_dpm.o + cz_smc.o cz_dpm.o # add DCE block amdgpu-y += \ dce_v10_0.o \ - dce_v11_0.o + dce_v11_0.o \ + dce_virtual.o # add GFX block amdgpu-y += \ @@ -110,14 +111,10 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o -ifneq ($(CONFIG_DRM_AMD_POWERPLAY),) - include $(FULL_AMD_PATH)/powerplay/Makefile amdgpu-y += $(AMD_POWERPLAY_FILES) -endif - obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o CFLAGS_amdgpu_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h index 06192698bd96..b8d66670bb17 100644 --- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h +++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h @@ -90,6 +90,7 @@ #define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27 +#define ENCODER_OBJECT_ID_VIRTUAL 0x28 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF @@ -119,6 +120,7 @@ #define CONNECTOR_OBJECT_ID_eDP 0x14 #define CONNECTOR_OBJECT_ID_MXM 0x15 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 +#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17 /* deleted */ @@ -147,6 +149,7 @@ #define GRAPH_OBJECT_ENUM_ID5 0x05 #define GRAPH_OBJECT_ENUM_ID6 0x06 #define GRAPH_OBJECT_ENUM_ID7 0x07 +#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08 /****************************************************/ /* Graphics Object ID Bit definition */ @@ -408,6 +411,10 @@ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT) +#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT) + 
/****************************************************/ /* Connector Object ID definition - Shared with BIOS */ /****************************************************/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 700c56baf2de..039b57e4644c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -51,11 +51,13 @@ #include "amdgpu_ih.h" #include "amdgpu_irq.h" #include "amdgpu_ucode.h" +#include "amdgpu_ttm.h" #include "amdgpu_gds.h" #include "amd_powerplay.h" #include "amdgpu_acp.h" #include "gpu_scheduler.h" +#include "amdgpu_virt.h" /* * Modules parameters. @@ -63,6 +65,7 @@ extern int amdgpu_modeset; extern int amdgpu_vram_limit; extern int amdgpu_gart_size; +extern int amdgpu_moverate; extern int amdgpu_benchmarking; extern int amdgpu_testing; extern int amdgpu_audio; @@ -91,6 +94,9 @@ extern unsigned amdgpu_pcie_lane_cap; extern unsigned amdgpu_cg_mask; extern unsigned amdgpu_pg_mask; extern char *amdgpu_disable_cu; +extern int amdgpu_sclk_deep_sleep_en; +extern char *amdgpu_virtual_display; +extern unsigned amdgpu_pp_feature_mask; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -105,7 +111,7 @@ extern char *amdgpu_disable_cu; #define AMDGPU_MAX_RINGS 16 #define AMDGPU_MAX_GFX_RINGS 1 #define AMDGPU_MAX_COMPUTE_RINGS 8 -#define AMDGPU_MAX_VCE_RINGS 2 +#define AMDGPU_MAX_VCE_RINGS 3 /* max number of IP instances */ #define AMDGPU_MAX_SDMA_INSTANCES 2 @@ -248,10 +254,9 @@ struct amdgpu_vm_pte_funcs { uint64_t pe, uint64_t src, unsigned count); /* write pte one entry at a time with addr mapping */ - void (*write_pte)(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags); + void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr); /* for linear pte/pde updates without addr mapping */ void (*set_pte_pde)(struct amdgpu_ib *ib, uint64_t pe, @@ -316,6 +321,10 @@ struct amdgpu_ring_funcs { /* note usage for clock and power gating */ void (*begin_use)(struct amdgpu_ring *ring); void (*end_use)(struct amdgpu_ring *ring); + void (*emit_switch_buffer) (struct amdgpu_ring *ring); + void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); + unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring); + unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring); }; /* @@ -396,48 +405,8 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); /* - * TTM. + * BO. 
*/ - -#define AMDGPU_TTM_LRU_SIZE 20 - -struct amdgpu_mman_lru { - struct list_head *lru[TTM_NUM_MEM_TYPES]; - struct list_head *swap_lru; -}; - -struct amdgpu_mman { - struct ttm_bo_global_ref bo_global_ref; - struct drm_global_reference mem_global_ref; - struct ttm_bo_device bdev; - bool mem_global_referenced; - bool initialized; - -#if defined(CONFIG_DEBUG_FS) - struct dentry *vram; - struct dentry *gtt; -#endif - - /* buffer handling */ - const struct amdgpu_buffer_funcs *buffer_funcs; - struct amdgpu_ring *buffer_funcs_ring; - /* Scheduler entity for buffer moves */ - struct amd_sched_entity entity; - - /* custom LRU management */ - struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; - /* guard for log2_size array, don't add anything in between */ - struct amdgpu_mman_lru guard; -}; - -int amdgpu_copy_buffer(struct amdgpu_ring *ring, - uint64_t src_offset, - uint64_t dst_offset, - uint32_t byte_count, - struct reservation_object *resv, - struct fence **fence); -int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); - struct amdgpu_bo_list_entry { struct amdgpu_bo *robj; struct ttm_validate_buffer tv; @@ -476,8 +445,6 @@ struct amdgpu_bo_va { #define AMDGPU_GEM_DOMAIN_MAX 0x3 struct amdgpu_bo { - /* Protected by gem.mutex */ - struct list_head list; /* Protected by tbo.reserved */ u32 prefered_domains; u32 allowed_domains; @@ -500,10 +467,12 @@ struct amdgpu_bo { struct amdgpu_device *adev; struct drm_gem_object gem_base; struct amdgpu_bo *parent; + struct amdgpu_bo *shadow; struct ttm_bo_kmap_obj dma_buf_vmap; struct amdgpu_mn *mn; struct list_head mn_list; + struct list_head shadow_list; }; #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) @@ -653,6 +622,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, int pages, struct page **pagelist, dma_addr_t *dma_addr, uint32_t flags); +int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); /* * GPU MC structures, functions & helpers @@ -679,6 +649,8 @@ struct amdgpu_mc { uint32_t fw_version; struct amdgpu_irq_src vm_fault; uint32_t vram_type; + uint32_t srbm_soft_reset; + struct amdgpu_mode_mc_save save; }; /* @@ -723,13 +695,14 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, */ struct amdgpu_flip_work { - struct work_struct flip_work; + struct delayed_work flip_work; struct work_struct unpin_work; struct amdgpu_device *adev; int crtc_id; + u32 target_vblank; uint64_t base; struct drm_pending_vblank_event *event; - struct amdgpu_bo *old_rbo; + struct amdgpu_bo *old_abo; struct fence *excl; unsigned shared_count; struct fence **shared; @@ -817,13 +790,17 @@ struct amdgpu_ring { /* maximum number of VMIDs */ #define AMDGPU_NUM_VM 16 +/* Maximum number of PTEs the hardware can write with one command */ +#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF + /* number of entries in page table */ #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) /* PTBs (Page Table Blocks) need to be aligned to 32K */ #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 -#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1) -#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK) + +/* LOG2 number of continuous pages for the fragment field */ +#define AMDGPU_LOG2_PAGES_PER_FRAG 4 #define AMDGPU_PTE_VALID (1 << 0) #define AMDGPU_PTE_SYSTEM (1 << 1) @@ -835,10 +812,7 @@ struct amdgpu_ring { #define AMDGPU_PTE_READABLE (1 << 5) #define AMDGPU_PTE_WRITEABLE (1 << 6) -/* PTE (Page Table 
Entry) fragment field for different page sizes */ -#define AMDGPU_PTE_FRAG_4KB (0 << 7) -#define AMDGPU_PTE_FRAG_64KB (4 << 7) -#define AMDGPU_LOG2_PAGES_PER_FRAG 4 +#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7) /* How to programm VM fault handling */ #define AMDGPU_VM_FAULT_STOP_NEVER 0 @@ -848,6 +822,7 @@ struct amdgpu_ring { struct amdgpu_vm_pt { struct amdgpu_bo_list_entry entry; uint64_t addr; + uint64_t shadow_addr; }; struct amdgpu_vm { @@ -950,7 +925,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); -uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, @@ -959,7 +933,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_sync *sync); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - struct ttm_mem_reg *mem); + bool clear); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo); struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, @@ -994,6 +968,7 @@ struct amdgpu_ctx { spinlock_t ring_lock; struct fence **fences; struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; + bool preamble_presented; }; struct amdgpu_ctx_mgr { @@ -1197,6 +1172,10 @@ struct amdgpu_gfx { unsigned ce_ram_size; struct amdgpu_cu_info cu_info; const struct amdgpu_gfx_funcs *funcs; + + /* reset mask */ + uint32_t grbm_soft_reset; + uint32_t srbm_soft_reset; }; int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, @@ -1249,11 +1228,16 @@ struct amdgpu_cs_parser { struct fence *fence; uint64_t bytes_moved_threshold; uint64_t bytes_moved; + struct amdgpu_bo_list_entry *evictable; /* user fence */ struct amdgpu_bo_list_entry uf_entry; }; +#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */ +#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means preamble IB is first presented in belonging context */ +#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */ + struct amdgpu_job { struct amd_sched_job base; struct amdgpu_device *adev; @@ -1262,9 +1246,10 @@ struct amdgpu_job { struct amdgpu_sync sync; struct amdgpu_ib *ibs; struct fence *fence; /* the hw fence */ + uint32_t preamble_status; uint32_t num_ibs; void *owner; - uint64_t ctx; + uint64_t fence_ctx; /* the fence_context this job uses */ bool vm_needs_flush; unsigned vm_id; uint64_t vm_pd_addr; @@ -1685,6 +1670,7 @@ struct amdgpu_uvd { bool address_64_bit; bool use_ctx_buf; struct amd_sched_entity entity; + uint32_t srbm_soft_reset; }; /* @@ -1711,6 +1697,8 @@ struct amdgpu_vce { struct amdgpu_irq_src irq; unsigned harvest_config; struct amd_sched_entity entity; + uint32_t srbm_soft_reset; + unsigned num_rings; }; /* @@ -1728,9 +1716,14 @@ struct amdgpu_sdma_instance { struct amdgpu_sdma { struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; +#ifdef CONFIG_DRM_AMDGPU_SI + //SI DMA has a difference trap irq number for the second engine + struct amdgpu_irq_src trap_irq_1; +#endif struct amdgpu_irq_src trap_irq; struct amdgpu_irq_src illegal_inst_irq; int num_instances; + uint32_t srbm_soft_reset; }; /* @@ -1832,6 +1825,7 @@ struct amdgpu_asic_funcs { bool (*read_disabled_bios)(struct amdgpu_device *adev); bool 
(*read_bios_from_rom)(struct amdgpu_device *adev, u8 *bios, u32 length_bytes); + void (*detect_hw_virtualization) (struct amdgpu_device *adev); int (*read_register)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 reg_offset, u32 *value); void (*set_vga_state)(struct amdgpu_device *adev, bool state); @@ -1841,8 +1835,9 @@ struct amdgpu_asic_funcs { /* MM block clocks */ int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); - /* query virtual capabilities */ - u32 (*get_virtual_caps)(struct amdgpu_device *adev); + /* static power management */ + int (*get_pcie_lanes)(struct amdgpu_device *adev); + void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes); }; /* @@ -1935,16 +1930,6 @@ struct amdgpu_atcs { struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); - -/* GPU virtualization */ -#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0) -#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1) -struct amdgpu_virtualization { - bool supports_sr_iov; - bool is_virtual; - u32 caps; -}; - /* * Core structure, functions and helpers. */ @@ -1958,6 +1943,8 @@ struct amdgpu_ip_block_status { bool valid; bool sw; bool hw; + bool late_initialized; + bool hang; }; struct amdgpu_device { @@ -2016,6 +2003,8 @@ struct amdgpu_device { spinlock_t pcie_idx_lock; amdgpu_rreg_t pcie_rreg; amdgpu_wreg_t pcie_wreg; + amdgpu_rreg_t pciep_rreg; + amdgpu_wreg_t pciep_wreg; /* protects concurrent UVD register access */ spinlock_t uvd_ctx_idx_lock; amdgpu_rreg_t uvd_ctx_rreg; @@ -2056,7 +2045,16 @@ struct amdgpu_device { atomic64_t num_evictions; atomic_t gpu_reset_counter; + /* data for buffer migration throttling */ + struct { + spinlock_t lock; + s64 last_update_us; + s64 accum_us; /* accumulated microseconds */ + u32 log2_max_MBps; + } mm_stats; + /* display */ + bool enable_virtual_display; struct amdgpu_mode_info mode_info; struct work_struct hotplug_work; struct amdgpu_irq_src crtc_irq; @@ -2119,6 +2117,14 @@ struct amdgpu_device { struct kfd_dev *kfd; struct amdgpu_virtualization virtualization; + + /* link all shadow bo */ + struct list_head shadow_list; + struct mutex shadow_list_lock; + /* link all gtt */ + spinlock_t gtt_list_lock; + struct list_head gtt_list; + }; bool amdgpu_device_is_px(struct drm_device *dev); @@ -2151,6 +2157,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v)) +#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg)) +#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v)) #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg)) #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v)) #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg)) @@ -2194,6 +2202,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); #define REG_GET_FIELD(value, reg, field) \ (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field)) +#define WREG32_FIELD(reg, field, val) \ + WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) + /* * BIOS helpers. 
*/ @@ -2237,14 +2248,17 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) -#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) +#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev)) +#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l)) +#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) +#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev)) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) -#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags))) +#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) @@ -2259,9 +2273,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r)) +#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r)) +#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d)) #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) +#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r)) +#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r)) #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) @@ -2293,6 +2311,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) +#define 
amdgpu_dpm_read_sensor(adev, idx, value) \ + ((adev)->pp_enabled ? \ + (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \ + -EINVAL) + #define amdgpu_dpm_get_temperature(adev) \ ((adev)->pp_enabled ? \ (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ @@ -2344,11 +2367,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ (adev)->pm.funcs->powergate_vce((adev), (g))) -#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \ - ((adev)->pp_enabled ? \ - (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \ - (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))) - #define amdgpu_dpm_get_current_power_state(adev) \ (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) @@ -2389,6 +2407,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) /* Common functions */ int amdgpu_gpu_reset(struct amdgpu_device *adev); +bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_card_posted(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); @@ -2397,7 +2416,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, u32 ip_instance, u32 ring, struct amdgpu_ring **out_ring); -void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain); +void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, @@ -2414,6 +2433,10 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); +u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev); +int amdgpu_ttm_global_init(struct amdgpu_device *adev); +int amdgpu_ttm_init(struct amdgpu_device *adev); +void amdgpu_ttm_fini(struct amdgpu_device *adev); void amdgpu_program_register_sequence(struct amdgpu_device *adev, const u32 *registers, const u32 array_size); @@ -2425,11 +2448,13 @@ void amdgpu_register_atpx_handler(void); void amdgpu_unregister_atpx_handler(void); bool amdgpu_has_atpx_dgpu_power_cntl(void); bool amdgpu_is_atpx_hybrid(void); +bool amdgpu_atpx_dgpu_req_power_for_displays(void); #else static inline void amdgpu_register_atpx_handler(void) {} static inline void amdgpu_unregister_atpx_handler(void) {} static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } static inline bool amdgpu_is_atpx_hybrid(void) { return false; } +static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; } #endif /* @@ -2446,8 +2471,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_preclose_kms(struct drm_device *dev, struct drm_file *file_priv); -int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon); -int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon); +int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon); +int 
amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon); u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe); int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe); void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); @@ -2493,6 +2518,7 @@ static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } struct amdgpu_bo_va_mapping * amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo); +int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser); #include "amdgpu_object.h" #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 5cd7b736a9de..5796539a0bcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -25,6 +25,7 @@ #include <linux/acpi.h> #include <linux/slab.h> #include <linux/power_supply.h> +#include <linux/pm_runtime.h> #include <acpi/video.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> @@ -333,6 +334,16 @@ int amdgpu_atif_handler(struct amdgpu_device *adev, #endif } } + if (req.pending & ATIF_DGPU_DISPLAY_EVENT) { + if ((adev->flags & AMD_IS_PX) && + amdgpu_atpx_dgpu_req_power_for_displays()) { + pm_runtime_get_sync(adev->ddev->dev); + /* Just fire off a uevent and let userspace tell us what to do */ + drm_helper_hpd_irq_event(adev->ddev); + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + } + } /* TODO: check other events */ /* We've handled the event, stop the notifier chain. The ACPI interface diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index d080d0807a5b..dba8a5b25e66 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -143,14 +143,6 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *rdev) return r; } -u32 pool_to_domain(enum kgd_memory_pool p) -{ - switch (p) { - case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM; - default: return AMDGPU_GEM_DOMAIN_GTT; - } -} - int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, void **cpu_ptr) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 362bedc9e507..1a0a5f7cccbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -103,11 +103,11 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, - unsigned int timeout, uint32_t pipe_id, + unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id); static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int timeout); + unsigned int utimeout); static int kgd_address_watch_disable(struct kgd_dev *kgd); static int kgd_address_watch_execute(struct kgd_dev *kgd, unsigned int watch_point_id, @@ -437,11 +437,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) } static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, - unsigned int timeout, uint32_t pipe_id, + unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t temp; + int timeout = utimeout; acquire_queue(kgd, pipe_id, queue_id); 
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0); @@ -452,9 +453,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, temp = RREG32(mmCP_HQD_ACTIVE); if (temp & CP_HQD_ACTIVE__ACTIVE_MASK) break; - if (timeout == 0) { - pr_err("kfd: cp queue preemption time out (%dms)\n", - temp); + if (timeout <= 0) { + pr_err("kfd: cp queue preemption time out.\n"); release_queue(kgd); return -ETIME; } @@ -467,12 +467,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, } static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int timeout) + unsigned int utimeout) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; uint32_t sdma_base_addr; uint32_t temp; + int timeout = utimeout; m = get_sdma_mqd(mqd); sdma_base_addr = get_sdma_base_addr(m); @@ -485,7 +486,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT) break; - if (timeout == 0) + if (timeout <= 0) return -ETIME; msleep(20); timeout -= 20; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 04b744d64b57..6697612239c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -62,10 +62,10 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, - unsigned int timeout, uint32_t pipe_id, + unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id); static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int timeout); + unsigned int utimeout); static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid); static int kgd_address_watch_disable(struct kgd_dev *kgd); static int kgd_address_watch_execute(struct kgd_dev *kgd, @@ -349,11 +349,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) } static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, - unsigned int timeout, uint32_t pipe_id, + unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t temp; + int timeout = utimeout; acquire_queue(kgd, pipe_id, queue_id); @@ -363,9 +364,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, temp = RREG32(mmCP_HQD_ACTIVE); if (temp & CP_HQD_ACTIVE__ACTIVE_MASK) break; - if (timeout == 0) { - pr_err("kfd: cp queue preemption time out (%dms)\n", - temp); + if (timeout <= 0) { + pr_err("kfd: cp queue preemption time out.\n"); release_queue(kgd); return -ETIME; } @@ -378,12 +378,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, } static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int timeout) + unsigned int utimeout) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; uint32_t sdma_base_addr; uint32_t temp; + int timeout = utimeout; m = get_sdma_mqd(mqd); sdma_base_addr = get_sdma_base_addr(m); @@ -396,7 +397,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT) break; - if (timeout == 0) + if (timeout <= 0) return -ETIME; msleep(20); timeout -= 20; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index fe872b82e619..8e6bf548d689 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -259,6 +259,33 @@ static const int object_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown }; +bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct atom_context *ctx = mode_info->atom_context; + int index = GetIndexIntoMasterTable(DATA, Object_Header); + u16 size, data_offset; + u8 frev, crev; + ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; + ATOM_OBJECT_HEADER *obj_header; + + if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) + return false; + + if (crev < 2) + return false; + + obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); + path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) + (ctx->bios + data_offset + + le16_to_cpu(obj_header->usDisplayPathTableOffset)); + + if (path_obj->ucNumOfDispPath) + return true; + else + return false; +} + bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev) { struct amdgpu_mode_info *mode_info = &adev->mode_info; @@ -964,6 +991,48 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, return -EINVAL; switch (crev) { + case 2: + case 3: + case 5: + /* r6xx, r7xx, evergreen, ni, si. + * TODO: add support for asic_type <= CHIP_RV770*/ + if (clock_type == COMPUTE_ENGINE_PLL_PARAM) { + args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock); + + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + + dividers->post_div = args.v3.ucPostDiv; + dividers->enable_post_div = (args.v3.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; + dividers->enable_dithen = (args.v3.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true; + dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv); + dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac); + dividers->ref_div = args.v3.ucRefDiv; + dividers->vco_mode = (args.v3.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0; + } else { + /* for SI we use ComputeMemoryClockParam for memory plls */ + if (adev->asic_type >= CHIP_TAHITI) + return -EINVAL; + args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock); + if (strobe_mode) + args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN; + + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + + dividers->post_div = args.v5.ucPostDiv; + dividers->enable_post_div = (args.v5.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false; + dividers->enable_dithen = (args.v5.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true; + dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv); + dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac); + dividers->ref_div = args.v5.ucRefDiv; + dividers->vco_mode = (args.v5.ucCntlFlag & + ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 
1 : 0; + } + break; case 4: /* fusion */ args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */ @@ -1108,6 +1177,32 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); } +void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev, + u16 *vddc, u16 *vddci, u16 *mvdd) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + u8 frev, crev; + u16 data_offset; + union firmware_info *firmware_info; + + *vddc = 0; + *vddci = 0; + *mvdd = 0; + + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + firmware_info = + (union firmware_info *)(mode_info->atom_context->bios + + data_offset); + *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); + if ((frev == 2) && (crev >= 2)) { + *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage); + *mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage); + } + } +} + union set_voltage { struct _SET_VOLTAGE_PS_ALLOCATION alloc; struct _SET_VOLTAGE_PARAMETERS v1; @@ -1115,6 +1210,52 @@ union set_voltage { struct _SET_VOLTAGE_PARAMETERS_V1_3 v3; }; +int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, + u16 voltage_id, u16 *voltage) +{ + union set_voltage args; + int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); + u8 frev, crev; + + if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev)) + return -EINVAL; + + switch (crev) { + case 1: + return -EINVAL; + case 2: + args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE; + args.v2.ucVoltageMode = 0; + args.v2.usVoltageLevel = 0; + + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + + *voltage = le16_to_cpu(args.v2.usVoltageLevel); + break; + case 3: + args.v3.ucVoltageType = voltage_type; + args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL; + args.v3.usVoltageLevel = cpu_to_le16(voltage_id); + + amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); + + *voltage = le16_to_cpu(args.v3.usVoltageLevel); + break; + default: + DRM_ERROR("Unknown table version %d, %d\n", frev, crev); + return -EINVAL; + } + + return 0; +} + +int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev, + u16 *voltage, + u16 leakage_idx) +{ + return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage); +} + void amdgpu_atombios_set_voltage(struct amdgpu_device *adev, u16 voltage_level, u8 voltage_type) @@ -1335,6 +1476,50 @@ static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOL return NULL; } +int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev, + u8 voltage_type, + u8 *svd_gpio_id, u8 *svc_gpio_id) +{ + int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo); + u8 frev, crev; + u16 data_offset, size; + union voltage_object_info *voltage_info; + union voltage_object *voltage_object = NULL; + + if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, + &frev, &crev, &data_offset)) { + voltage_info = (union voltage_object_info *) + (adev->mode_info.atom_context->bios + data_offset); + + switch (frev) { + case 3: + switch (crev) { + case 1: + voltage_object = (union voltage_object *) + amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3, + voltage_type, + VOLTAGE_OBJ_SVID2); + if (voltage_object) { + *svd_gpio_id = 
voltage_object->v3.asSVID2Obj.ucSVDGpioId; + *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId; + } else { + return -EINVAL; + } + break; + default: + DRM_ERROR("unknown voltage object table\n"); + return -EINVAL; + } + break; + default: + DRM_ERROR("unknown voltage object table\n"); + return -EINVAL; + } + + } + return 0; +} + bool amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev, u8 voltage_type, u8 voltage_mode) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 8c2e69661799..17356151db38 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -140,6 +140,8 @@ struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device * uint8_t id); void amdgpu_atombios_i2c_init(struct amdgpu_device *adev); +bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev); + bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev); int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev); @@ -206,5 +208,19 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev); void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); - +int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, + u16 voltage_id, u16 *voltage); +int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev, + u16 *voltage, + u16 leakage_idx); +void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev, + u16 *vddc, u16 *vddci, u16 *mvdd); +int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, + u8 clock_type, + u32 clock, + bool strobe_mode, + struct atom_clock_dividers *dividers); +int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev, + u8 voltage_type, + u8 *svd_gpio_id, u8 *svc_gpio_id); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 10b5ddf2c588..dae35a96a694 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -29,6 +29,7 @@ struct amdgpu_atpx { acpi_handle handle; struct amdgpu_atpx_functions functions; bool is_hybrid; + bool dgpu_req_power_for_displays; }; static struct amdgpu_atpx_priv { @@ -73,6 +74,10 @@ bool amdgpu_is_atpx_hybrid(void) { return amdgpu_atpx_priv.atpx.is_hybrid; } +bool amdgpu_atpx_dgpu_req_power_for_displays(void) { + return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; +} + /** * amdgpu_atpx_call - call an ATPX method * @@ -204,6 +209,10 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) atpx->is_hybrid = true; } + atpx->dgpu_req_power_for_displays = false; + if (valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS) + atpx->dgpu_req_power_for_displays = true; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 33e47a43ae32..345305235349 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -39,7 +39,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, start_jiffies = jiffies; for (i = 0; i < n; i++) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence); + r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence, + false); if (r) goto exit_do_move; r = 
fence_wait(fence, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index bc0440f7a31d..7a8bfa34682f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -616,7 +616,7 @@ static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, un return amdgpu_irq_put(adev, adev->irq.sources[src_id], type); } -int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device, +static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device, enum amd_ip_block_type block_type, enum amd_clockgating_state state) { @@ -637,7 +637,7 @@ int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device, return r; } -int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device, +static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device, enum amd_ip_block_type block_type, enum amd_powergating_state state) { @@ -711,6 +711,47 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode return -EINVAL; } +static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device, + enum cgs_ucode_id type) +{ + CGS_FUNC_ADEV; + uint16_t fw_version; + + switch (type) { + case CGS_UCODE_ID_SDMA0: + fw_version = adev->sdma.instance[0].fw_version; + break; + case CGS_UCODE_ID_SDMA1: + fw_version = adev->sdma.instance[1].fw_version; + break; + case CGS_UCODE_ID_CP_CE: + fw_version = adev->gfx.ce_fw_version; + break; + case CGS_UCODE_ID_CP_PFP: + fw_version = adev->gfx.pfp_fw_version; + break; + case CGS_UCODE_ID_CP_ME: + fw_version = adev->gfx.me_fw_version; + break; + case CGS_UCODE_ID_CP_MEC: + fw_version = adev->gfx.mec_fw_version; + break; + case CGS_UCODE_ID_CP_MEC_JT1: + fw_version = adev->gfx.mec_fw_version; + break; + case CGS_UCODE_ID_CP_MEC_JT2: + fw_version = adev->gfx.mec_fw_version; + break; + case CGS_UCODE_ID_RLC_G: + fw_version = adev->gfx.rlc_fw_version; + break; + default: + DRM_ERROR("firmware type %d do not have version\n", type); + fw_version = 0; + } + return fw_version; +} + static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info) @@ -741,6 +782,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, info->mc_addr = gpu_addr; info->image_size = data_size; info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); + info->fw_version = amdgpu_get_firmware_version(cgs_device, type); info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); } else { char fw_name[30] = {0}; @@ -848,6 +890,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, case CGS_SYSTEM_INFO_GFX_SE_INFO: sys_info->value = adev->gfx.config.max_shader_engines; break; + case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID: + sys_info->value = adev->pdev->subsystem_device; + break; + case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID: + sys_info->value = adev->pdev->subsystem_vendor; + break; default: return -ENODEV; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index ff0b55a65ca3..2e3a0543760d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -168,12 +168,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) } /* Any defined maximum tmds clock limit we must not exceed? 
*/ - if (connector->max_tmds_clock > 0) { + if (connector->display_info.max_tmds_clock > 0) { /* mode_clock is clock in kHz for mode to be modeset on this connector */ mode_clock = amdgpu_connector->pixelclock_for_modeset; /* Maximum allowable input clock in kHz */ - max_tmds_clock = connector->max_tmds_clock * 1000; + max_tmds_clock = connector->display_info.max_tmds_clock; DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n", connector->name, mode_clock, max_tmds_clock); @@ -769,8 +769,10 @@ static void amdgpu_connector_destroy(struct drm_connector *connector) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - if (amdgpu_connector->ddc_bus->has_aux) + if (amdgpu_connector->ddc_bus->has_aux) { drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux); + amdgpu_connector->ddc_bus->has_aux = false; + } amdgpu_connector_free_edid(connector); kfree(amdgpu_connector->con_priv); drm_connector_unregister(connector); @@ -1504,6 +1506,88 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = { .force = amdgpu_connector_dvi_force, }; +static struct drm_encoder * +amdgpu_connector_virtual_encoder(struct drm_connector *connector) +{ + int enc_id = connector->encoder_ids[0]; + struct drm_encoder *encoder; + int i; + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] == 0) + break; + + encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); + if (!encoder) + continue; + + if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) + return encoder; + } + + /* pick the first one */ + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); + return NULL; +} + +static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector) +{ + struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); + + if (encoder) { + amdgpu_connector_add_common_modes(encoder, connector); + } + + return 0; +} + +static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static int +amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode) +{ + return 0; +} + +static enum drm_connector_status + +amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +static int +amdgpu_connector_virtual_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + return 0; +} + +static void amdgpu_connector_virtual_force(struct drm_connector *connector) +{ + return; +} + +static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = { + .get_modes = amdgpu_connector_virtual_get_modes, + .mode_valid = amdgpu_connector_virtual_mode_valid, + .best_encoder = amdgpu_connector_virtual_encoder, +}; + +static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = { + .dpms = amdgpu_connector_virtual_dpms, + .detect = amdgpu_connector_virtual_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = amdgpu_connector_virtual_set_property, + .destroy = amdgpu_connector_destroy, + .force = amdgpu_connector_virtual_force, +}; + void amdgpu_connector_add(struct amdgpu_device *adev, uint32_t connector_id, @@ -1888,6 +1972,17 @@ amdgpu_connector_add(struct amdgpu_device *adev, connector->interlace_allowed = false; connector->doublescan_allowed = false; break; + case DRM_MODE_CONNECTOR_VIRTUAL: + amdgpu_dig_connector = kzalloc(sizeof(struct 
amdgpu_connector_atom_dig), GFP_KERNEL); + if (!amdgpu_dig_connector) + goto failed; + amdgpu_connector->con_priv = amdgpu_dig_connector; + drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type); + drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs); + subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 0307ff5887c5..b0f6e6957536 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -91,6 +91,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, uint32_t *offset) { struct drm_gem_object *gobj; + unsigned long size; gobj = drm_gem_object_lookup(p->filp, data->handle); if (gobj == NULL) @@ -101,6 +102,11 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; p->uf_entry.tv.shared = true; p->uf_entry.user_pages = NULL; + + size = amdgpu_bo_size(p->uf_entry.robj); + if (size != PAGE_SIZE || (data->offset + 8) > size) + return -EINVAL; + *offset = data->offset; drm_gem_object_unreference_unlocked(gobj); @@ -235,70 +241,212 @@ free_chunk: return ret; } -/* Returns how many bytes TTM can move per IB. +/* Convert microseconds to bytes. */ +static u64 us_to_bytes(struct amdgpu_device *adev, s64 us) +{ + if (us <= 0 || !adev->mm_stats.log2_max_MBps) + return 0; + + /* Since accum_us is incremented by a million per second, just + * multiply it by the number of MB/s to get the number of bytes. + */ + return us << adev->mm_stats.log2_max_MBps; +} + +static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes) +{ + if (!adev->mm_stats.log2_max_MBps) + return 0; + + return bytes >> adev->mm_stats.log2_max_MBps; +} + +/* Returns how many bytes TTM can move right now. If no bytes can be moved, + * it returns 0. If it returns non-zero, it's OK to move at least one buffer, + * which means it can go over the threshold once. If that happens, the driver + * will be in debt and no other buffer migrations can be done until that debt + * is repaid. + * + * This approach allows moving a buffer of any size (it's important to allow + * that). + * + * The currency is simply time in microseconds and it increases as the clock + * ticks. The accumulated microseconds (us) are converted to bytes and + * returned. */ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) { - u64 real_vram_size = adev->mc.real_vram_size; - u64 vram_usage = atomic64_read(&adev->vram_usage); + s64 time_us, increment_us; + u64 max_bytes; + u64 free_vram, total_vram, used_vram; - /* This function is based on the current VRAM usage. + /* Allow a maximum of 200 accumulated ms. This is basically per-IB + * throttling. * - * - If all of VRAM is free, allow relocating the number of bytes that - * is equal to 1/4 of the size of VRAM for this IB. + * It means that in order to get full max MBps, at least 5 IBs per + * second must be submitted and not more than 200ms apart from each + * other. + */ + const s64 us_upper_bound = 200000; - * - If more than one half of VRAM is occupied, only allow relocating - * 1 MB of data for this IB. - * - * - From 0 to one half of used VRAM, the threshold decreases - * linearly. 
- * __________________ - * 1/4 of -|\ | - * VRAM | \ | - * | \ | - * | \ | - * | \ | - * | \ | - * | \ | - * | \________|1 MB - * |----------------| - * VRAM 0 % 100 % - * used used - * - * Note: It's a threshold, not a limit. The threshold must be crossed - * for buffer relocations to stop, so any buffer of an arbitrary size - * can be moved as long as the threshold isn't crossed before - * the relocation takes place. We don't want to disable buffer - * relocations completely. + if (!adev->mm_stats.log2_max_MBps) + return 0; + + total_vram = adev->mc.real_vram_size - adev->vram_pin_size; + used_vram = atomic64_read(&adev->vram_usage); + free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram; + + spin_lock(&adev->mm_stats.lock); + + /* Increase the amount of accumulated us. */ + time_us = ktime_to_us(ktime_get()); + increment_us = time_us - adev->mm_stats.last_update_us; + adev->mm_stats.last_update_us = time_us; + adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us, + us_upper_bound); + + /* This prevents the short period of low performance when the VRAM + * usage is low and the driver is in debt or doesn't have enough + * accumulated us to fill VRAM quickly. * - * The idea is that buffers should be placed in VRAM at creation time - * and TTM should only do a minimum number of relocations during - * command submission. In practice, you need to submit at least - * a dozen IBs to move all buffers to VRAM if they are in GTT. + * The situation can occur in these cases: + * - a lot of VRAM is freed by userspace + * - the presence of a big buffer causes a lot of evictions + * (solution: split buffers into smaller ones) * - * Also, things can get pretty crazy under memory pressure and actual - * VRAM usage can change a lot, so playing safe even at 50% does - * consistently increase performance. + * If 128 MB or 1/8th of VRAM is free, start filling it now by setting + * accum_us to a positive number. + */ + if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) { + s64 min_us; + + /* Be more aggresive on dGPUs. Try to fill a portion of free + * VRAM now. + */ + if (!(adev->flags & AMD_IS_APU)) + min_us = bytes_to_us(adev, free_vram / 4); + else + min_us = 0; /* Reset accum_us on APUs. */ + + adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us); + } + + /* This returns 0 if the driver is in debt to disallow (optional) + * buffer moves. + */ + max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us); + + spin_unlock(&adev->mm_stats.lock); + return max_bytes; +} + +/* Report how many bytes have really been moved for the last command + * submission. This can result in a debt that can stop buffer migrations + * temporarily. + */ +static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, + u64 num_bytes) +{ + spin_lock(&adev->mm_stats.lock); + adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes); + spin_unlock(&adev->mm_stats.lock); +} + +static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, + struct amdgpu_bo *bo) +{ + u64 initial_bytes_moved; + uint32_t domain; + int r; + + if (bo->pin_count) + return 0; + + /* Don't move this buffer if we have depleted our allowance + * to move it. Don't move anything if the threshold is zero. 
*/ + if (p->bytes_moved < p->bytes_moved_threshold) + domain = bo->prefered_domains; + else + domain = bo->allowed_domains; + +retry: + amdgpu_ttm_placement_from_domain(bo, domain); + initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - + initial_bytes_moved; + + if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { + domain = bo->allowed_domains; + goto retry; + } - u64 half_vram = real_vram_size >> 1; - u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage; - u64 bytes_moved_threshold = half_free_vram >> 1; - return max(bytes_moved_threshold, 1024*1024ull); + return r; } -int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, +/* Last resort, try to evict something from the current working set */ +static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, + struct amdgpu_bo_list_entry *lobj) +{ + uint32_t domain = lobj->robj->allowed_domains; + int r; + + if (!p->evictable) + return false; + + for (;&p->evictable->tv.head != &p->validated; + p->evictable = list_prev_entry(p->evictable, tv.head)) { + + struct amdgpu_bo_list_entry *candidate = p->evictable; + struct amdgpu_bo *bo = candidate->robj; + u64 initial_bytes_moved; + uint32_t other; + + /* If we reached our current BO we can forget it */ + if (candidate == lobj) + break; + + other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); + + /* Check if this BO is in one of the domains we need space for */ + if (!(other & domain)) + continue; + + /* Check if we can move this BO somewhere else */ + other = bo->allowed_domains & ~domain; + if (!other) + continue; + + /* Good we can try to move this BO somewhere else */ + amdgpu_ttm_placement_from_domain(bo, other); + initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - + initial_bytes_moved; + + if (unlikely(r)) + break; + + p->evictable = list_prev_entry(p->evictable, tv.head); + list_move(&candidate->tv.head, &p->validated); + + return true; + } + + return false; +} + +static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, struct list_head *validated) { struct amdgpu_bo_list_entry *lobj; - u64 initial_bytes_moved; int r; list_for_each_entry(lobj, validated, tv.head) { struct amdgpu_bo *bo = lobj->robj; bool binding_userptr = false; struct mm_struct *usermm; - uint32_t domain; usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); if (usermm && usermm != current->mm) @@ -313,35 +461,19 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, binding_userptr = true; } - if (bo->pin_count) - continue; - - /* Avoid moving this one if we have moved too many buffers - * for this IB already. - * - * Note that this allows moving at least one buffer of - * any size, because it doesn't take the current "bo" - * into account. We don't want to disallow buffer moves - * completely. 
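The two bitmask checks in amdgpu_cs_try_evict() decide whether a working-set BO is worth evicting: it must currently occupy a domain we are contending for, and it must be allowed to live somewhere else. The stand-alone sketch below walks one concrete case; the domain values match include/uapi/drm/amdgpu_drm.h, but the scenario itself is made up.

#include <stdio.h>
#include <stdint.h>

/* Domain bits as defined in include/uapi/drm/amdgpu_drm.h. */
#define AMDGPU_GEM_DOMAIN_CPU  0x1
#define AMDGPU_GEM_DOMAIN_GTT  0x2
#define AMDGPU_GEM_DOMAIN_VRAM 0x4

int main(void)
{
        /* The BO we are trying to validate wants VRAM. */
        uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

        /* A candidate from the working set: currently resident in VRAM,
         * allowed to live in VRAM or GTT. */
        uint32_t candidate_current = AMDGPU_GEM_DOMAIN_VRAM;
        uint32_t candidate_allowed = AMDGPU_GEM_DOMAIN_VRAM |
                                     AMDGPU_GEM_DOMAIN_GTT;

        if (!(candidate_current & domain))
                printf("candidate does not occupy the contended domain, skip\n");
        else if (!(candidate_allowed & ~domain))
                printf("candidate has nowhere else to go, skip\n");
        else
                printf("evict candidate to domain mask 0x%x (GTT here)\n",
                       candidate_allowed & ~domain);
        return 0;
}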
- */ - if (p->bytes_moved <= p->bytes_moved_threshold) - domain = bo->prefered_domains; - else - domain = bo->allowed_domains; - - retry: - amdgpu_ttm_placement_from_domain(bo, domain); - initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - - initial_bytes_moved; + if (p->evictable == lobj) + p->evictable = NULL; - if (unlikely(r)) { - if (r != -ERESTARTSYS && domain != bo->allowed_domains) { - domain = bo->allowed_domains; - goto retry; - } + do { + r = amdgpu_cs_bo_validate(p, bo); + } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj)); + if (r) return r; + + if (bo->shadow) { + r = amdgpu_cs_bo_validate(p, bo); + if (r) + return r; } if (binding_userptr) { @@ -386,8 +518,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); - if (unlikely(r != 0)) + if (unlikely(r != 0)) { + DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); goto error_free_pages; + } /* Without a BO list we don't have userptr BOs */ if (!p->bo_list) @@ -427,9 +561,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, /* Unreserve everything again. */ ttm_eu_backoff_reservation(&p->ticket, &p->validated); - /* We tried to often, just abort */ + /* We tried too many times, just abort */ if (!--tries) { r = -EDEADLK; + DRM_ERROR("deadlock in %s\n", __func__); goto error_free_pages; } @@ -441,11 +576,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, sizeof(struct page*)); if (!e->user_pages) { r = -ENOMEM; + DRM_ERROR("calloc failure in %s\n", __func__); goto error_free_pages; } r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages); if (r) { + DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n"); drm_free_large(e->user_pages); e->user_pages = NULL; goto error_free_pages; @@ -460,14 +597,23 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); p->bytes_moved = 0; + p->evictable = list_last_entry(&p->validated, + struct amdgpu_bo_list_entry, + tv.head); r = amdgpu_cs_list_validate(p, &duplicates); - if (r) + if (r) { + DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n"); goto error_validate; + } r = amdgpu_cs_list_validate(p, &p->validated); - if (r) + if (r) { + DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n"); goto error_validate; + } + + amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved); fpriv->vm.last_eviction_counter = atomic64_read(&p->adev->num_evictions); @@ -499,8 +645,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } } - if (p->uf_entry.robj) - p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj); + if (!r && p->uf_entry.robj) { + struct amdgpu_bo *uf = p->uf_entry.robj; + + r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem); + p->job->uf_addr += amdgpu_bo_gpu_offset(uf); + } error_validate: if (r) { @@ -617,7 +767,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, if (bo_va == NULL) continue; - r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; @@ -710,6 +860,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, if (r) return r; + if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) { + parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; + if (!parser->ctx->preamble_presented) { + parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST; + 
parser->ctx->preamble_presented = true; + } + } + if (parser->job->ring && parser->job->ring != ring) return -EINVAL; @@ -849,7 +1007,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, } job->owner = p->filp; - job->ctx = entity->fence_context; + job->fence_ctx = entity->fence_context; p->fence = fence_get(&job->base.s_fence->finished); cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); job->uf_sequence = cs->out.handle; @@ -1015,3 +1173,29 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, return NULL; } + +/** + * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM + * + * @parser: command submission parser context + * + * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM. + */ +int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser) +{ + unsigned i; + int r; + + if (!parser->bo_list) + return 0; + + for (i = 0; i < parser->bo_list->num_entries; i++) { + struct amdgpu_bo *bo = parser->bo_list->array[i].robj; + + r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); + if (unlikely(r)) + return r; + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 17e13621fae9..e203e5561107 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -60,6 +60,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) amd_sched_entity_fini(&adev->rings[j]->sched, &ctx->rings[j].entity); kfree(ctx->fences); + ctx->fences = NULL; return r; } return 0; @@ -77,6 +78,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) for (j = 0; j < amdgpu_sched_jobs; ++j) fence_put(ctx->rings[i].fences[j]); kfree(ctx->fences); + ctx->fences = NULL; for (i = 0; i < adev->num_rings; i++) amd_sched_entity_fini(&adev->rings[i]->sched, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 39c01b942ee4..7dbe85d67d26 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -41,16 +41,26 @@ #include "atom.h" #include "amdgpu_atombios.h" #include "amd_pcie.h" +#ifdef CONFIG_DRM_AMDGPU_SI +#include "si.h" +#endif #ifdef CONFIG_DRM_AMDGPU_CIK #include "cik.h" #endif #include "vi.h" #include "bif/bif_4_1_d.h" +#include <linux/pci.h> +#include <linux/firmware.h> static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); static const char *amdgpu_asic_name[] = { + "TAHITI", + "PITCAIRN", + "VERDE", + "OLAND", + "HAINAN", "BONAIRE", "KAVERI", "KABINI", @@ -101,7 +111,7 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, bool always_indirect) { trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); - + if ((reg * 4) < adev->rmmio_size && !always_indirect) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); else { @@ -642,6 +652,46 @@ bool amdgpu_card_posted(struct amdgpu_device *adev) } +static bool amdgpu_vpost_needed(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) + return false; + + if (amdgpu_passthrough(adev)) { + /* for FIJI: In whole GPU pass-through virtualization case + * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH) + * so amdgpu_card_posted return false and driver will incorrectly skip vPost. + * but if we force vPost do in pass-through case, the driver reload will hang. + * whether doing vPost depends on amdgpu_card_posted if smc version is above + * 00160e00 for FIJI. 
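A caller such as the UVD/VCE IB parser is expected to invoke amdgpu_cs_sysvm_access_required() before patching GART addresses into the command stream. The fragment below only sketches that calling pattern; uvd_parse_cs_example() and patch_ib_addresses() are hypothetical names, not the real parser, and the stand-in declarations exist solely to keep the fragment self-contained.

/* Sketch only: stand-in declarations; the real parsers live in
 * amdgpu_uvd.c / amdgpu_vce.c. */
struct amdgpu_cs_parser;
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
int patch_ib_addresses(struct amdgpu_cs_parser *parser);   /* hypothetical */

static int uvd_parse_cs_example(struct amdgpu_cs_parser *parser)
{
        int r;

        /* Make every list entry GART-bound before rewriting addresses. */
        r = amdgpu_cs_sysvm_access_required(parser);
        if (r)
                return r;

        return patch_ib_addresses(parser);
}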
+ */ + if (adev->asic_type == CHIP_FIJI) { + int err; + uint32_t fw_ver; + err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); + /* force vPost if error occured */ + if (err) + return true; + + fw_ver = *((uint32_t *)adev->pm.fw->data + 69); + if (fw_ver >= 0x00160e00) + return !amdgpu_card_posted(adev); + } + } else { + /* in bare-metal case, amdgpu_card_posted return false + * after system reboot/boot, and return true if driver + * reloaded. + * we shouldn't do vPost after driver reload otherwise GPU + * could hang. + */ + if (amdgpu_card_posted(adev)) + return false; + } + + /* we assume vPost is neede for all other cases */ + return true; +} + /** * amdgpu_dummy_page_init - init dummy page used by the driver * @@ -1026,7 +1076,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - amdgpu_resume_kms(dev, true, true); + amdgpu_device_resume(dev, true, true); dev->pdev->d3_delay = d3_delay; @@ -1036,7 +1086,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero printk(KERN_INFO "amdgpu: switched off\n"); drm_kms_helper_poll_disable(dev); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - amdgpu_suspend_kms(dev, true, true); + amdgpu_device_suspend(dev, true, true); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } @@ -1181,10 +1231,38 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, return 1; } +static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev) +{ + adev->enable_virtual_display = false; + + if (amdgpu_virtual_display) { + struct drm_device *ddev = adev->ddev; + const char *pci_address_name = pci_name(ddev->pdev); + char *pciaddstr, *pciaddstr_tmp, *pciaddname; + + pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); + pciaddstr_tmp = pciaddstr; + while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) { + if (!strcmp(pci_address_name, pciaddname)) { + adev->enable_virtual_display = true; + break; + } + } + + DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n", + amdgpu_virtual_display, pci_address_name, + adev->enable_virtual_display); + + kfree(pciaddstr); + } +} + static int amdgpu_early_init(struct amdgpu_device *adev) { int i, r; + amdgpu_whether_enable_virtual_display(adev); + switch (adev->asic_type) { case CHIP_TOPAZ: case CHIP_TONGA: @@ -1202,6 +1280,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev) if (r) return r; break; +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_VERDE: + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_OLAND: + case CHIP_HAINAN: + adev->family = AMDGPU_FAMILY_SI; + r = si_set_ip_blocks(adev); + if (r) + return r; + break; +#endif #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: case CHIP_HAWAII: @@ -1318,6 +1408,9 @@ static int amdgpu_late_init(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_block_status[i].valid) continue; + if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD || + adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE) + continue; /* enable clockgating to save power */ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, AMD_CG_STATE_GATE); @@ -1331,6 +1424,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev) DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); return r; } + adev->ip_block_status[i].late_initialized = true; } } @@ -1376,8 +1470,11 @@ static int amdgpu_fini(struct amdgpu_device *adev) } for (i 
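The option parsed above takes a semicolon-separated list of PCI bus addresses, e.g. amdgpu.virtual_display=0000:01:00.0;0000:02:00.0 (addresses invented for illustration). The user-space sketch below reproduces the same strsep() walk so the matching rule is easy to try out in isolation.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *this_device = "0000:02:00.0";        /* made-up address */
        char *list = strdup("0000:01:00.0;0000:02:00.0"); /* module option */
        char *cursor = list, *entry;
        int enable = 0;

        while ((entry = strsep(&cursor, ";")))
                if (!strcmp(this_device, entry))
                        enable = 1;

        printf("virtual display for %s: %d\n", this_device, enable);
        free(list);
        return 0;
}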
= adev->num_ip_blocks - 1; i >= 0; i--) { + if (!adev->ip_block_status[i].late_initialized) + continue; if (adev->ip_blocks[i].funcs->late_fini) adev->ip_blocks[i].funcs->late_fini((void *)adev); + adev->ip_block_status[i].late_initialized = false; } return 0; @@ -1433,13 +1530,10 @@ static int amdgpu_resume(struct amdgpu_device *adev) return 0; } -static bool amdgpu_device_is_virtual(void) +static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) { -#ifdef CONFIG_X86 - return boot_cpu_has(X86_FEATURE_HYPERVISOR); -#else - return false; -#endif + if (amdgpu_atombios_has_gpu_virtualization_table(adev)) + adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; } /** @@ -1461,6 +1555,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, { int r, i; bool runtime = false; + u32 max_MBps; adev->shutdown = false; adev->dev = &pdev->dev; @@ -1484,6 +1579,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->smc_wreg = &amdgpu_invalid_wreg; adev->pcie_rreg = &amdgpu_invalid_rreg; adev->pcie_wreg = &amdgpu_invalid_wreg; + adev->pciep_rreg = &amdgpu_invalid_rreg; + adev->pciep_wreg = &amdgpu_invalid_wreg; adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; adev->didt_rreg = &amdgpu_invalid_rreg; @@ -1520,9 +1617,22 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->didt_idx_lock); spin_lock_init(&adev->gc_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); + spin_lock_init(&adev->mm_stats.lock); + + INIT_LIST_HEAD(&adev->shadow_list); + mutex_init(&adev->shadow_list_lock); + + INIT_LIST_HEAD(&adev->gtt_list); + spin_lock_init(&adev->gtt_list_lock); + + if (adev->asic_type >= CHIP_BONAIRE) { + adev->rmmio_base = pci_resource_start(adev->pdev, 5); + adev->rmmio_size = pci_resource_len(adev->pdev, 5); + } else { + adev->rmmio_base = pci_resource_start(adev->pdev, 2); + adev->rmmio_size = pci_resource_len(adev->pdev, 2); + } - adev->rmmio_base = pci_resource_start(adev->pdev, 5); - adev->rmmio_size = pci_resource_len(adev->pdev, 5); adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); if (adev->rmmio == NULL) { return -ENOMEM; @@ -1530,8 +1640,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); - /* doorbell bar mapping */ - amdgpu_doorbell_init(adev); + if (adev->asic_type >= CHIP_BONAIRE) + /* doorbell bar mapping */ + amdgpu_doorbell_init(adev); /* io port mapping */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { @@ -1579,25 +1690,24 @@ int amdgpu_device_init(struct amdgpu_device *adev, goto failed; } - /* See if the asic supports SR-IOV */ - adev->virtualization.supports_sr_iov = - amdgpu_atombios_has_gpu_virtualization_table(adev); - - /* Check if we are executing in a virtualized environment */ - adev->virtualization.is_virtual = amdgpu_device_is_virtual(); - adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev); + /* detect if we are with an SRIOV vbios */ + amdgpu_device_detect_sriov_bios(adev); /* Post card if necessary */ - if (!amdgpu_card_posted(adev) || - (adev->virtualization.is_virtual && - !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) { + if (amdgpu_vpost_needed(adev)) { if (!adev->bios) { - dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); + dev_err(adev->dev, "no vBIOS found\n"); r = -EINVAL; goto failed; } - DRM_INFO("GPU not posted. 
posting now...\n"); - amdgpu_atom_asic_init(adev->mode_info.atom_context); + DRM_INFO("GPU posting now...\n"); + r = amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (r) { + dev_err(adev->dev, "gpu post error!\n"); + goto failed; + } + } else { + DRM_INFO("GPU post is not needed\n"); } /* Initialize clocks */ @@ -1628,6 +1738,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->accel_working = true; + /* Initialize the buffer migration limit. */ + if (amdgpu_moverate >= 0) + max_MBps = amdgpu_moverate; + else + max_MBps = 8; /* Allow 8 MB/s. */ + /* Get a log2 for easy divisions. */ + adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); + amdgpu_fbdev_init(adev); r = amdgpu_ib_pool_init(adev); @@ -1732,7 +1850,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->rio_mem = NULL; iounmap(adev->rmmio); adev->rmmio = NULL; - amdgpu_doorbell_fini(adev); + if (adev->asic_type >= CHIP_BONAIRE) + amdgpu_doorbell_fini(adev); amdgpu_debugfs_regs_cleanup(adev); amdgpu_debugfs_remove_files(adev); } @@ -1742,7 +1861,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) * Suspend & resume. */ /** - * amdgpu_suspend_kms - initiate device suspend + * amdgpu_device_suspend - initiate device suspend * * @pdev: drm dev pointer * @state: suspend state @@ -1751,7 +1870,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) * Returns 0 for success or an error on failure. * Called at driver suspend. */ -int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) +int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) { struct amdgpu_device *adev; struct drm_crtc *crtc; @@ -1819,6 +1938,10 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) /* Shut down the device */ pci_disable_device(dev->pdev); pci_set_power_state(dev->pdev, PCI_D3hot); + } else { + r = amdgpu_asic_reset(adev); + if (r) + DRM_ERROR("amdgpu asic reset failed\n"); } if (fbcon) { @@ -1830,7 +1953,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) } /** - * amdgpu_resume_kms - initiate device resume + * amdgpu_device_resume - initiate device resume * * @pdev: drm dev pointer * @@ -1838,7 +1961,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) * Returns 0 for success or an error on failure. * Called at driver resume. 
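Because ilog2() rounds down to a power of two, non-power-of-two moverate values are effectively truncated, and a value of 0 or 1 leaves log2_max_MBps at 0, which the conversion helpers above treat as throttling disabled (the module parameter description says as much). A stand-alone sketch; my_ilog2() is a user-space stand-in for the kernel macro.

#include <stdio.h>

/* Stand-in for the kernel's ilog2(): floor(log2(v)) for v >= 1. */
static unsigned my_ilog2(unsigned v)
{
        unsigned r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned rates[] = { 0, 1, 8, 100 };    /* example moverate values */
        unsigned i;

        for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
                unsigned max_MBps = rates[i];
                unsigned clamped = max_MBps ? max_MBps : 1; /* max(1u, x) */
                unsigned log2_max_MBps = my_ilog2(clamped);

                printf("moverate=%u -> log2_max_MBps=%u (effective %u MB/s%s)\n",
                       max_MBps, log2_max_MBps, 1u << log2_max_MBps,
                       log2_max_MBps ? "" : ", throttling disabled");
        }
        return 0;
}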
*/ -int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) +int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) { struct drm_connector *connector; struct amdgpu_device *adev = dev->dev_private; @@ -1848,22 +1971,26 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (fbcon) { + if (fbcon) console_lock(); - } + if (resume) { pci_set_power_state(dev->pdev, PCI_D0); pci_restore_state(dev->pdev); - if (pci_enable_device(dev->pdev)) { + r = pci_enable_device(dev->pdev); + if (r) { if (fbcon) console_unlock(); - return -1; + return r; } } /* post card */ - if (!amdgpu_card_posted(adev)) - amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (!amdgpu_card_posted(adev) || !resume) { + r = amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (r) + DRM_ERROR("amdgpu asic init failed\n"); + } r = amdgpu_resume(adev); if (r) @@ -1937,6 +2064,126 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) return 0; } +static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) +{ + int i; + bool asic_hang = false; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_blocks[i].funcs->check_soft_reset) + adev->ip_blocks[i].funcs->check_soft_reset(adev); + if (adev->ip_block_status[i].hang) { + DRM_INFO("IP block:%d is hang!\n", i); + asic_hang = true; + } + } + return asic_hang; +} + +static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_block_status[i].hang && + adev->ip_blocks[i].funcs->pre_soft_reset) { + r = adev->ip_blocks[i].funcs->pre_soft_reset(adev); + if (r) + return r; + } + } + + return 0; +} + +static bool amdgpu_need_full_reset(struct amdgpu_device *adev) +{ + if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) { + DRM_INFO("Some block need full reset!\n"); + return true; + } + return false; +} + +static int amdgpu_soft_reset(struct amdgpu_device *adev) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_block_status[i].hang && + adev->ip_blocks[i].funcs->soft_reset) { + r = adev->ip_blocks[i].funcs->soft_reset(adev); + if (r) + return r; + } + } + + return 0; +} + +static int amdgpu_post_soft_reset(struct amdgpu_device *adev) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_block_status[i].hang && + adev->ip_blocks[i].funcs->post_soft_reset) + r = adev->ip_blocks[i].funcs->post_soft_reset(adev); + if (r) + return r; + } + + return 0; +} + +bool amdgpu_need_backup(struct amdgpu_device *adev) +{ + if (adev->flags & AMD_IS_APU) + return false; + + return amdgpu_lockup_timeout > 0 ? 
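Each of these loops only calls a hook when the IP block provides one, so a block opts into the staged reset simply by filling in the new callbacks. The sketch below shows the shape of such a table with stand-in names and a deliberately simplified structure; the real table is struct amd_ip_funcs, whose exact layout is not reproduced here.

/* Sketch only: a simplified stand-in for the per-IP callback table. */
struct ip_reset_funcs {
        int (*check_soft_reset)(void *handle);
        int (*pre_soft_reset)(void *handle);
        int (*soft_reset)(void *handle);
        int (*post_soft_reset)(void *handle);
};

static int my_block_check(void *handle) { (void)handle; /* latch hang flag */ return 0; }
static int my_block_pre(void *handle)   { (void)handle; /* quiesce block   */ return 0; }
static int my_block_soft(void *handle)  { (void)handle; /* hit reset bits  */ return 0; }
static int my_block_post(void *handle)  { (void)handle; /* restore state   */ return 0; }

static const struct ip_reset_funcs my_block_reset_funcs = {
        .check_soft_reset = my_block_check,
        .pre_soft_reset   = my_block_pre,
        .soft_reset       = my_block_soft,
        .post_soft_reset  = my_block_post,
};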
true : false; +} + +static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct fence **fence) +{ + uint32_t domain; + int r; + + if (!bo->shadow) + return 0; + + r = amdgpu_bo_reserve(bo, false); + if (r) + return r; + domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); + /* if bo has been evicted, then no need to recover */ + if (domain == AMDGPU_GEM_DOMAIN_VRAM) { + r = amdgpu_bo_restore_from_shadow(adev, ring, bo, + NULL, fence, true); + if (r) { + DRM_ERROR("recover page table failed!\n"); + goto err; + } + } +err: + amdgpu_bo_unreserve(bo); + return r; +} + /** * amdgpu_gpu_reset - reset the asic * @@ -1949,6 +2196,12 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) { int i, r; int resched; + bool need_full_reset; + + if (!amdgpu_check_soft_reset(adev)) { + DRM_INFO("No hardware hang detected. Did some blocks stall?\n"); + return 0; + } atomic_inc(&adev->gpu_reset_counter); @@ -1967,40 +2220,93 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(adev); - /* save scratch */ - amdgpu_atombios_scratch_regs_save(adev); - r = amdgpu_suspend(adev); + need_full_reset = amdgpu_need_full_reset(adev); -retry: - /* Disable fb access */ - if (adev->mode_info.num_crtc) { - struct amdgpu_mode_mc_save save; - amdgpu_display_stop_mc_access(adev, &save); - amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!need_full_reset) { + amdgpu_pre_soft_reset(adev); + r = amdgpu_soft_reset(adev); + amdgpu_post_soft_reset(adev); + if (r || amdgpu_check_soft_reset(adev)) { + DRM_INFO("soft reset failed, will fallback to full reset!\n"); + need_full_reset = true; + } } - r = amdgpu_asic_reset(adev); - /* post card */ - amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (need_full_reset) { + /* save scratch */ + amdgpu_atombios_scratch_regs_save(adev); + r = amdgpu_suspend(adev); - if (!r) { - dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); - r = amdgpu_resume(adev); +retry: + /* Disable fb access */ + if (adev->mode_info.num_crtc) { + struct amdgpu_mode_mc_save save; + amdgpu_display_stop_mc_access(adev, &save); + amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); + } + + r = amdgpu_asic_reset(adev); + /* post card */ + amdgpu_atom_asic_init(adev->mode_info.atom_context); + + if (!r) { + dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); + r = amdgpu_resume(adev); + } + /* restore scratch */ + amdgpu_atombios_scratch_regs_restore(adev); } - /* restore scratch */ - amdgpu_atombios_scratch_regs_restore(adev); if (!r) { + amdgpu_irq_gpu_reset_resume_helper(adev); + if (need_full_reset && amdgpu_need_backup(adev)) { + r = amdgpu_ttm_recover_gart(adev); + if (r) + DRM_ERROR("gart recovery failed!!!\n"); + } r = amdgpu_ib_ring_tests(adev); if (r) { dev_err(adev->dev, "ib ring test failed (%d).\n", r); r = amdgpu_suspend(adev); + need_full_reset = true; goto retry; } + /** + * recovery vm page tables, since we cannot depend on VRAM is + * consistent after gpu full reset. 
+ */ + if (need_full_reset && amdgpu_need_backup(adev)) { + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; + struct amdgpu_bo *bo, *tmp; + struct fence *fence = NULL, *next = NULL; + + DRM_INFO("recover vram bo from shadow\n"); + mutex_lock(&adev->shadow_list_lock); + list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { + amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); + if (fence) { + r = fence_wait(fence, false); + if (r) { + WARN(r, "recovery from shadow isn't comleted\n"); + break; + } + } + fence_put(fence); + fence = next; + } + mutex_unlock(&adev->shadow_list_lock); + if (fence) { + r = fence_wait(fence, false); + if (r) + WARN(r, "recovery from shadow isn't comleted\n"); + } + fence_put(fence); + } for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring) continue; + amd_sched_job_recovery(&ring->sched); kthread_unpark(ring->sched.thread); } @@ -2020,7 +2326,6 @@ retry: /* bad news, how to tell it to userspace ? */ dev_info(adev->dev, "GPU reset failed\n"); } - amdgpu_irq_gpu_reset_resume_helper(adev); return r; } @@ -2178,22 +2483,26 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, struct amdgpu_device *adev = f->f_inode->i_private; ssize_t result = 0; int r; - bool use_bank; + bool pm_pg_lock, use_bank; unsigned instance_bank, sh_bank, se_bank; if (size & 0x3 || *pos & 0x3) return -EINVAL; + /* are we reading registers for which a PG lock is necessary? */ + pm_pg_lock = (*pos >> 23) & 1; + if (*pos & (1ULL << 62)) { se_bank = (*pos >> 24) & 0x3FF; sh_bank = (*pos >> 34) & 0x3FF; instance_bank = (*pos >> 44) & 0x3FF; use_bank = 1; - *pos &= 0xFFFFFF; } else { use_bank = 0; } + *pos &= 0x3FFFF; + if (use_bank) { if (sh_bank >= adev->gfx.config.max_sh_per_se || se_bank >= adev->gfx.config.max_shader_engines) @@ -2203,6 +2512,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, sh_bank, instance_bank); } + if (pm_pg_lock) + mutex_lock(&adev->pm.mutex); + while (size) { uint32_t value; @@ -2228,6 +2540,9 @@ end: mutex_unlock(&adev->grbm_idx_mutex); } + if (pm_pg_lock) + mutex_unlock(&adev->pm.mutex); + return result; } @@ -2385,7 +2700,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, while (size) { uint32_t value; - value = RREG32_SMC(*pos >> 2); + value = RREG32_SMC(*pos); r = put_user(value, (uint32_t *)buf); if (r) return r; @@ -2416,7 +2731,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * if (r) return r; - WREG32_SMC(*pos >> 2, value); + WREG32_SMC(*pos, value); result += 4; buf += 4; @@ -2438,12 +2753,12 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; - config = kmalloc(256 * sizeof(*config), GFP_KERNEL); + config = kmalloc_array(256, sizeof(*config), GFP_KERNEL); if (!config) return -ENOMEM; /* version, increment each time something is added */ - config[no_regs++] = 0; + config[no_regs++] = 2; config[no_regs++] = adev->gfx.config.max_shader_engines; config[no_regs++] = adev->gfx.config.max_tile_pipes; config[no_regs++] = adev->gfx.config.max_cu_per_sh; @@ -2468,6 +2783,15 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, config[no_regs++] = adev->gfx.config.gb_addr_config; config[no_regs++] = adev->gfx.config.num_rbs; + /* rev==1 */ + config[no_regs++] = adev->rev_id; + config[no_regs++] = adev->pg_flags; + config[no_regs++] = adev->cg_flags; + + /* rev==2 */ + 
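As decoded above, the file offset of the amdgpu_regs debugfs file now carries more than the register address: bit 23 requests the power-gating lock and bit 62 enables explicit SE/SH/instance selection in bits 24, 34 and 44, with the register byte offset kept in the low bits. A user-space sketch of building such an offset; register 0x1234, the SE index and the debugfs path are placeholders, not values taken from the driver.

#define _FILE_OFFSET_BITS 64
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        uint64_t reg = 0x1234;          /* register byte offset, assumed */
        uint64_t se = 1, sh = 0, instance = 0;
        uint32_t value;

        uint64_t pos = reg |
                       (1ULL << 62) |   /* enable bank selection */
                       (se << 24) | (sh << 34) | (instance << 44);

        int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
        if (fd < 0)
                return 1;
        if (pread(fd, &value, sizeof(value), (off_t)pos) == sizeof(value))
                printf("reg 0x%llx (SE1) = 0x%08x\n",
                       (unsigned long long)reg, value);
        close(fd);
        return 0;
}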
config[no_regs++] = adev->family; + config[no_regs++] = adev->external_rev_id; + while (size && (*pos < no_regs * 4)) { uint32_t value; @@ -2488,6 +2812,29 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, return result; } +static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + int idx, r; + int32_t value; + + if (size != 4 || *pos & 0x3) + return -EINVAL; + + /* convert offset to sensor number */ + idx = *pos >> 2; + + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) + r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value); + else + return -EINVAL; + + if (!r) + r = put_user(value, (int32_t *)buf); + + return !r ? 4 : r; +} static const struct file_operations amdgpu_debugfs_regs_fops = { .owner = THIS_MODULE, @@ -2520,12 +2867,19 @@ static const struct file_operations amdgpu_debugfs_gca_config_fops = { .llseek = default_llseek }; +static const struct file_operations amdgpu_debugfs_sensors_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_sensor_read, + .llseek = default_llseek +}; + static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_regs_fops, &amdgpu_debugfs_regs_didt_fops, &amdgpu_debugfs_regs_pcie_fops, &amdgpu_debugfs_regs_smc_fops, &amdgpu_debugfs_gca_config_fops, + &amdgpu_debugfs_sensors_fops, }; static const char *debugfs_regs_names[] = { @@ -2534,6 +2888,7 @@ static const char *debugfs_regs_names[] = { "amdgpu_regs_pcie", "amdgpu_regs_smc", "amdgpu_gca_config", + "amdgpu_sensors", }; static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 76f96028313d..083e2b429872 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -41,7 +41,7 @@ static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb) container_of(cb, struct amdgpu_flip_work, cb); fence_put(f); - schedule_work(&work->flip_work); + schedule_work(&work->flip_work.work); } static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, @@ -63,16 +63,17 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, static void amdgpu_flip_work_func(struct work_struct *__work) { + struct delayed_work *delayed_work = + container_of(__work, struct delayed_work, work); struct amdgpu_flip_work *work = - container_of(__work, struct amdgpu_flip_work, flip_work); + container_of(delayed_work, struct amdgpu_flip_work, flip_work); struct amdgpu_device *adev = work->adev; struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &amdgpuCrtc->base; unsigned long flags; - unsigned i, repcnt = 4; - int vpos, hpos, stat, min_udelay = 0; - struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; + unsigned i; + int vpos, hpos; if (amdgpu_flip_handle_fence(work, &work->excl)) return; @@ -81,55 +82,23 @@ static void amdgpu_flip_work_func(struct work_struct *__work) if (amdgpu_flip_handle_fence(work, &work->shared[i])) return; - /* We borrow the event spin lock for protecting flip_status */ - spin_lock_irqsave(&crtc->dev->event_lock, flags); - - /* If this happens to execute within the "virtually extended" vblank - * interval before the start of the real vblank interval then it needs - * to delay programming the mmio flip until the real vblank is entered. 
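The new amdgpu_sensors file returns one 32-bit value per read; the sensor is chosen by the file offset divided by four and forwarded to the powerplay read_sensor() hook. A minimal user-space sketch follows; sensor index 0 and the debugfs path are placeholders, and valid indices are defined by the powerplay interface rather than shown here.

#define _FILE_OFFSET_BITS 64
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int idx = 0;                    /* placeholder sensor index */
        int32_t value;
        int fd = open("/sys/kernel/debug/dri/0/amdgpu_sensors", O_RDONLY);

        if (fd < 0)
                return 1;
        /* Exactly four bytes must be requested, at offset idx * 4. */
        if (pread(fd, &value, sizeof(value), (off_t)idx * 4) == sizeof(value))
                printf("sensor %d = %d\n", idx, value);
        close(fd);
        return 0;
}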
- * This prevents completing a flip too early due to the way we fudge - * our vblank counter and vblank timestamps in order to work around the - * problem that the hw fires vblank interrupts before actual start of - * vblank (when line buffer refilling is done for a frame). It - * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for - * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts. - * - * In practice this won't execute very often unless on very fast - * machines because the time window for this to happen is very small. + /* Wait until we're out of the vertical blank period before the one + * targeted by the flip */ - while (amdgpuCrtc->enabled && --repcnt) { - /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank - * start in hpos, and to the "fudged earlier" vblank start in - * vpos. - */ - stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, - GET_DISTANCE_TO_VBLANKSTART, - &vpos, &hpos, NULL, NULL, - &crtc->hwmode); - - if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != - (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || - !(vpos >= 0 && hpos <= 0)) - break; - - /* Sleep at least until estimated real start of hw vblank */ - min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); - if (min_udelay > vblank->framedur_ns / 2000) { - /* Don't wait ridiculously long - something is wrong */ - repcnt = 0; - break; - } - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - usleep_range(min_udelay, 2 * min_udelay); - spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (amdgpuCrtc->enabled && + (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, + &vpos, &hpos, NULL, NULL, + &crtc->hwmode) + & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && + (int)(work->target_vblank - + amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) { + schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000)); + return; } - if (!repcnt) - DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " - "framedur %d, linedur %d, stat %d, vpos %d, " - "hpos %d\n", work->crtc_id, min_udelay, - vblank->framedur_ns / 1000, - vblank->linedur_ns / 1000, stat, vpos, hpos); + /* We borrow the event spin lock for protecting flip_status */ + spin_lock_irqsave(&crtc->dev->event_lock, flags); /* Do the flip (mmio) */ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async); @@ -154,25 +123,25 @@ static void amdgpu_unpin_work_func(struct work_struct *__work) int r; /* unpin of the old buffer */ - r = amdgpu_bo_reserve(work->old_rbo, false); + r = amdgpu_bo_reserve(work->old_abo, false); if (likely(r == 0)) { - r = amdgpu_bo_unpin(work->old_rbo); + r = amdgpu_bo_unpin(work->old_abo); if (unlikely(r != 0)) { DRM_ERROR("failed to unpin buffer after flip\n"); } - amdgpu_bo_unreserve(work->old_rbo); + amdgpu_bo_unreserve(work->old_abo); } else DRM_ERROR("failed to reserve buffer after flip\n"); - amdgpu_bo_unref(&work->old_rbo); + amdgpu_bo_unref(&work->old_abo); kfree(work->shared); kfree(work); } -int amdgpu_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) +int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, uint32_t target) { struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; @@ -181,7 +150,7 @@ int amdgpu_crtc_page_flip(struct 
drm_crtc *crtc, struct amdgpu_framebuffer *new_amdgpu_fb; struct drm_gem_object *obj; struct amdgpu_flip_work *work; - struct amdgpu_bo *new_rbo; + struct amdgpu_bo *new_abo; unsigned long flags; u64 tiling_flags; u64 base; @@ -191,7 +160,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, if (work == NULL) return -ENOMEM; - INIT_WORK(&work->flip_work, amdgpu_flip_work_func); + INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func); INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func); work->event = event; @@ -204,28 +173,28 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, obj = old_amdgpu_fb->obj; /* take a reference to the old object */ - work->old_rbo = gem_to_amdgpu_bo(obj); - amdgpu_bo_ref(work->old_rbo); + work->old_abo = gem_to_amdgpu_bo(obj); + amdgpu_bo_ref(work->old_abo); new_amdgpu_fb = to_amdgpu_framebuffer(fb); obj = new_amdgpu_fb->obj; - new_rbo = gem_to_amdgpu_bo(obj); + new_abo = gem_to_amdgpu_bo(obj); /* pin the new buffer */ - r = amdgpu_bo_reserve(new_rbo, false); + r = amdgpu_bo_reserve(new_abo, false); if (unlikely(r != 0)) { - DRM_ERROR("failed to reserve new rbo buffer before flip\n"); + DRM_ERROR("failed to reserve new abo buffer before flip\n"); goto cleanup; } - r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base); + r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base); if (unlikely(r != 0)) { r = -EINVAL; - DRM_ERROR("failed to pin new rbo buffer before flip\n"); + DRM_ERROR("failed to pin new abo buffer before flip\n"); goto unreserve; } - r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl, + r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl, &work->shared_count, &work->shared); if (unlikely(r != 0)) { @@ -233,16 +202,12 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, goto unpin; } - amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); - amdgpu_bo_unreserve(new_rbo); + amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags); + amdgpu_bo_unreserve(new_abo); work->base = base; - - r = drm_crtc_vblank_get(crtc); - if (r) { - DRM_ERROR("failed to get vblank before flip\n"); - goto pflip_cleanup; - } + work->target_vblank = target - drm_crtc_vblank_count(crtc) + + amdgpu_get_vblank_counter_kms(dev, work->crtc_id); /* we borrow the event spin lock for protecting flip_wrok */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -250,7 +215,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); r = -EBUSY; - goto vblank_cleanup; + goto pflip_cleanup; } amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING; @@ -262,26 +227,23 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, /* update crtc fb */ crtc->primary->fb = fb; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - amdgpu_flip_work_func(&work->flip_work); + amdgpu_flip_work_func(&work->flip_work.work); return 0; -vblank_cleanup: - drm_crtc_vblank_put(crtc); - pflip_cleanup: - if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) { - DRM_ERROR("failed to reserve new rbo in error path\n"); + if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) { + DRM_ERROR("failed to reserve new abo in error path\n"); goto cleanup; } unpin: - if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) { - DRM_ERROR("failed to unpin new rbo in error path\n"); + if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) { + DRM_ERROR("failed to unpin new abo in error path\n"); } unreserve: - amdgpu_bo_unreserve(new_rbo); + amdgpu_bo_unreserve(new_abo); cleanup: - 
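The target passed to amdgpu_crtc_page_flip_target() is expressed in the DRM core's vblank count space, so it is rebased here onto the hardware counter returned by amdgpu_get_vblank_counter_kms(); the flip worker then defers using a signed 32-bit difference, which stays correct across counter wraparound. A tiny sketch of that comparison, with made-up counts:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t target_vblank = 5;             /* just past wraparound */
        uint32_t hw_counter = 0xfffffffe;       /* current hardware count */

        /* Same signed-difference test as the flip worker uses. */
        if ((int)(target_vblank - hw_counter) > 0)
                printf("target still in the future, defer the flip\n");
        else
                printf("target reached, program the flip now\n");
        return 0;
}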
amdgpu_bo_unref(&work->old_rbo); + amdgpu_bo_unref(&work->old_abo); fence_put(work->excl); for (i = 0; i < work->shared_count; ++i) fence_put(work->shared[i]); @@ -335,7 +297,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set) return ret; } -static const char *encoder_names[38] = { +static const char *encoder_names[41] = { "NONE", "INTERNAL_LVDS", "INTERNAL_TMDS1", @@ -374,6 +336,9 @@ static const char *encoder_names[38] = { "TRAVIS", "INTERNAL_VCE", "INTERNAL_UNIPHY3", + "HDMI_ANX9805", + "INTERNAL_AMCLK", + "VIRTUAL", }; static const char *hpd_names[6] = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 9aa533cf4ad1..71ed27eb3dde 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -53,13 +53,19 @@ * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same * at the end of IBs. * - 3.3.0 - Add VM support for UVD on supported hardware. + * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS. + * - 3.5.0 - Add support for new UVD_NO_OP register. + * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer. + * - 3.7.0 - Add support for VCE clock list packet + * - 3.8.0 - Add support raster config init in the kernel */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 3 +#define KMS_DRIVER_MINOR 8 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; int amdgpu_gart_size = -1; /* auto */ +int amdgpu_moverate = -1; /* auto */ int amdgpu_benchmarking = 0; int amdgpu_testing = 0; int amdgpu_audio = -1; @@ -84,11 +90,14 @@ int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; int amdgpu_powerplay = -1; int amdgpu_powercontainment = 1; +int amdgpu_sclk_deep_sleep_en = 1; unsigned amdgpu_pcie_gen_cap = 0; unsigned amdgpu_pcie_lane_cap = 0; unsigned amdgpu_cg_mask = 0xffffffff; unsigned amdgpu_pg_mask = 0xffffffff; char *amdgpu_disable_cu = NULL; +char *amdgpu_virtual_display = NULL; +unsigned amdgpu_pp_feature_mask = 0xffffffff; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -96,6 +105,9 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)"); module_param_named(gartsize, amdgpu_gart_size, int, 0600); +MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. 
(32, 64, etc., -1=auto, 0=1=disabled)"); +module_param_named(moverate, amdgpu_moverate, int, 0600); + MODULE_PARM_DESC(benchmark, "Run benchmark"); module_param_named(benchmark, amdgpu_benchmarking, int, 0444); @@ -162,13 +174,17 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); -#ifdef CONFIG_DRM_AMD_POWERPLAY MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))"); module_param_named(powerplay, amdgpu_powerplay, int, 0444); MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)"); module_param_named(powercontainment, amdgpu_powercontainment, int, 0444); -#endif + +MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))"); +module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444); + +MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)"); +module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444); MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))"); module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444); @@ -185,7 +201,84 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); +MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)"); +module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444); + static const struct pci_device_id pciidlist[] = { +#ifdef CONFIG_DRM_AMDGPU_SI + {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, + {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY}, + {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY}, + {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY}, + {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN}, + {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN}, + {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN}, + {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN}, + {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN}, + {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN}, + {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN}, + {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN}, + {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN}, + {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND}, + {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND}, + {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND}, + {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND}, + {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY}, + {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND}, + {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, + {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, + {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, + {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY}, + {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, + {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, + {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, + {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, + {0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, + {0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, + {0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE}, + {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, + {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, + {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, + {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, + {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, + {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, +#endif #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, @@ -341,7 +434,7 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev) #ifdef CONFIG_X86 primary = 
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; #endif - remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary); + drm_fb_helper_remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary); kfree(ap); return 0; @@ -383,32 +476,70 @@ amdgpu_pci_remove(struct pci_dev *pdev) drm_put_dev(dev); } +static void +amdgpu_pci_shutdown(struct pci_dev *pdev) +{ + /* if we are running in a VM, make sure the device + * torn down properly on reboot/shutdown. + * unfortunately we can't detect certain + * hypervisors so just do this all the time. + */ + amdgpu_pci_remove(pdev); +} + static int amdgpu_pmops_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); - return amdgpu_suspend_kms(drm_dev, true, true); + return amdgpu_device_suspend(drm_dev, true, true); } static int amdgpu_pmops_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - return amdgpu_resume_kms(drm_dev, true, true); + + /* GPU comes up enabled by the bios on resume */ + if (amdgpu_device_is_px(drm_dev)) { + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + + return amdgpu_device_resume(drm_dev, true, true); } static int amdgpu_pmops_freeze(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); - return amdgpu_suspend_kms(drm_dev, false, true); + return amdgpu_device_suspend(drm_dev, false, true); } static int amdgpu_pmops_thaw(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + + struct drm_device *drm_dev = pci_get_drvdata(pdev); + return amdgpu_device_resume(drm_dev, false, true); +} + +static int amdgpu_pmops_poweroff(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + struct drm_device *drm_dev = pci_get_drvdata(pdev); + return amdgpu_device_suspend(drm_dev, true, true); +} + +static int amdgpu_pmops_restore(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); - return amdgpu_resume_kms(drm_dev, false, true); + return amdgpu_device_resume(drm_dev, false, true); } static int amdgpu_pmops_runtime_suspend(struct device *dev) @@ -426,7 +557,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) drm_kms_helper_poll_disable(drm_dev); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); - ret = amdgpu_suspend_kms(drm_dev, false, false); + ret = amdgpu_device_suspend(drm_dev, false, false); pci_save_state(pdev); pci_disable_device(pdev); pci_ignore_hotplug(pdev); @@ -459,7 +590,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) return ret; pci_set_master(pdev); - ret = amdgpu_resume_kms(drm_dev, false, false); + ret = amdgpu_device_resume(drm_dev, false, false); drm_kms_helper_poll_enable(drm_dev); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; @@ -513,8 +644,8 @@ static const struct dev_pm_ops amdgpu_pm_ops = { .resume = amdgpu_pmops_resume, .freeze = amdgpu_pmops_freeze, .thaw = amdgpu_pmops_thaw, - .poweroff = amdgpu_pmops_freeze, - .restore = amdgpu_pmops_resume, + .poweroff = amdgpu_pmops_poweroff, + .restore = amdgpu_pmops_restore, .runtime_suspend = amdgpu_pmops_runtime_suspend, .runtime_resume = amdgpu_pmops_runtime_resume, .runtime_idle = amdgpu_pmops_runtime_idle, @@ -596,6 +727,7 @@ static struct pci_driver amdgpu_kms_pci_driver = { .id_table = pciidlist, .probe = amdgpu_pci_probe, .remove = 
amdgpu_pci_remove, + .shutdown = amdgpu_pci_shutdown, .driver.pm = &amdgpu_pm_ops, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 919146780a15..9fb8aa4d6bae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -25,7 +25,7 @@ */ #include <linux/module.h> #include <linux/slab.h> -#include <linux/fb.h> +#include <linux/pm_runtime.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> @@ -48,8 +48,35 @@ struct amdgpu_fbdev { struct amdgpu_device *adev; }; +static int +amdgpufb_open(struct fb_info *info, int user) +{ + struct amdgpu_fbdev *rfbdev = info->par; + struct amdgpu_device *adev = rfbdev->adev; + int ret = pm_runtime_get_sync(adev->ddev->dev); + if (ret < 0 && ret != -EACCES) { + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return ret; + } + return 0; +} + +static int +amdgpufb_release(struct fb_info *info, int user) +{ + struct amdgpu_fbdev *rfbdev = info->par; + struct amdgpu_device *adev = rfbdev->adev; + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + return 0; +} + static struct fb_ops amdgpufb_ops = { .owner = THIS_MODULE, + .fb_open = amdgpufb_open, + .fb_release = amdgpufb_release, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = drm_fb_helper_cfb_fillrect, @@ -88,14 +115,14 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) { - struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj); + struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); int ret; - ret = amdgpu_bo_reserve(rbo, false); + ret = amdgpu_bo_reserve(abo, false); if (likely(ret == 0)) { - amdgpu_bo_kunmap(rbo); - amdgpu_bo_unpin(rbo); - amdgpu_bo_unreserve(rbo); + amdgpu_bo_kunmap(abo); + amdgpu_bo_unpin(abo); + amdgpu_bo_unreserve(abo); } drm_gem_object_unreference_unlocked(gobj); } @@ -106,7 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, { struct amdgpu_device *adev = rfbdev->adev; struct drm_gem_object *gobj = NULL; - struct amdgpu_bo *rbo = NULL; + struct amdgpu_bo *abo = NULL; bool fb_tiled = false; /* useful for testing */ u32 tiling_flags = 0; int ret; @@ -132,30 +159,30 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, aligned_size); return -ENOMEM; } - rbo = gem_to_amdgpu_bo(gobj); + abo = gem_to_amdgpu_bo(gobj); if (fb_tiled) tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1); - ret = amdgpu_bo_reserve(rbo, false); + ret = amdgpu_bo_reserve(abo, false); if (unlikely(ret != 0)) goto out_unref; if (tiling_flags) { - ret = amdgpu_bo_set_tiling_flags(rbo, + ret = amdgpu_bo_set_tiling_flags(abo, tiling_flags); if (ret) dev_err(adev->dev, "FB failed to set tiling flags\n"); } - ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL); + ret = amdgpu_bo_pin_restricted(abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL); if (ret) { - amdgpu_bo_unreserve(rbo); + amdgpu_bo_unreserve(abo); goto out_unref; } - ret = amdgpu_bo_kmap(rbo, NULL); - amdgpu_bo_unreserve(rbo); + ret = amdgpu_bo_kmap(abo, NULL); + amdgpu_bo_unreserve(abo); if (ret) { goto out_unref; } @@ -177,7 +204,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, struct drm_framebuffer *fb = NULL; struct drm_mode_fb_cmd2 mode_cmd; struct drm_gem_object *gobj = NULL; - struct amdgpu_bo *rbo = NULL; + struct amdgpu_bo *abo = NULL; int ret; 
unsigned long tmp; @@ -196,7 +223,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, return ret; } - rbo = gem_to_amdgpu_bo(gobj); + abo = gem_to_amdgpu_bo(gobj); /* okay we have an object now allocate the framebuffer */ info = drm_fb_helper_alloc_fbi(helper); @@ -219,7 +246,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, /* setup helper */ rfbdev->helper.fb = fb; - memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo)); + memset_io(abo->kptr, 0x0, amdgpu_bo_size(abo)); strcpy(info->fix.id, "amdgpudrmfb"); @@ -228,11 +255,11 @@ static int amdgpufb_create(struct drm_fb_helper *helper, info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &amdgpufb_ops; - tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start; + tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start; info->fix.smem_start = adev->mc.aper_base + tmp; - info->fix.smem_len = amdgpu_bo_size(rbo); - info->screen_base = rbo->kptr; - info->screen_size = amdgpu_bo_size(rbo); + info->fix.smem_len = amdgpu_bo_size(abo); + info->screen_base = abo->kptr; + info->screen_size = amdgpu_bo_size(abo); drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); @@ -249,7 +276,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base); - DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo)); + DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo)); DRM_INFO("fb depth is %d\n", fb->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); @@ -259,7 +286,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, out_destroy_fbi: drm_fb_helper_release_fbi(helper); out_unref: - if (rbo) { + if (abo) { } if (fb && ret) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 0b109aebfec6..3a2e42f4b897 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -454,6 +454,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) fence_put(ring->fence_drv.fences[j]); kfree(ring->fence_drv.fences); + ring->fence_drv.fences = NULL; ring->fence_drv.initialized = false; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 0feea347f680..21a1242fc13b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -238,7 +238,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, t = offset / AMDGPU_GPU_PAGE_SIZE; p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); for (i = 0; i < pages; i++, p++) { -#ifdef CONFIG_AMDGPU_GART_DEBUGFS +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS adev->gart.pages[p] = NULL; #endif page_base = adev->dummy_page.addr; @@ -286,7 +286,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); for (i = 0; i < pages; i++, p++) { -#ifdef CONFIG_AMDGPU_GART_DEBUGFS +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS adev->gart.pages[p] = pagelist[i]; #endif if (adev->gart.ptr) { @@ -331,7 +331,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev) DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", adev->gart.num_cpu_pages, adev->gart.num_gpu_pages); -#ifdef CONFIG_AMDGPU_GART_DEBUGFS +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS /* Allocate pages table */ adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages); if (adev->gart.pages == NULL) { @@ 
-357,7 +357,7 @@ void amdgpu_gart_fini(struct amdgpu_device *adev) amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages); } adev->gart.ready = false; -#ifdef CONFIG_AMDGPU_GART_DEBUGFS +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS vfree(adev->gart.pages); adev->gart.pages = NULL; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h index 503d54098128..e73728d90388 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h @@ -31,14 +31,6 @@ #define AMDGPU_GWS_SHIFT PAGE_SHIFT #define AMDGPU_OA_SHIFT PAGE_SHIFT -#define AMDGPU_PL_GDS TTM_PL_PRIV0 -#define AMDGPU_PL_GWS TTM_PL_PRIV1 -#define AMDGPU_PL_OA TTM_PL_PRIV2 - -#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0 -#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1 -#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2 - struct amdgpu_ring; struct amdgpu_bo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 88fbed2389c0..a7ea9a3b454e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -118,23 +118,23 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev) */ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) { - struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj); - struct amdgpu_device *adev = rbo->adev; + struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = abo->adev; struct amdgpu_fpriv *fpriv = file_priv->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; int r; - r = amdgpu_bo_reserve(rbo, false); + r = amdgpu_bo_reserve(abo, false); if (r) return r; - bo_va = amdgpu_vm_bo_find(vm, rbo); + bo_va = amdgpu_vm_bo_find(vm, abo); if (!bo_va) { - bo_va = amdgpu_vm_bo_add(adev, vm, rbo); + bo_va = amdgpu_vm_bo_add(adev, vm, abo); } else { ++bo_va->ref_count; } - amdgpu_bo_unreserve(rbo); + amdgpu_bo_unreserve(abo); return 0; } @@ -528,7 +528,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, goto error_unreserve; if (operation == AMDGPU_VA_OP_MAP) - r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); + r = amdgpu_vm_bo_update(adev, bo_va, false); error_unreserve: ttm_eu_backoff_reservation(&ticket, &list); @@ -547,7 +547,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gobj; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_fpriv *fpriv = filp->driver_priv; - struct amdgpu_bo *rbo; + struct amdgpu_bo *abo; struct amdgpu_bo_va *bo_va; struct ttm_validate_buffer tv, tv_pd; struct ww_acquire_ctx ticket; @@ -587,10 +587,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, gobj = drm_gem_object_lookup(filp, args->handle); if (gobj == NULL) return -ENOENT; - rbo = gem_to_amdgpu_bo(gobj); + abo = gem_to_amdgpu_bo(gobj); INIT_LIST_HEAD(&list); INIT_LIST_HEAD(&duplicates); - tv.bo = &rbo->tbo; + tv.bo = &abo->tbo; tv.shared = true; list_add(&tv.head, &list); @@ -604,7 +604,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, return r; } - bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); + bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo); if (!bo_va) { ttm_eu_backoff_reservation(&ticket, &list); drm_gem_object_unreference_unlocked(gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c new file mode 100644 index 000000000000..f86c84427778 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -0,0 +1,239 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#include <drm/drmP.h> +#include "amdgpu.h" + +struct amdgpu_gtt_mgr { + struct drm_mm mm; + spinlock_t lock; + uint64_t available; +}; + +/** + * amdgpu_gtt_mgr_init - init GTT manager and DRM MM + * + * @man: TTM memory type manager + * @p_size: maximum size of GTT + * + * Allocate and initialize the GTT manager. + */ +static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man, + unsigned long p_size) +{ + struct amdgpu_gtt_mgr *mgr; + + mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + if (!mgr) + return -ENOMEM; + + drm_mm_init(&mgr->mm, 0, p_size); + spin_lock_init(&mgr->lock); + mgr->available = p_size; + man->priv = mgr; + return 0; +} + +/** + * amdgpu_gtt_mgr_fini - free and destroy GTT manager + * + * @man: TTM memory type manager + * + * Destroy and free the GTT manager, returns -EBUSY if ranges are still + * allocated inside it. + */ +static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) +{ + struct amdgpu_gtt_mgr *mgr = man->priv; + + spin_lock(&mgr->lock); + if (!drm_mm_clean(&mgr->mm)) { + spin_unlock(&mgr->lock); + return -EBUSY; + } + + drm_mm_takedown(&mgr->mm); + spin_unlock(&mgr->lock); + kfree(mgr); + man->priv = NULL; + return 0; +} + +/** + * amdgpu_gtt_mgr_alloc - allocate new ranges + * + * @man: TTM memory type manager + * @tbo: TTM BO we need this range for + * @place: placement flags and restrictions + * @mem: the resulting mem object + * + * Allocate the address space for a node. 
+ */ +int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *tbo, + const struct ttm_place *place, + struct ttm_mem_reg *mem) +{ + struct amdgpu_gtt_mgr *mgr = man->priv; + struct drm_mm_node *node = mem->mm_node; + enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST; + enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT; + unsigned long fpfn, lpfn; + int r; + + if (node->start != AMDGPU_BO_INVALID_OFFSET) + return 0; + + if (place) + fpfn = place->fpfn; + else + fpfn = 0; + + if (place && place->lpfn) + lpfn = place->lpfn; + else + lpfn = man->size; + + if (place && place->flags & TTM_PL_FLAG_TOPDOWN) { + sflags = DRM_MM_SEARCH_BELOW; + aflags = DRM_MM_CREATE_TOP; + } + + spin_lock(&mgr->lock); + r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages, + mem->page_alignment, 0, + fpfn, lpfn, sflags, aflags); + spin_unlock(&mgr->lock); + + if (!r) { + mem->start = node->start; + if (&tbo->mem == mem) + tbo->offset = (tbo->mem.start << PAGE_SHIFT) + + tbo->bdev->man[tbo->mem.mem_type].gpu_offset; + } + + return r; +} + +/** + * amdgpu_gtt_mgr_new - allocate a new node + * + * @man: TTM memory type manager + * @tbo: TTM BO we need this range for + * @place: placement flags and restrictions + * @mem: the resulting mem object + * + * Dummy, allocate the node but no space for it yet. + */ +static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *tbo, + const struct ttm_place *place, + struct ttm_mem_reg *mem) +{ + struct amdgpu_gtt_mgr *mgr = man->priv; + struct drm_mm_node *node; + int r; + + spin_lock(&mgr->lock); + if (mgr->available < mem->num_pages) { + spin_unlock(&mgr->lock); + return 0; + } + mgr->available -= mem->num_pages; + spin_unlock(&mgr->lock); + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + node->start = AMDGPU_BO_INVALID_OFFSET; + mem->mm_node = node; + + if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) { + r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem); + if (unlikely(r)) { + kfree(node); + mem->mm_node = NULL; + } + } else { + mem->start = node->start; + } + + return 0; +} + +/** + * amdgpu_gtt_mgr_del - free ranges + * + * @man: TTM memory type manager + * @tbo: TTM BO we need this range for + * @place: placement flags and restrictions + * @mem: TTM memory object + * + * Free the allocated GTT again. + */ +static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + struct amdgpu_gtt_mgr *mgr = man->priv; + struct drm_mm_node *node = mem->mm_node; + + if (!node) + return; + + spin_lock(&mgr->lock); + if (node->start != AMDGPU_BO_INVALID_OFFSET) + drm_mm_remove_node(node); + mgr->available += mem->num_pages; + spin_unlock(&mgr->lock); + + kfree(node); + mem->mm_node = NULL; +} + +/** + * amdgpu_gtt_mgr_debug - dump VRAM table + * + * @man: TTM memory type manager + * @prefix: text prefix + * + * Dump the table content using printk. 
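(Illustrative aside, not part of the diff above.) The GTT manager introduced here defers address-space allocation: amdgpu_gtt_mgr_new() only charges the request against mgr->available and returns a node whose start is AMDGPU_BO_INVALID_OFFSET, unless the placement pins it down (fpfn, lpfn or TOPDOWN), while amdgpu_gtt_mgr_alloc() assigns the real GTT range later, once the buffer actually needs an address. The bind-time caller is not shown in this hunk (amdgpu_ttm_bind() is referenced elsewhere in the series), so the following is only a rough sketch of how such a caller might trigger the deferred allocation:

/* Hypothetical deferred-bind caller; names and flow are illustrative only. */
static int example_bind_gtt(struct ttm_buffer_object *tbo,
                            struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &tbo->bdev->man[TTM_PL_TT];

        /* amdgpu_gtt_mgr_new() only reserved space; pick a real offset now */
        if (mem->start == AMDGPU_BO_INVALID_OFFSET)
                return amdgpu_gtt_mgr_alloc(man, tbo, NULL, mem);

        return 0;       /* a concrete range was already assigned up front */
}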
+ */ +static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, + const char *prefix) +{ + struct amdgpu_gtt_mgr *mgr = man->priv; + + spin_lock(&mgr->lock); + drm_mm_debug_table(&mgr->mm, prefix); + spin_unlock(&mgr->lock); +} + +const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = { + amdgpu_gtt_mgr_init, + amdgpu_gtt_mgr_fini, + amdgpu_gtt_mgr_new, + amdgpu_gtt_mgr_del, + amdgpu_gtt_mgr_debug +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 31a676376d73..91d367399956 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -158,8 +158,8 @@ static const struct i2c_algorithm amdgpu_atombios_i2c_algo = { }; struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, - struct amdgpu_i2c_bus_rec *rec, - const char *name) + const struct amdgpu_i2c_bus_rec *rec, + const char *name) { struct amdgpu_i2c_chan *i2c; int ret; @@ -186,10 +186,8 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, "AMDGPU i2c hw bus %s", name); i2c->adapter.algo = &amdgpu_atombios_i2c_algo; ret = i2c_add_adapter(&i2c->adapter); - if (ret) { - DRM_ERROR("Failed to register hw i2c %s\n", name); + if (ret) goto out_free; - } } else { /* set the amdgpu bit adapter */ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), @@ -222,6 +220,7 @@ void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c) { if (!i2c) return; + WARN_ON(i2c->has_aux); i2c_del_adapter(&i2c->adapter); kfree(i2c); } @@ -251,8 +250,8 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev) /* Add additional buses */ void amdgpu_i2c_add(struct amdgpu_device *adev, - struct amdgpu_i2c_bus_rec *rec, - const char *name) + const struct amdgpu_i2c_bus_rec *rec, + const char *name) { struct drm_device *dev = adev->ddev; int i; @@ -268,7 +267,7 @@ void amdgpu_i2c_add(struct amdgpu_device *adev, /* looks up bus based on id */ struct amdgpu_i2c_chan * amdgpu_i2c_lookup(struct amdgpu_device *adev, - struct amdgpu_i2c_bus_rec *i2c_bus) + const struct amdgpu_i2c_bus_rec *i2c_bus) { int i; @@ -338,7 +337,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus, /* ddc router switching */ void -amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector) +amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector) { u8 val; @@ -367,7 +366,7 @@ amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector) /* clock/data router switching */ void -amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector) +amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *amdgpu_connector) { u8 val; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h index d81e19b53973..63c2ff7499e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h @@ -25,20 +25,20 @@ #define __AMDGPU_I2C_H__ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, - struct amdgpu_i2c_bus_rec *rec, - const char *name); + const struct amdgpu_i2c_bus_rec *rec, + const char *name); void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c); void amdgpu_i2c_init(struct amdgpu_device *adev); void amdgpu_i2c_fini(struct amdgpu_device *adev); void amdgpu_i2c_add(struct amdgpu_device *adev, - struct amdgpu_i2c_bus_rec *rec, - const char *name); + const struct amdgpu_i2c_bus_rec *rec, + const char *name); struct amdgpu_i2c_chan * amdgpu_i2c_lookup(struct amdgpu_device *adev, - struct amdgpu_i2c_bus_rec *i2c_bus); 
+ const struct amdgpu_i2c_bus_rec *i2c_bus); void -amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector); +amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *connector); void -amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector); +amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *connector); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index ec1282af2479..6a6c86c9c169 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -124,7 +124,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, bool skip_preamble, need_ctx_switch; unsigned patch_offset = ~0; struct amdgpu_vm *vm; - uint64_t ctx; + uint64_t fence_ctx; + uint32_t status = 0, alloc_size; unsigned i; int r = 0; @@ -135,14 +136,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, /* ring tests don't use a job */ if (job) { vm = job->vm; - ctx = job->ctx; + fence_ctx = job->fence_ctx; } else { vm = NULL; - ctx = 0; + fence_ctx = 0; } if (!ring->ready) { - dev_err(adev->dev, "couldn't schedule ib\n"); + dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name); return -EINVAL; } @@ -151,7 +152,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, return -EINVAL; } - r = amdgpu_ring_alloc(ring, 256 * num_ibs); + alloc_size = amdgpu_ring_get_dma_frame_size(ring) + + num_ibs * amdgpu_ring_get_emit_ib_size(ring); + + r = amdgpu_ring_alloc(ring, alloc_size); if (r) { dev_err(adev->dev, "scheduling IB failed (%d).\n", r); return r; @@ -174,13 +178,22 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, /* always set cond_exec_polling to CONTINUE */ *ring->cond_exe_cpu_addr = 1; - skip_preamble = ring->current_ctx == ctx; - need_ctx_switch = ring->current_ctx != ctx; + skip_preamble = ring->current_ctx == fence_ctx; + need_ctx_switch = ring->current_ctx != fence_ctx; + if (job && ring->funcs->emit_cntxcntl) { + if (need_ctx_switch) + status |= AMDGPU_HAVE_CTX_SWITCH; + status |= job->preamble_status; + amdgpu_ring_emit_cntxcntl(ring, status); + } + for (i = 0; i < num_ibs; ++i) { ib = &ibs[i]; /* drop preamble IBs if we don't have a context switch */ - if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) + if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && + skip_preamble && + !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST)) continue; amdgpu_ring_emit_ib(ring, ib, job ? 
job->vm_id : 0, @@ -209,7 +222,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (patch_offset != ~0 && ring->funcs->patch_cond_exec) amdgpu_ring_patch_cond_exec(ring, patch_offset); - ring->current_ctx = ctx; + ring->current_ctx = fence_ctx; + if (ring->funcs->emit_switch_buffer) + amdgpu_ring_emit_switch_buffer(ring); amdgpu_ring_commit(ring); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 534fc04e80fd..3ab4c65ecc8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -40,32 +40,15 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev) /* Allocate ring buffer */ if (adev->irq.ih.ring_obj == NULL) { - r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, NULL, &adev->irq.ih.ring_obj); + r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->irq.ih.ring_obj, + &adev->irq.ih.gpu_addr, + (void **)&adev->irq.ih.ring); if (r) { DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); return r; } - r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(adev->irq.ih.ring_obj, - AMDGPU_GEM_DOMAIN_GTT, - &adev->irq.ih.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->irq.ih.ring_obj); - DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r); - return r; - } - r = amdgpu_bo_kmap(adev->irq.ih.ring_obj, - (void **)&adev->irq.ih.ring); - amdgpu_bo_unreserve(adev->irq.ih.ring_obj); - if (r) { - DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r); - return r; - } } return 0; } @@ -136,8 +119,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, */ void amdgpu_ih_ring_fini(struct amdgpu_device *adev) { - int r; - if (adev->irq.ih.use_bus_addr) { if (adev->irq.ih.ring) { /* add 8 bytes for the rptr/wptr shadows and @@ -149,17 +130,9 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev) adev->irq.ih.ring = NULL; } } else { - if (adev->irq.ih.ring_obj) { - r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false); - if (likely(r == 0)) { - amdgpu_bo_kunmap(adev->irq.ih.ring_obj); - amdgpu_bo_unpin(adev->irq.ih.ring_obj); - amdgpu_bo_unreserve(adev->irq.ih.ring_obj); - } - amdgpu_bo_unref(&adev->irq.ih.ring_obj); - adev->irq.ih.ring = NULL; - adev->irq.ih.ring_obj = NULL; - } + amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj, + &adev->irq.ih.gpu_addr, + (void **)&adev->irq.ih.ring); amdgpu_wb_free(adev, adev->irq.ih.wptr_offs); amdgpu_wb_free(adev, adev->irq.ih.rptr_offs); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 7ef09352e534..f016464035b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -70,6 +70,7 @@ struct amdgpu_irq { /* gen irq stuff */ struct irq_domain *domain; /* GPU irq controller domain */ unsigned virq[AMDGPU_MAX_IRQ_SRC_ID]; + uint32_t srbm_soft_reset; }; void amdgpu_irq_preinstall(struct drm_device *dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 6674d40eb3ab..8c5807994073 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -91,7 +91,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) amdgpu_ib_free(job->adev, &job->ibs[i], f); } -void amdgpu_job_free_cb(struct amd_sched_job *s_job) +static void amdgpu_job_free_cb(struct amd_sched_job 
*s_job) { struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); @@ -124,7 +124,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, return r; job->owner = owner; - job->ctx = entity->fence_context; + job->fence_ctx = entity->fence_context; *f = fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); amd_sched_entity_push_job(&job->base); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d942654a1de0..c2c7fb140338 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -292,14 +292,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file type = AMD_IP_BLOCK_TYPE_UVD; ring_mask = adev->uvd.ring.ready ? 1 : 0; ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; - ib_size_alignment = 8; + ib_size_alignment = 16; break; case AMDGPU_HW_IP_VCE: type = AMD_IP_BLOCK_TYPE_VCE; - for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++) + for (i = 0; i < adev->vce.num_rings; i++) ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; - ib_size_alignment = 8; + ib_size_alignment = 1; break; default: return -EINVAL; @@ -373,6 +373,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file case AMDGPU_INFO_NUM_BYTES_MOVED: ui64 = atomic64_read(&adev->num_bytes_moved); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; + case AMDGPU_INFO_NUM_EVICTIONS: + ui64 = atomic64_read(&adev->num_evictions); + return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VRAM_USAGE: ui64 = atomic64_read(&adev->vram_usage); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; @@ -539,12 +542,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return r; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); - if (unlikely(!fpriv)) - return -ENOMEM; + if (unlikely(!fpriv)) { + r = -ENOMEM; + goto out_suspend; + } r = amdgpu_vm_init(adev, &fpriv->vm); - if (r) - goto error_free; + if (r) { + kfree(fpriv); + goto out_suspend; + } mutex_init(&fpriv->bo_list_lock); idr_init(&fpriv->bo_list_handles); @@ -553,12 +560,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) file_priv->driver_priv = fpriv; +out_suspend: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); - return 0; - -error_free: - kfree(fpriv); return r; } @@ -597,6 +601,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, kfree(fpriv); file_priv->driver_priv = NULL; + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); } /** @@ -611,6 +618,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, void amdgpu_driver_preclose_kms(struct drm_device *dev, struct drm_file *file_priv) { + pm_runtime_get_sync(dev->dev); } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 6b1d7d306564..7b0eff7d060b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -39,6 +39,8 @@ #include <drm/drm_plane_helper.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> +#include <linux/hrtimer.h> +#include "amdgpu_irq.h" struct amdgpu_bo; struct amdgpu_device; @@ -339,6 +341,8 @@ struct amdgpu_mode_info { int num_dig; /* number of dig blocks */ int disp_priority; const struct amdgpu_display_funcs *funcs; + struct hrtimer vblank_timer; + enum amdgpu_interrupt_state vsync_timer_enabled; }; #define 
AMDGPU_MAX_BL_LEVEL 0xFF @@ -587,10 +591,10 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile void amdgpu_print_display_setup(struct drm_device *dev); int amdgpu_modeset_create_props(struct amdgpu_device *adev); int amdgpu_crtc_set_config(struct drm_mode_set *set); -int amdgpu_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags); +int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, uint32_t target); extern const struct drm_mode_config_funcs amdgpu_mode_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6f0873c75a25..aa074fac0c7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -38,20 +38,17 @@ #include "amdgpu_trace.h" -int amdgpu_ttm_init(struct amdgpu_device *adev); -void amdgpu_ttm_fini(struct amdgpu_device *adev); static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev, struct ttm_mem_reg *mem) { - u64 ret = 0; - if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) { - ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) > - adev->mc.visible_vram_size ? - adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) : - mem->size; - } - return ret; + if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size) + return 0; + + return ((mem->start << PAGE_SHIFT) + mem->size) > + adev->mc.visible_vram_size ? + adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) : + mem->size; } static void amdgpu_update_memory_usage(struct amdgpu_device *adev, @@ -99,6 +96,11 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) drm_gem_object_release(&bo->gem_base); amdgpu_bo_unref(&bo->parent); + if (!list_empty(&bo->shadow_list)) { + mutex_lock(&bo->adev->shadow_list_lock); + list_del_init(&bo->shadow_list); + mutex_unlock(&bo->adev->shadow_list_lock); + } kfree(bo->metadata); kfree(bo); } @@ -112,90 +114,99 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, struct ttm_placement *placement, - struct ttm_place *placements, + struct ttm_place *places, u32 domain, u64 flags) { - u32 c = 0, i; - - placement->placement = placements; - placement->busy_placement = placements; + u32 c = 0; if (domain & AMDGPU_GEM_DOMAIN_VRAM) { + unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; + if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS && - adev->mc.visible_vram_size < adev->mc.real_vram_size) { - placements[c].fpfn = - adev->mc.visible_vram_size >> PAGE_SHIFT; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN; + !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && + adev->mc.visible_vram_size < adev->mc.real_vram_size) { + places[c].fpfn = visible_pfn; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM | + TTM_PL_FLAG_TOPDOWN; + c++; } - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; - if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) - placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN; + if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) + places[c].lpfn = visible_pfn; + else + 
places[c].flags |= TTM_PL_FLAG_TOPDOWN; + c++; } if (domain & AMDGPU_GEM_DOMAIN_GTT) { - if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT | + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_TT; + if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) + places[c].flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; - } else { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; - } + else + places[c].flags |= TTM_PL_FLAG_CACHED; + c++; } if (domain & AMDGPU_GEM_DOMAIN_CPU) { - if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM | + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_SYSTEM; + if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) + places[c].flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; - } else { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; - } + else + places[c].flags |= TTM_PL_FLAG_CACHED; + c++; } if (domain & AMDGPU_GEM_DOMAIN_GDS) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_UNCACHED | - AMDGPU_PL_FLAG_GDS; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS; + c++; } + if (domain & AMDGPU_GEM_DOMAIN_GWS) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_UNCACHED | - AMDGPU_PL_FLAG_GWS; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS; + c++; } + if (domain & AMDGPU_GEM_DOMAIN_OA) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_UNCACHED | - AMDGPU_PL_FLAG_OA; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA; + c++; } if (!c) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + c++; } + placement->num_placement = c; - placement->num_busy_placement = c; + placement->placement = places; - for (i = 0; i < c; i++) { - if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && - (placements[i].flags & TTM_PL_FLAG_VRAM) && - !placements[i].fpfn) - placements[i].lpfn = - adev->mc.visible_vram_size >> PAGE_SHIFT; - else - placements[i].lpfn = 0; - } + placement->num_busy_placement = c; + placement->busy_placement = places; } -void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain) +void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain) { - amdgpu_ttm_placement_init(rbo->adev, &rbo->placement, - rbo->placements, domain, rbo->flags); + amdgpu_ttm_placement_init(abo->adev, &abo->placement, + abo->placements, domain, abo->flags); } static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, @@ -211,6 +222,98 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, bo->placement.busy_placement = bo->placements; } +/** + * amdgpu_bo_create_kernel - create BO for kernel use + * + * @adev: amdgpu device object + * @size: size for the new BO + * @align: alignment for the new BO + * @domain: where to place it + * @bo_ptr: resulting BO + * @gpu_addr: GPU addr of the pinned BO + * @cpu_addr: optional CPU address mapping + * + * Allocates and pins a BO for kernel internal use. + * + * Returns 0 on success, negative error code otherwise. 
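(Illustrative aside, not part of the diff above.) amdgpu_bo_create_kernel() folds the create/reserve/pin/kmap sequence that amdgpu_ih.c and amdgpu_ring.c previously open-coded into one helper, and amdgpu_bo_free_kernel() is its counterpart. A minimal usage sketch, with an invented buffer size:

/* Illustrative only; mirrors the call sites converted later in this diff. */
struct amdgpu_bo *bo = NULL;
u64 gpu_addr;
void *cpu_ptr;
int r;

r = amdgpu_bo_create_kernel(adev, 4096, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT,
                            &bo, &gpu_addr, &cpu_ptr);
if (r)
        return r;

/* cpu_ptr is the kernel mapping, gpu_addr the pinned GPU address */

amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);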
+ */ +int amdgpu_bo_create_kernel(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr) +{ + int r; + + r = amdgpu_bo_create(adev, size, align, true, domain, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, bo_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); + return r; + } + + r = amdgpu_bo_reserve(*bo_ptr, false); + if (r) { + dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r); + goto error_free; + } + + r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr); + if (r) { + dev_err(adev->dev, "(%d) kernel bo pin failed\n", r); + goto error_unreserve; + } + + if (cpu_addr) { + r = amdgpu_bo_kmap(*bo_ptr, cpu_addr); + if (r) { + dev_err(adev->dev, "(%d) kernel bo map failed\n", r); + goto error_unreserve; + } + } + + amdgpu_bo_unreserve(*bo_ptr); + + return 0; + +error_unreserve: + amdgpu_bo_unreserve(*bo_ptr); + +error_free: + amdgpu_bo_unref(bo_ptr); + + return r; +} + +/** + * amdgpu_bo_free_kernel - free BO for kernel use + * + * @bo: amdgpu BO to free + * + * unmaps and unpin a BO for kernel internal use. + */ +void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, + void **cpu_addr) +{ + if (*bo == NULL) + return; + + if (likely(amdgpu_bo_reserve(*bo, false) == 0)) { + if (cpu_addr) + amdgpu_bo_kunmap(*bo); + + amdgpu_bo_unpin(*bo); + amdgpu_bo_unreserve(*bo); + } + amdgpu_bo_unref(bo); + + if (gpu_addr) + *gpu_addr = 0; + + if (cpu_addr) + *cpu_addr = NULL; +} + int amdgpu_bo_create_restricted(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, @@ -249,7 +352,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, return r; } bo->adev = adev; - INIT_LIST_HEAD(&bo->list); + INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT | @@ -277,11 +380,79 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, if (unlikely(r != 0)) { return r; } + + if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && + bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { + struct fence *fence; + + if (adev->mman.buffer_funcs_ring == NULL || + !adev->mman.buffer_funcs_ring->ready) { + r = -EBUSY; + goto fail_free; + } + + r = amdgpu_bo_reserve(bo, false); + if (unlikely(r != 0)) + goto fail_free; + + amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + if (unlikely(r != 0)) + goto fail_unreserve; + + amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); + amdgpu_bo_fence(bo, fence, false); + amdgpu_bo_unreserve(bo); + fence_put(bo->tbo.moving); + bo->tbo.moving = fence_get(fence); + fence_put(fence); + } *bo_ptr = bo; trace_amdgpu_bo_create(bo); return 0; + +fail_unreserve: + amdgpu_bo_unreserve(bo); +fail_free: + amdgpu_bo_unref(&bo); + return r; +} + +static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, + unsigned long size, int byte_align, + struct amdgpu_bo *bo) +{ + struct ttm_placement placement = {0}; + struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + int r; + + if (bo->shadow) + return 0; + + bo->flags |= AMDGPU_GEM_CREATE_SHADOW; + memset(&placements, 0, + (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); + + amdgpu_ttm_placement_init(adev, &placement, + placements, AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_CPU_GTT_USWC); + + r = amdgpu_bo_create_restricted(adev, size, byte_align, true, + AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_CPU_GTT_USWC, + 
NULL, &placement, + bo->tbo.resv, + &bo->shadow); + if (!r) { + bo->shadow->parent = amdgpu_bo_ref(bo); + mutex_lock(&adev->shadow_list_lock); + list_add_tail(&bo->shadow_list, &adev->shadow_list); + mutex_unlock(&adev->shadow_list_lock); + } + + return r; } int amdgpu_bo_create(struct amdgpu_device *adev, @@ -293,6 +464,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, { struct ttm_placement placement = {0}; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + int r; memset(&placements, 0, (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); @@ -300,9 +472,83 @@ int amdgpu_bo_create(struct amdgpu_device *adev, amdgpu_ttm_placement_init(adev, &placement, placements, domain, flags); - return amdgpu_bo_create_restricted(adev, size, byte_align, kernel, - domain, flags, sg, &placement, - resv, bo_ptr); + r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, + domain, flags, sg, &placement, + resv, bo_ptr); + if (r) + return r; + + if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) { + r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); + if (r) + amdgpu_bo_unref(bo_ptr); + } + + return r; +} + +int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, + bool direct) + +{ + struct amdgpu_bo *shadow = bo->shadow; + uint64_t bo_addr, shadow_addr; + int r; + + if (!shadow) + return -EINVAL; + + bo_addr = amdgpu_bo_gpu_offset(bo); + shadow_addr = amdgpu_bo_gpu_offset(bo->shadow); + + r = reservation_object_reserve_shared(bo->tbo.resv); + if (r) + goto err; + + r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr, + amdgpu_bo_size(bo), resv, fence, + direct); + if (!r) + amdgpu_bo_fence(bo, *fence, true); + +err: + return r; +} + +int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, + bool direct) + +{ + struct amdgpu_bo *shadow = bo->shadow; + uint64_t bo_addr, shadow_addr; + int r; + + if (!shadow) + return -EINVAL; + + bo_addr = amdgpu_bo_gpu_offset(bo); + shadow_addr = amdgpu_bo_gpu_offset(bo->shadow); + + r = reservation_object_reserve_shared(bo->tbo.resv); + if (r) + goto err; + + r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr, + amdgpu_bo_size(bo), resv, fence, + direct); + if (!r) + amdgpu_bo_fence(bo, *fence, true); + +err: + return r; } int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) @@ -380,16 +626,17 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; if (bo->pin_count) { + uint32_t mem_type = bo->tbo.mem.mem_type; + + if (domain != amdgpu_mem_type_to_domain(mem_type)) + return -EINVAL; + bo->pin_count++; if (gpu_addr) *gpu_addr = amdgpu_bo_gpu_offset(bo); if (max_offset != 0) { - u64 domain_start; - if (domain == AMDGPU_GEM_DOMAIN_VRAM) - domain_start = bo->adev->mc.vram_start; - else - domain_start = bo->adev->mc.gtt_start; + u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset; WARN_ON_ONCE(max_offset < (amdgpu_bo_gpu_offset(bo) - domain_start)); } @@ -401,7 +648,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, /* force to pin into visible video ram */ if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && - (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) { + (!max_offset || max_offset > + bo->adev->mc.visible_vram_size)) { if (WARN_ON_ONCE(min_offset > bo->adev->mc.visible_vram_size)) 
return -EINVAL; @@ -420,19 +668,28 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (likely(r == 0)) { - bo->pin_count = 1; - if (gpu_addr != NULL) - *gpu_addr = amdgpu_bo_gpu_offset(bo); - if (domain == AMDGPU_GEM_DOMAIN_VRAM) { - bo->adev->vram_pin_size += amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - bo->adev->invisible_pin_size += amdgpu_bo_size(bo); - } else - bo->adev->gart_pin_size += amdgpu_bo_size(bo); - } else { + if (unlikely(r)) { dev_err(bo->adev->dev, "%p pin failed\n", bo); + goto error; + } + r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); + if (unlikely(r)) { + dev_err(bo->adev->dev, "%p bind failed\n", bo); + goto error; } + + bo->pin_count = 1; + if (gpu_addr != NULL) + *gpu_addr = amdgpu_bo_gpu_offset(bo); + if (domain == AMDGPU_GEM_DOMAIN_VRAM) { + bo->adev->vram_pin_size += amdgpu_bo_size(bo); + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + bo->adev->invisible_pin_size += amdgpu_bo_size(bo); + } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { + bo->adev->gart_pin_size += amdgpu_bo_size(bo); + } + +error: return r; } @@ -457,16 +714,20 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (likely(r == 0)) { - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { - bo->adev->vram_pin_size -= amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); - } else - bo->adev->gart_pin_size -= amdgpu_bo_size(bo); - } else { + if (unlikely(r)) { dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo); + goto error; } + + if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { + bo->adev->vram_pin_size -= amdgpu_bo_size(bo); + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); + } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { + bo->adev->gart_pin_size -= amdgpu_bo_size(bo); + } + +error: return r; } @@ -588,23 +849,23 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { - struct amdgpu_bo *rbo; + struct amdgpu_bo *abo; struct ttm_mem_reg *old_mem = &bo->mem; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) return; - rbo = container_of(bo, struct amdgpu_bo, tbo); - amdgpu_vm_bo_invalidate(rbo->adev, rbo); + abo = container_of(bo, struct amdgpu_bo, tbo); + amdgpu_vm_bo_invalidate(abo->adev, abo); /* update statistics */ if (!new_mem) return; /* move_notify is called before move happens */ - amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem); + amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem); - trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type); + trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type); } int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) @@ -637,7 +898,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) for (i = 0; i < abo->placement.num_placement; i++) { /* Force into visible VRAM */ if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) && - (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn)) + (!abo->placements[i].lpfn || + abo->placements[i].lpfn > lpfn)) abo->placements[i].lpfn = lpfn; } r = ttm_bo_validate(bo, &abo->placement, false, false); @@ -674,3 +936,24 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, else reservation_object_add_excl_fence(resv, fence); } 
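(Illustrative aside, not part of the diff above.) The new amdgpu_bo_backup_to_shadow()/amdgpu_bo_restore_from_shadow() helpers, together with the per-device shadow_list, are building blocks for the improved GPU-reset support mentioned in the merge description: a BO created with AMDGPU_GEM_CREATE_SHADOW keeps a GTT copy that can repopulate VRAM after a reset. The actual recovery path is outside this hunk, so this is only a hypothetical sketch of how it might walk the list:

/* Hypothetical recovery loop; illustrative only. */
static int example_restore_shadows(struct amdgpu_device *adev,
                                   struct amdgpu_ring *ring)
{
        struct amdgpu_bo *bo;
        struct fence *fence = NULL;
        int r = 0;

        mutex_lock(&adev->shadow_list_lock);
        list_for_each_entry(bo, &adev->shadow_list, shadow_list) {
                /* copy the GTT shadow back over the re-pinned VRAM BO */
                r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
                                                  bo->tbo.resv, &fence, true);
                if (r)
                        break;
                fence_put(fence);
                fence = NULL;
        }
        mutex_unlock(&adev->shadow_list_lock);

        fence_put(fence);
        return r;
}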
+ +/** + * amdgpu_bo_gpu_offset - return GPU offset of bo + * @bo: amdgpu object for which we query the offset + * + * Returns current GPU offset of the object. + * + * Note: object should either be pinned or reserved when calling this + * function, it might be useful to add check for this for debugging. + */ +u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) +{ + WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); + WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT && + !amdgpu_ttm_is_bound(bo->tbo.ttm)); + WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) && + !bo->pin_count); + WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); + + return bo->tbo.offset; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index bdb01d932548..8255034d73eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -31,6 +31,8 @@ #include <drm/amdgpu_drm.h> #include "amdgpu.h" +#define AMDGPU_BO_INVALID_OFFSET LONG_MAX + /** * amdgpu_mem_type_to_domain - return domain corresponding to mem_type * @mem_type: ttm memory type @@ -85,21 +87,6 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo) ttm_bo_unreserve(&bo->tbo); } -/** - * amdgpu_bo_gpu_offset - return GPU offset of bo - * @bo: amdgpu object for which we query the offset - * - * Returns current GPU offset of the object. - * - * Note: object should either be pinned or reserved when calling this - * function, it might be useful to add check for this for debugging. - */ -static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) -{ - WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); - return bo->tbo.offset; -} - static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo) { return bo->tbo.num_pages << PAGE_SHIFT; @@ -139,6 +126,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct ttm_placement *placement, struct reservation_object *resv, struct amdgpu_bo **bo_ptr); +int amdgpu_bo_create_kernel(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr); +void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, + void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); void amdgpu_bo_kunmap(struct amdgpu_bo *bo); struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); @@ -165,6 +158,19 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, bool shared); +u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); +int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, bool direct); +int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, + bool direct); + /* * sub allocation diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c index d15314957732..8e67c1210d7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c @@ -25,6 +25,7 @@ #include "amdgpu.h" #include "atom.h" #include "atombios_encoders.h" +#include "amdgpu_pll.h" #include <asm/div64.h> #include <linux/gcd.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 
5cc7052e391d..accc908bdc88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1103,54 +1103,46 @@ force: void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) { - if (adev->pp_enabled) + if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) { + /* enable/disable UVD */ + mutex_lock(&adev->pm.mutex); amdgpu_dpm_powergate_uvd(adev, !enable); - else { - if (adev->pm.funcs->powergate_uvd) { + mutex_unlock(&adev->pm.mutex); + } else { + if (enable) { mutex_lock(&adev->pm.mutex); - /* enable/disable UVD */ - amdgpu_dpm_powergate_uvd(adev, !enable); + adev->pm.dpm.uvd_active = true; + adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; mutex_unlock(&adev->pm.mutex); } else { - if (enable) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = true; - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; - mutex_unlock(&adev->pm.mutex); - } else { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = false; - mutex_unlock(&adev->pm.mutex); - } - amdgpu_pm_compute_clocks(adev); + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.uvd_active = false; + mutex_unlock(&adev->pm.mutex); } - + amdgpu_pm_compute_clocks(adev); } } void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) { - if (adev->pp_enabled) + if (adev->pp_enabled || adev->pm.funcs->powergate_vce) { + /* enable/disable VCE */ + mutex_lock(&adev->pm.mutex); amdgpu_dpm_powergate_vce(adev, !enable); - else { - if (adev->pm.funcs->powergate_vce) { + mutex_unlock(&adev->pm.mutex); + } else { + if (enable) { mutex_lock(&adev->pm.mutex); - amdgpu_dpm_powergate_vce(adev, !enable); + adev->pm.dpm.vce_active = true; + /* XXX select vce level based on ring/task */ + adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; mutex_unlock(&adev->pm.mutex); } else { - if (enable) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.vce_active = true; - /* XXX select vce level based on ring/task */ - adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; - mutex_unlock(&adev->pm.mutex); - } else { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.vce_active = false; - mutex_unlock(&adev->pm.mutex); - } - amdgpu_pm_compute_clocks(adev); + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.vce_active = false; + mutex_unlock(&adev->pm.mutex); } + amdgpu_pm_compute_clocks(adev); } } @@ -1330,6 +1322,64 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) */ #if defined(CONFIG_DEBUG_FS) +static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) +{ + int32_t value; + + /* sanity check PP is enabled */ + if (!(adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->read_sensor)) + return -EINVAL; + + /* GPU Clocks */ + seq_printf(m, "GFX Clocks and Power:\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value)) + seq_printf(m, "\t%u MHz (MCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value)) + seq_printf(m, "\t%u MHz (SCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value)) + seq_printf(m, "\t%u mV (VDDGFX)\n", value); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value)) + seq_printf(m, "\t%u mV (VDDNB)\n", value); + seq_printf(m, "\n"); + + /* GPU Temp */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value)) + seq_printf(m, "GPU Temperature: %u C\n", value/1000); + + /* GPU Load */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value)) + seq_printf(m, "GPU Load: %u %%\n", value); + seq_printf(m, "\n"); + + /* UVD clocks */ + if 
(!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) { + if (!value) { + seq_printf(m, "UVD: Disabled\n"); + } else { + seq_printf(m, "UVD: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } + } + seq_printf(m, "\n"); + + /* VCE clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) { + if (!value) { + seq_printf(m, "VCE: Disabled\n"); + } else { + seq_printf(m, "VCE: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value)) + seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + } + } + + return 0; +} + static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -1345,11 +1395,11 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { seq_printf(m, "PX asic powered off\n"); } else if (adev->pp_enabled) { - amdgpu_dpm_debugfs_print_current_performance_level(adev, m); + return amdgpu_debugfs_pm_info_pp(m, adev); } else { mutex_lock(&adev->pm.mutex); if (adev->pm.funcs->debugfs_print_current_performance_level) - amdgpu_dpm_debugfs_print_current_performance_level(adev, m); + adev->pm.funcs->debugfs_print_current_performance_level(adev, m); else seq_printf(m, "Debugfs support not implemented for this asic\n"); mutex_unlock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index c5738a22b690..7532ff822aa7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -30,6 +30,7 @@ #include "amdgpu_pm.h" #include <drm/amdgpu_drm.h> #include "amdgpu_powerplay.h" +#include "si_dpm.h" #include "cik_dpm.h" #include "vi_dpm.h" @@ -41,7 +42,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) amd_pp = &(adev->powerplay); if (adev->pp_enabled) { -#ifdef CONFIG_DRM_AMD_POWERPLAY struct amd_pp_init *pp_init; pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL); @@ -52,15 +52,21 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) pp_init->chip_family = adev->family; pp_init->chip_id = adev->asic_type; pp_init->device = amdgpu_cgs_create_device(adev); - pp_init->powercontainment_enabled = amdgpu_powercontainment; - ret = amd_powerplay_init(pp_init, amd_pp); kfree(pp_init); -#endif } else { amd_pp->pp_handle = (void *)adev; switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: + amd_pp->ip_funcs = &si_dpm_ip_funcs; + break; +#endif #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: case CHIP_HAWAII: @@ -72,15 +78,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) amd_pp->ip_funcs = &kv_dpm_ip_funcs; break; #endif - case CHIP_TOPAZ: - amd_pp->ip_funcs = &iceland_dpm_ip_funcs; - break; - case CHIP_TONGA: - amd_pp->ip_funcs = &tonga_dpm_ip_funcs; - break; - case CHIP_FIJI: - amd_pp->ip_funcs = &fiji_dpm_ip_funcs; - break; case CHIP_CARRIZO: case CHIP_STONEY: amd_pp->ip_funcs = &cz_dpm_ip_funcs; @@ -98,19 +95,17 @@ static int amdgpu_pp_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; -#ifdef CONFIG_DRM_AMD_POWERPLAY switch (adev->asic_type) { case CHIP_POLARIS11: case CHIP_POLARIS10: - 
adev->pp_enabled = true; - break; case CHIP_TONGA: case CHIP_FIJI: - adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true; + case CHIP_TOPAZ: + adev->pp_enabled = true; break; case CHIP_CARRIZO: case CHIP_STONEY: - adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; + adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true; break; /* These chips don't have powerplay implemenations */ case CHIP_BONAIRE: @@ -118,14 +113,10 @@ static int amdgpu_pp_early_init(void *handle) case CHIP_KABINI: case CHIP_MULLINS: case CHIP_KAVERI: - case CHIP_TOPAZ: default: adev->pp_enabled = false; break; } -#else - adev->pp_enabled = false; -#endif ret = amdgpu_powerplay_init(adev); if (ret) @@ -147,12 +138,11 @@ static int amdgpu_pp_late_init(void *handle) ret = adev->powerplay.ip_funcs->late_init( adev->powerplay.pp_handle); -#ifdef CONFIG_DRM_AMD_POWERPLAY if (adev->pp_enabled && adev->pm.dpm_enabled) { amdgpu_pm_sysfs_init(adev); amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL); } -#endif + return ret; } @@ -165,10 +155,8 @@ static int amdgpu_pp_sw_init(void *handle) ret = adev->powerplay.ip_funcs->sw_init( adev->powerplay.pp_handle); -#ifdef CONFIG_DRM_AMD_POWERPLAY if (adev->pp_enabled) adev->pm.dpm_enabled = true; -#endif return ret; } @@ -219,7 +207,6 @@ static int amdgpu_pp_hw_fini(void *handle) static void amdgpu_pp_late_fini(void *handle) { -#ifdef CONFIG_DRM_AMD_POWERPLAY struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->pp_enabled) { @@ -230,7 +217,6 @@ static void amdgpu_pp_late_fini(void *handle) if (adev->powerplay.ip_funcs->late_fini) adev->powerplay.ip_funcs->late_fini( adev->powerplay.pp_handle); -#endif } static int amdgpu_pp_suspend(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 85aeb0a804bb..e1fa8731d1e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -222,33 +222,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, /* Allocate ring buffer */ if (ring->ring_obj == NULL) { - r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, NULL, &ring->ring_obj); + r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &ring->ring_obj, + &ring->gpu_addr, + (void **)&ring->ring); if (r) { dev_err(adev->dev, "(%d) ring create failed\n", r); return r; } - r = amdgpu_bo_reserve(ring->ring_obj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT, - &ring->gpu_addr); - if (r) { - amdgpu_bo_unreserve(ring->ring_obj); - dev_err(adev->dev, "(%d) ring pin failed\n", r); - return r; - } - r = amdgpu_bo_kmap(ring->ring_obj, - (void **)&ring->ring); - memset((void *)ring->ring, 0, ring->ring_size); - - amdgpu_bo_unreserve(ring->ring_obj); - if (r) { - dev_err(adev->dev, "(%d) ring map failed\n", r); - return r; - } } ring->ptr_mask = (ring->ring_size / 4) - 1; ring->max_dw = max_dw; @@ -269,29 +252,20 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, */ void amdgpu_ring_fini(struct amdgpu_ring *ring) { - int r; - struct amdgpu_bo *ring_obj; - - ring_obj = ring->ring_obj; ring->ready = false; - ring->ring = NULL; - ring->ring_obj = NULL; amdgpu_wb_free(ring->adev, ring->cond_exe_offs); amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_wb_free(ring->adev, ring->rptr_offs); amdgpu_wb_free(ring->adev, ring->wptr_offs); - if (ring_obj) { - r = 
amdgpu_bo_reserve(ring_obj, false); - if (likely(r == 0)) { - amdgpu_bo_kunmap(ring_obj); - amdgpu_bo_unpin(ring_obj); - amdgpu_bo_unreserve(ring_obj); - } - amdgpu_bo_unref(&ring_obj); - } + amdgpu_bo_free_kernel(&ring->ring_obj, + &ring->gpu_addr, + (void **)&ring->ring); + amdgpu_debugfs_ring_fini(ring); + + ring->adev->rings[ring->idx] = NULL; } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 05a53f4fc334..b827c75e95de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(gtt_obj[i]); r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr, - size, NULL, &fence); + size, NULL, &fence, false); if (r) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); @@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(vram_obj); r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr, - size, NULL, &fence); + size, NULL, &fence, false); if (r) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 0d8d65eb46cd..067e5e683bb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -247,7 +247,7 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping, TP_ARGS(mapping) ); -TRACE_EVENT(amdgpu_vm_set_page, +TRACE_EVENT(amdgpu_vm_set_ptes, TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags), TP_ARGS(pe, addr, count, incr, flags), @@ -271,6 +271,24 @@ TRACE_EVENT(amdgpu_vm_set_page, __entry->flags, __entry->count) ); +TRACE_EVENT(amdgpu_vm_copy_ptes, + TP_PROTO(uint64_t pe, uint64_t src, unsigned count), + TP_ARGS(pe, src, count), + TP_STRUCT__entry( + __field(u64, pe) + __field(u64, src) + __field(u32, count) + ), + + TP_fast_assign( + __entry->pe = pe; + __entry->src = src; + __entry->count = count; + ), + TP_printk("pe=%010Lx, src=%010Lx, count=%u", + __entry->pe, __entry->src, __entry->count) +); + TRACE_EVENT(amdgpu_vm_flush, TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id), TP_ARGS(pd_addr, ring, id), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 716f2afeb6a9..887483b8b818 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -34,6 +34,7 @@ #include <ttm/ttm_placement.h> #include <ttm/ttm_module.h> #include <ttm/ttm_page_alloc.h> +#include <ttm/ttm_memory.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include <linux/seq_file.h> @@ -74,7 +75,7 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) ttm_mem_global_release(ref->object); } -static int amdgpu_ttm_global_init(struct amdgpu_device *adev) +int amdgpu_ttm_global_init(struct amdgpu_device *adev) { struct drm_global_reference *global_ref; struct amdgpu_ring *ring; @@ -88,10 +89,10 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) global_ref->init = &amdgpu_ttm_mem_global_init; global_ref->release = &amdgpu_ttm_mem_global_release; r = drm_global_item_ref(global_ref); - if (r != 0) { + if (r) { DRM_ERROR("Failed setting up TTM memory accounting " "subsystem.\n"); - return r; + goto error_mem; } adev->mman.bo_global_ref.mem_glob = @@ -102,26 +103,30 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) global_ref->init = &ttm_bo_global_init; global_ref->release = &ttm_bo_global_release; r = 
drm_global_item_ref(global_ref); - if (r != 0) { + if (r) { DRM_ERROR("Failed setting up TTM BO subsystem.\n"); - drm_global_item_unref(&adev->mman.mem_global_ref); - return r; + goto error_bo; } ring = adev->mman.buffer_funcs_ring; rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; r = amd_sched_entity_init(&ring->sched, &adev->mman.entity, rq, amdgpu_sched_jobs); - if (r != 0) { + if (r) { DRM_ERROR("Failed setting up TTM BO move run queue.\n"); - drm_global_item_unref(&adev->mman.mem_global_ref); - drm_global_item_unref(&adev->mman.bo_global_ref.ref); - return r; + goto error_entity; } adev->mman.mem_global_referenced = true; return 0; + +error_entity: + drm_global_item_unref(&adev->mman.bo_global_ref.ref); +error_bo: + drm_global_item_unref(&adev->mman.mem_global_ref); +error_mem: + return r; } static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) @@ -155,7 +160,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_TT: - man->func = &ttm_bo_manager_func; + man->func = &amdgpu_gtt_mgr_func; man->gpu_offset = adev->mc.gtt_start; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; @@ -190,12 +195,13 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, static void amdgpu_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { - struct amdgpu_bo *rbo; + struct amdgpu_bo *abo; static struct ttm_place placements = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM }; + unsigned i; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { placement->placement = &placements; @@ -204,28 +210,44 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, placement->num_busy_placement = 1; return; } - rbo = container_of(bo, struct amdgpu_bo, tbo); + abo = container_of(bo, struct amdgpu_bo, tbo); switch (bo->mem.mem_type) { case TTM_PL_VRAM: - if (rbo->adev->mman.buffer_funcs_ring->ready == false) - amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU); - else - amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT); + if (abo->adev->mman.buffer_funcs_ring->ready == false) { + amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); + } else { + amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); + for (i = 0; i < abo->placement.num_placement; ++i) { + if (!(abo->placements[i].flags & + TTM_PL_FLAG_TT)) + continue; + + if (abo->placements[i].lpfn) + continue; + + /* set an upper limit to force directly + * allocating address space for the BO. 
+ */ + abo->placements[i].lpfn = + abo->adev->mc.gtt_size >> PAGE_SHIFT; + } + } break; case TTM_PL_TT: default: - amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU); + amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); } - *placement = rbo->placement; + *placement = abo->placement; } static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) { - struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo); + struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo); if (amdgpu_ttm_tt_get_usermm(bo->ttm)) return -EPERM; - return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); + return drm_vma_node_verify_access(&abo->gem_base.vma_node, + filp->private_data); } static void amdgpu_move_null(struct ttm_buffer_object *bo, @@ -251,26 +273,30 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, adev = amdgpu_get_adev(bo->bdev); ring = adev->mman.buffer_funcs_ring; - old_start = (u64)old_mem->start << PAGE_SHIFT; - new_start = (u64)new_mem->start << PAGE_SHIFT; switch (old_mem->mem_type) { - case TTM_PL_VRAM: - old_start += adev->mc.vram_start; - break; case TTM_PL_TT: - old_start += adev->mc.gtt_start; + r = amdgpu_ttm_bind(bo, old_mem); + if (r) + return r; + + case TTM_PL_VRAM: + old_start = (u64)old_mem->start << PAGE_SHIFT; + old_start += bo->bdev->man[old_mem->mem_type].gpu_offset; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); return -EINVAL; } switch (new_mem->mem_type) { - case TTM_PL_VRAM: - new_start += adev->mc.vram_start; - break; case TTM_PL_TT: - new_start += adev->mc.gtt_start; + r = amdgpu_ttm_bind(bo, new_mem); + if (r) + return r; + + case TTM_PL_VRAM: + new_start = (u64)new_mem->start << PAGE_SHIFT; + new_start += bo->bdev->man[new_mem->mem_type].gpu_offset; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); @@ -285,7 +311,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, r = amdgpu_copy_buffer(ring, old_start, new_start, new_mem->num_pages * PAGE_SIZE, /* bytes */ - bo->resv, &fence); + bo->resv, &fence, false); if (r) return r; @@ -314,7 +340,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, placement.num_busy_placement = 1; placement.busy_placement = &placements; placements.fpfn = 0; - placements.lpfn = 0; + placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); @@ -335,7 +361,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -361,14 +387,14 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, placement.num_busy_placement = 1; placement.busy_placement = &placements; placements.fpfn = 0; - placements.lpfn = 0; + placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } @@ -435,8 +461,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, 
new_mem); + r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem); if (r) { return r; } @@ -524,6 +549,7 @@ struct amdgpu_ttm_tt { spinlock_t guptasklock; struct list_head guptasks; atomic_t mmu_invalidations; + struct list_head list; }; int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) @@ -641,7 +667,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct amdgpu_ttm_tt *gtt = (void*)ttm; - uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); int r; if (gtt->userptr) { @@ -651,7 +676,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, return r; } } - gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); @@ -662,14 +686,71 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, bo_mem->mem_type == AMDGPU_PL_OA) return -EINVAL; + return 0; +} + +bool amdgpu_ttm_is_bound(struct ttm_tt *ttm) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + + return gtt && !list_empty(&gtt->list); +} + +int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) +{ + struct ttm_tt *ttm = bo->ttm; + struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; + uint32_t flags; + int r; + + if (!ttm || amdgpu_ttm_is_bound(ttm)) + return 0; + + r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo, + NULL, bo_mem); + if (r) { + DRM_ERROR("Failed to allocate GTT address space (%d)\n", r); + return r; + } + + flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); + gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, ttm->pages, gtt->ttm.dma_address, flags); if (r) { - DRM_ERROR("failed to bind %lu pages at 0x%08X\n", - ttm->num_pages, (unsigned)gtt->offset); + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + ttm->num_pages, gtt->offset); return r; } + spin_lock(&gtt->adev->gtt_list_lock); + list_add_tail(&gtt->list, &gtt->adev->gtt_list); + spin_unlock(&gtt->adev->gtt_list_lock); + return 0; +} + +int amdgpu_ttm_recover_gart(struct amdgpu_device *adev) +{ + struct amdgpu_ttm_tt *gtt, *tmp; + struct ttm_mem_reg bo_mem; + uint32_t flags; + int r; + + bo_mem.mem_type = TTM_PL_TT; + spin_lock(&adev->gtt_list_lock); + list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) { + flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem); + r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages, + gtt->ttm.ttm.pages, gtt->ttm.dma_address, + flags); + if (r) { + spin_unlock(&adev->gtt_list_lock); + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + gtt->ttm.ttm.num_pages, gtt->offset); + return r; + } + } + spin_unlock(&adev->gtt_list_lock); return 0; } @@ -677,12 +758,19 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; + if (gtt->userptr) + amdgpu_ttm_tt_unpin_userptr(ttm); + + if (!amdgpu_ttm_is_bound(ttm)) + return 0; + /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ if (gtt->adev->gart.ready) amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages); - if (gtt->userptr) - amdgpu_ttm_tt_unpin_userptr(ttm); + spin_lock(&gtt->adev->gtt_list_lock); + list_del_init(&gtt->list); + spin_unlock(&gtt->adev->gtt_list_lock); return 0; } @@ -720,6 +808,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, kfree(gtt); return NULL; } + INIT_LIST_HEAD(&gtt->list); return &gtt->ttm.ttm; } @@ -991,10 +1080,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) unsigned
i, j; int r; - r = amdgpu_ttm_global_init(adev); - if (r) { - return r; - } /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&adev->mman.bdev, adev->mman.bo_global_ref.ref.object, @@ -1159,7 +1244,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct fence **fence) + struct fence **fence, bool direct_submit) { struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; @@ -1203,8 +1288,79 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); + if (direct_submit) { + r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, + NULL, NULL, fence); + job->fence = fence_get(*fence); + if (r) + DRM_ERROR("Error scheduling IBs (%d)\n", r); + amdgpu_job_free(job); + } else { + r = amdgpu_job_submit(job, ring, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, fence); + if (r) + goto error_free; + } + + return r; + +error_free: + amdgpu_job_free(job); + return r; +} + +int amdgpu_fill_buffer(struct amdgpu_bo *bo, + uint32_t src_data, + struct reservation_object *resv, + struct fence **fence) +{ + struct amdgpu_device *adev = bo->adev; + struct amdgpu_job *job; + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; + + uint32_t max_bytes, byte_count; + uint64_t dst_offset; + unsigned int num_loops, num_dw; + unsigned int i; + int r; + + byte_count = bo->tbo.num_pages << PAGE_SHIFT; + max_bytes = adev->mman.buffer_funcs->fill_max_bytes; + num_loops = DIV_ROUND_UP(byte_count, max_bytes); + num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; + + /* for IB padding */ + while (num_dw & 0x7) + num_dw++; + + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + if (r) + return r; + + if (resv) { + r = amdgpu_sync_resv(adev, &job->sync, resv, + AMDGPU_FENCE_OWNER_UNDEFINED); + if (r) { + DRM_ERROR("sync failed (%d).\n", r); + goto error_free; + } + } + + dst_offset = bo->tbo.mem.start << PAGE_SHIFT; + for (i = 0; i < num_loops; i++) { + uint32_t cur_size_in_bytes = min(byte_count, max_bytes); + + amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, + dst_offset, cur_size_in_bytes); + + dst_offset += cur_size_in_bytes; + byte_count -= cur_size_in_bytes; + } + + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); r = amdgpu_job_submit(job, ring, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, fence); + AMDGPU_FENCE_OWNER_UNDEFINED, fence); if (r) goto error_free; @@ -1395,3 +1551,8 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) #endif } + +u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev) +{ + return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h new file mode 100644 index 000000000000..9812c805326c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -0,0 +1,90 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_TTM_H__ +#define __AMDGPU_TTM_H__ + +#include "gpu_scheduler.h" + +#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) +#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) +#define AMDGPU_PL_OA (TTM_PL_PRIV + 2) + +#define AMDGPU_PL_FLAG_GDS (TTM_PL_FLAG_PRIV << 0) +#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1) +#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2) + +#define AMDGPU_TTM_LRU_SIZE 20 + +struct amdgpu_mman_lru { + struct list_head *lru[TTM_NUM_MEM_TYPES]; + struct list_head *swap_lru; +}; + +struct amdgpu_mman { + struct ttm_bo_global_ref bo_global_ref; + struct drm_global_reference mem_global_ref; + struct ttm_bo_device bdev; + bool mem_global_referenced; + bool initialized; + +#if defined(CONFIG_DEBUG_FS) + struct dentry *vram; + struct dentry *gtt; +#endif + + /* buffer handling */ + const struct amdgpu_buffer_funcs *buffer_funcs; + struct amdgpu_ring *buffer_funcs_ring; + /* Scheduler entity for buffer moves */ + struct amd_sched_entity entity; + + /* custom LRU management */ + struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; + /* guard for log2_size array, don't add anything in between */ + struct amdgpu_mman_lru guard; +}; + +extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; + +int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *tbo, + const struct ttm_place *place, + struct ttm_mem_reg *mem); + +int amdgpu_copy_buffer(struct amdgpu_ring *ring, + uint64_t src_offset, + uint64_t dst_offset, + uint32_t byte_count, + struct reservation_object *resv, + struct fence **fence, bool direct_submit); +int amdgpu_fill_buffer(struct amdgpu_bo *bo, + uint32_t src_data, + struct reservation_object *resv, + struct fence **fence); + +int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); +bool amdgpu_ttm_is_bound(struct ttm_tt *ttm); +int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 5cc95f1a7dab..cb3d252f3c78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -247,40 +247,32 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) const struct common_firmware_header *header = NULL; err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo); + AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo); if 
(err) { dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); - err = -ENOMEM; goto failed; } err = amdgpu_bo_reserve(*bo, false); if (err) { - amdgpu_bo_unref(bo); dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err); - goto failed; + goto failed_reserve; } err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr); if (err) { - amdgpu_bo_unreserve(*bo); - amdgpu_bo_unref(bo); dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err); - goto failed; + goto failed_pin; } err = amdgpu_bo_kmap(*bo, &fw_buf_ptr); if (err) { dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err); - amdgpu_bo_unpin(*bo); - amdgpu_bo_unreserve(*bo); - amdgpu_bo_unref(bo); - goto failed; + goto failed_kmap; } amdgpu_bo_unreserve(*bo); - fw_offset = 0; for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) { ucode = &adev->firmware.ucode[i]; if (ucode->fw) { @@ -290,10 +282,16 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); } } + return 0; +failed_kmap: + amdgpu_bo_unpin(*bo); +failed_pin: + amdgpu_bo_unreserve(*bo); +failed_reserve: + amdgpu_bo_unref(bo); failed: - if (err) - adev->firmware.smu_load = false; + adev->firmware.smu_load = false; return err; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 4aa993d19018..e3281cacc586 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -201,39 +201,14 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; - r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, &adev->uvd.vcpu_bo); + r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo, + &adev->uvd.gpu_addr, &adev->uvd.cpu_addr); if (r) { dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); return r; } - r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); - if (r) { - amdgpu_bo_unref(&adev->uvd.vcpu_bo); - dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r); - return r; - } - - r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM, - &adev->uvd.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - amdgpu_bo_unref(&adev->uvd.vcpu_bo); - dev_err(adev->dev, "(%d) UVD bo pin failed\n", r); - return r; - } - - r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr); - if (r) { - dev_err(adev->dev, "(%d) UVD map failed\n", r); - return r; - } - - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - ring = &adev->uvd.ring; rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity, @@ -274,22 +249,13 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { - int r; - kfree(adev->uvd.saved_bo); amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); - if (adev->uvd.vcpu_bo) { - r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); - if (!r) { - amdgpu_bo_kunmap(adev->uvd.vcpu_bo); - amdgpu_bo_unpin(adev->uvd.vcpu_bo); - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - } - - amdgpu_bo_unref(&adev->uvd.vcpu_bo); - } + amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo, + &adev->uvd.gpu_addr, + (void **)&adev->uvd.cpu_addr); amdgpu_ring_fini(&adev->uvd.ring); @@ -323,7 +289,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device 
*adev) if (!adev->uvd.saved_bo) return -ENOMEM; - memcpy(adev->uvd.saved_bo, ptr, size); + memcpy_fromio(adev->uvd.saved_bo, ptr, size); return 0; } @@ -340,7 +306,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) ptr = adev->uvd.cpu_addr; if (adev->uvd.saved_bo != NULL) { - memcpy(ptr, adev->uvd.saved_bo, size); + memcpy_toio(ptr, adev->uvd.saved_bo, size); kfree(adev->uvd.saved_bo); adev->uvd.saved_bo = NULL; } else { @@ -349,11 +315,11 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->uvd.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, - (adev->uvd.fw->size) - offset); + memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); - memset(ptr, 0, size); + memset_io(ptr, 0, size); } return 0; @@ -385,12 +351,12 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) } } -static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo) +static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo) { int i; - for (i = 0; i < rbo->placement.num_placement; ++i) { - rbo->placements[i].fpfn = 0 >> PAGE_SHIFT; - rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; + for (i = 0; i < abo->placement.num_placement; ++i) { + abo->placements[i].fpfn = 0 >> PAGE_SHIFT; + abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; } } @@ -843,6 +809,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx, return r; break; case mmUVD_ENGINE_CNTL: + case mmUVD_NO_OP: break; default: DRM_ERROR("Invalid reg 0x%X!\n", reg); @@ -915,6 +882,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) return -EINVAL; } + r = amdgpu_cs_sysvm_access_required(parser); + if (r) + return r; + ctx.parser = parser; ctx.buf_sizes = buf_sizes; ctx.ib_idx = ib_idx; @@ -981,8 +952,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->ptr[3] = addr >> 32; ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); ib->ptr[5] = 0; - for (i = 6; i < 16; ++i) - ib->ptr[i] = PACKET2(0); + for (i = 6; i < 16; i += 2) { + ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0); + ib->ptr[i+1] = 0; + } ib->length_dw = 16; if (direct) { @@ -1114,15 +1087,9 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, uvd.idle_work.work); - unsigned i, fences, handles = 0; - - fences = amdgpu_fence_count_emitted(&adev->uvd.ring); - - for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.handles[i])) - ++handles; + unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring); - if (fences == 0 && handles == 0) { + if (fences == 0) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, false); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 05865ce35351..7fe8fd884f06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -210,6 +210,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) */ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) { + unsigned i; + if (adev->vce.vcpu_bo == NULL) return 0; @@ -217,8 +219,8 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) amdgpu_bo_unref(&adev->vce.vcpu_bo); - amdgpu_ring_fini(&adev->vce.ring[0]); - 
amdgpu_ring_fini(&adev->vce.ring[1]); + for (i = 0; i < adev->vce.num_rings; i++) + amdgpu_ring_fini(&adev->vce.ring[i]); release_firmware(adev->vce.fw); mutex_destroy(&adev->vce.idle_mutex); @@ -282,8 +284,8 @@ int amdgpu_vce_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vce.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy(cpu_addr, (adev->vce.fw->data) + offset, - (adev->vce.fw->size) - offset); + memcpy_toio(cpu_addr, adev->vce.fw->data + offset, + adev->vce.fw->size - offset); amdgpu_bo_kunmap(adev->vce.vcpu_bo); @@ -303,9 +305,12 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, vce.idle_work.work); + unsigned i, count = 0; + + for (i = 0; i < adev->vce.num_rings; i++) + count += amdgpu_fence_count_emitted(&adev->vce.ring[i]); - if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) && - (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) { + if (count == 0) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_vce(adev, false); } else { @@ -634,7 +639,11 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) uint32_t allocated = 0; uint32_t tmp, handle = 0; uint32_t *size = &tmp; - int i, r = 0, idx = 0; + int i, r, idx = 0; + + r = amdgpu_cs_sysvm_access_required(p); + if (r) + return r; while (idx < ib->length_dw) { uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx); @@ -687,6 +696,21 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) case 0x04000008: /* rdo */ case 0x04000009: /* vui */ case 0x05000002: /* auxiliary buffer */ + case 0x05000009: /* clock table */ + break; + + case 0x0500000c: /* hw config */ + switch (p->adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_KAVERI: + case CHIP_MULLINS: +#endif + case CHIP_CARRIZO: + break; + default: + r = -EINVAL; + goto out; + } break; case 0x03000001: /* encode */ @@ -799,6 +823,18 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, amdgpu_ring_write(ring, VCE_CMD_END); } +unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring) +{ + return + 4; /* amdgpu_vce_ring_emit_ib */ +} + +unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring) +{ + return + 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */ +} + /** * amdgpu_vce_ring_test_ring - test if VCE ring is working * @@ -850,8 +886,8 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct fence *fence = NULL; long r; - /* skip vce ring1 ib test for now, since it's not reliable */ - if (ring == &ring->adev->vce.ring[1]) + /* skip vce ring1/2 ib test for now, since it's not reliable */ + if (ring != &ring->adev->vce.ring[0]) return 0; r = amdgpu_vce_get_create_msg(ring, 1, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 63f83d0d985c..12729d2852df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -42,5 +42,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout); void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring); +unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring); +unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h 
new file mode 100644 index 000000000000..2c37a374917f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -0,0 +1,57 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Monk.liu@amd.com + */ +#ifndef AMDGPU_VIRT_H +#define AMDGPU_VIRT_H + +#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */ +#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */ +#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ +#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ +/* GPU virtualization */ +struct amdgpu_virtualization { + uint32_t virtual_caps; +}; + +#define amdgpu_sriov_enabled(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) + +#define amdgpu_sriov_vf(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF) + +#define amdgpu_sriov_bios(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS) + +#define amdgpu_passthrough(adev) \ +((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE) + +static inline bool is_virtual_machine(void) +{ +#ifdef CONFIG_X86 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); +#else + return false; +#endif +} + +#endif
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 80120fa4092c..06f24322e7c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -51,19 +51,22 @@ * SI supports 16. */ -/* Special value that no flush is necessary */ -#define AMDGPU_VM_NO_FLUSH (~0ll) - /* Local structure. Encapsulate some VM table update parameters to reduce * the number of function parameters */ -struct amdgpu_vm_update_params { +struct amdgpu_pte_update_params { + /* amdgpu device we do this update for */ + struct amdgpu_device *adev; /* address where to copy page table entries from */ uint64_t src; - /* DMA addresses to use for mapping */ - dma_addr_t *pages_addr; /* indirect buffer to fill with commands */ struct amdgpu_ib *ib; + /* Function which actually does the update */ + void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe, + uint64_t addr, unsigned count, uint32_t incr, + uint32_t flags); + /* indicate update pt or its shadow */ + bool shadow; }; /** @@ -467,10 +470,9 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, } /** - * amdgpu_vm_update_pages - helper to call the right asic function + * amdgpu_vm_do_set_ptes - helper to call the right asic function * - * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition + * @params: see amdgpu_pte_update_params definition * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update @@ -480,32 +482,46 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * Traces the parameters and calls the right asic functions * to setup the page table using the DMA. */ -static void amdgpu_vm_update_pages(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, +static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params, + uint64_t pe, uint64_t addr, + unsigned count, uint32_t incr, + uint32_t flags) +{ + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); + + if (count < 3) { + amdgpu_vm_write_pte(params->adev, params->ib, pe, + addr | flags, count, incr); + + } else { + amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr, + count, incr, flags); + } +} + +/** + * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART + * + * @params: see amdgpu_pte_update_params definition + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: hw access flags + * + * Traces the parameters and calls the DMA function to copy the PTEs. 
+ */ +static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); - - if (vm_update_params->src) { - amdgpu_vm_copy_pte(adev, vm_update_params->ib, - pe, (vm_update_params->src + (addr >> 12) * 8), count); + uint64_t src = (params->src + (addr >> 12) * 8); - } else if (vm_update_params->pages_addr) { - amdgpu_vm_write_pte(adev, vm_update_params->ib, - vm_update_params->pages_addr, - pe, addr, count, incr, flags); - } else if (count < 3) { - amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr, - count, incr, flags); + trace_amdgpu_vm_copy_ptes(pe, src, count); - } else { - amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr, - count, incr, flags); - } + amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count); } /** @@ -523,12 +539,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_ring *ring; struct fence *fence = NULL; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; unsigned entries; uint64_t addr; int r; - memset(&vm_update_params, 0, sizeof(vm_update_params)); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); r = reservation_object_reserve_shared(bo->tbo.resv); @@ -539,6 +554,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; + r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); + if (r) + goto error; + addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; @@ -546,9 +565,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; - vm_update_params.ib = &job->ibs[0]; - amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries, - 0, 0); + memset(&params, 0, sizeof(params)); + params.adev = adev; + params.ib = &job->ibs[0]; + amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0); amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > 64); @@ -577,55 +597,46 @@ error: * Look up the physical address of the page that the pte resolves * to and return the pointer for the page table entry. */ -uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) +static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) { uint64_t result; - if (pages_addr) { - /* page table offset */ - result = pages_addr[addr >> PAGE_SHIFT]; - - /* in case cpu page size != gpu page size*/ - result |= addr & (~PAGE_MASK); + /* page table offset */ + result = pages_addr[addr >> PAGE_SHIFT]; - } else { - /* No mapping required */ - result = addr; - } + /* in case cpu page size != gpu page size*/ + result |= addr & (~PAGE_MASK); result &= 0xFFFFFFFFFFFFF000ULL; return result; } -/** - * amdgpu_vm_update_pdes - make sure that page directory is valid - * - * @adev: amdgpu_device pointer - * @vm: requested vm - * @start: start of GPU address range - * @end: end of GPU address range - * - * Allocates new page tables if necessary - * and updates the page directory. - * Returns 0 for success, error for failure. - */ -int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, - struct amdgpu_vm *vm) +static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + bool shadow) { struct amdgpu_ring *ring; - struct amdgpu_bo *pd = vm->page_directory; - uint64_t pd_addr = amdgpu_bo_gpu_offset(pd); + struct amdgpu_bo *pd = shadow ?
vm->page_directory->shadow : + vm->page_directory; + uint64_t pd_addr; uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; struct fence *fence = NULL; int r; - memset(&vm_update_params, 0, sizeof(vm_update_params)); + if (!pd) + return 0; + + r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem); + if (r) + return r; + + pd_addr = amdgpu_bo_gpu_offset(pd); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); /* padding, etc. */ @@ -638,7 +649,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, if (r) return r; - vm_update_params.ib = &job->ibs[0]; + memset(&params, 0, sizeof(params)); + params.adev = adev; + params.ib = &job->ibs[0]; /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { @@ -648,20 +661,34 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, if (bo == NULL) continue; + if (bo->shadow) { + struct amdgpu_bo *shadow = bo->shadow; + + r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); + if (r) + return r; + } + pt = amdgpu_bo_gpu_offset(bo); - if (vm->page_tables[pt_idx].addr == pt) - continue; - vm->page_tables[pt_idx].addr = pt; + if (!shadow) { + if (vm->page_tables[pt_idx].addr == pt) + continue; + vm->page_tables[pt_idx].addr = pt; + } else { + if (vm->page_tables[pt_idx].shadow_addr == pt) + continue; + vm->page_tables[pt_idx].shadow_addr = pt; + } pde = pd_addr + pt_idx * 8; if (((last_pde + 8 * count) != pde) || - ((last_pt + incr * count) != pt)) { + ((last_pt + incr * count) != pt) || + (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { if (count) { - amdgpu_vm_update_pages(adev, &vm_update_params, - last_pde, last_pt, - count, incr, - AMDGPU_PTE_VALID); + amdgpu_vm_do_set_ptes(&params, last_pde, + last_pt, count, incr, + AMDGPU_PTE_VALID); } count = 1; @@ -673,15 +700,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, if (count) - amdgpu_vm_update_pages(adev, &vm_update_params, - last_pde, last_pt, - count, incr, AMDGPU_PTE_VALID); + amdgpu_vm_do_set_ptes(&params, last_pde, last_pt, + count, incr, AMDGPU_PTE_VALID); - if (vm_update_params.ib->length_dw != 0) { - amdgpu_ring_pad_ib(ring, vm_update_params.ib); + if (params.ib->length_dw != 0) { + amdgpu_ring_pad_ib(ring, params.ib); amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); - WARN_ON(vm_update_params.ib->length_dw > ndw); + WARN_ON(params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &fence); if (r) @@ -703,92 +729,33 @@ error_free: return r; } -/** - * amdgpu_vm_frag_ptes - add fragment information to PTEs +/* + * amdgpu_vm_update_pdes - make sure that page directory is valid * * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition - * @pe_start: first PTE to handle - * @pe_end: last PTE to handle - * @addr: addr those PTEs should point to - * @flags: hw mapping flags + * @vm: requested vm + * @start: start of GPU address range + * @end: end of GPU address range + * + * Allocates new page tables if necessary + * and updates the page directory. + * Returns 0 for success, error for failure.
*/ -static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, - uint64_t pe_start, uint64_t pe_end, - uint64_t addr, uint32_t flags) +int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, + struct amdgpu_vm *vm) { - /** - * The MC L1 TLB supports variable sized pages, based on a fragment - * field in the PTE. When this field is set to a non-zero value, page - * granularity is increased from 4KB to (1 << (12 + frag)). The PTE - * flags are considered valid for all PTEs within the fragment range - * and corresponding mappings are assumed to be physically contiguous. - * - * The L1 TLB can store a single PTE for the whole fragment, - * significantly increasing the space available for translation - * caching. This leads to large improvements in throughput when the - * TLB is under pressure. - * - * The L2 TLB distributes small and large fragments into two - * asymmetric partitions. The large fragment cache is significantly - * larger. Thus, we try to use large fragments wherever possible. - * Userspace can support this by aligning virtual base address and - * allocation size to the fragment size. - */ - - /* SI and newer are optimized for 64KB */ - uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB; - uint64_t frag_align = 0x80; - - uint64_t frag_start = ALIGN(pe_start, frag_align); - uint64_t frag_end = pe_end & ~(frag_align - 1); - - unsigned count; - - /* Abort early if there isn't anything to do */ - if (pe_start == pe_end) - return; - - /* system pages are non continuously */ - if (vm_update_params->src || vm_update_params->pages_addr || - !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { - - count = (pe_end - pe_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, pe_start, - addr, count, AMDGPU_GPU_PAGE_SIZE, - flags); - return; - } - - /* handle the 4K area at the beginning */ - if (pe_start != frag_start) { - count = (frag_start - pe_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr, - count, AMDGPU_GPU_PAGE_SIZE, flags); - addr += AMDGPU_GPU_PAGE_SIZE * count; - } - - /* handle the area in the middle */ - count = (frag_end - frag_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); + int r; - /* handle the 4K area at the end */ - if (frag_end != pe_end) { - addr += AMDGPU_GPU_PAGE_SIZE * count; - count = (pe_end - frag_end) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr, - count, AMDGPU_GPU_PAGE_SIZE, flags); - } + r = amdgpu_vm_update_pd_or_shadow(adev, vm, true); + if (r) + return r; + return amdgpu_vm_update_pd_or_shadow(adev, vm, false); } /** * amdgpu_vm_update_ptes - make sure that page tables are valid * - * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition + * @params: see amdgpu_pte_update_params definition * @vm: requested vm * @start: start of GPU address range * @end: end of GPU address range @@ -797,16 +764,14 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * * Update the page tables in the range @start - @end. 
*/ -static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, +static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, struct amdgpu_vm *vm, uint64_t start, uint64_t end, uint64_t dst, uint32_t flags) { const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - uint64_t cur_pe_start, cur_pe_end, cur_dst; + uint64_t cur_pe_start, cur_nptes, cur_dst; uint64_t addr; /* next GPU address to be updated */ uint64_t pt_idx; struct amdgpu_bo *pt; @@ -817,7 +782,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, addr = start; pt_idx = addr >> amdgpu_vm_block_size; pt = vm->page_tables[pt_idx].entry.robj; - + if (params->shadow) { + if (!pt->shadow) + return; + pt = vm->page_tables[pt_idx].entry.robj->shadow; + } if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else @@ -825,7 +794,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, cur_pe_start = amdgpu_bo_gpu_offset(pt); cur_pe_start += (addr & mask) * 8; - cur_pe_end = cur_pe_start + 8 * nptes; + cur_nptes = nptes; cur_dst = dst; /* for next ptb*/ @@ -836,6 +805,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, while (addr < end) { pt_idx = addr >> amdgpu_vm_block_size; pt = vm->page_tables[pt_idx].entry.robj; + if (params->shadow) { + if (!pt->shadow) + return; + pt = vm->page_tables[pt_idx].entry.robj->shadow; + } if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; @@ -845,19 +819,19 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, next_pe_start = amdgpu_bo_gpu_offset(pt); next_pe_start += (addr & mask) * 8; - if (cur_pe_end == next_pe_start) { + if ((cur_pe_start + 8 * cur_nptes) == next_pe_start && + ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) { /* The next ptb is consecutive to current ptb. - * Don't call amdgpu_vm_frag_ptes now. + * Don't call the update function now. * Will update two ptbs together in future. */ - cur_pe_end += 8 * nptes; + cur_nptes += nptes; } else { - amdgpu_vm_frag_ptes(adev, vm_update_params, - cur_pe_start, cur_pe_end, - cur_dst, flags); + params->func(params, cur_pe_start, cur_dst, cur_nptes, + AMDGPU_GPU_PAGE_SIZE, flags); cur_pe_start = next_pe_start; - cur_pe_end = next_pe_start + 8 * nptes; + cur_nptes = nptes; cur_dst = dst; } @@ -866,8 +840,75 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start, - cur_pe_end, cur_dst, flags); + params->func(params, cur_pe_start, cur_dst, cur_nptes, + AMDGPU_GPU_PAGE_SIZE, flags); +} + +/* + * amdgpu_vm_frag_ptes - add fragment information to PTEs + * + * @params: see amdgpu_pte_update_params definition + * @vm: requested vm + * @start: first PTE to handle + * @end: last PTE to handle + * @dst: addr those PTEs should point to + * @flags: hw mapping flags + */ +static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, + struct amdgpu_vm *vm, + uint64_t start, uint64_t end, + uint64_t dst, uint32_t flags) +{ + /** + * The MC L1 TLB supports variable sized pages, based on a fragment + * field in the PTE. When this field is set to a non-zero value, page + * granularity is increased from 4KB to (1 << (12 + frag)). The PTE + * flags are considered valid for all PTEs within the fragment range + * and corresponding mappings are assumed to be physically contiguous. + * + * The L1 TLB can store a single PTE for the whole fragment, + * significantly increasing the space available for translation + * caching. 
This leads to large improvements in throughput when the + * TLB is under pressure. + * + * The L2 TLB distributes small and large fragments into two + * asymmetric partitions. The large fragment cache is significantly + * larger. Thus, we try to use large fragments wherever possible. + * Userspace can support this by aligning virtual base address and + * allocation size to the fragment size. + */ + + /* SI and newer are optimized for 64KB */ + uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG); + uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG; + + uint64_t frag_start = ALIGN(start, frag_align); + uint64_t frag_end = end & ~(frag_align - 1); + + /* system pages are non continuously */ + if (params->src || !(flags & AMDGPU_PTE_VALID) || + (frag_start >= frag_end)) { + + amdgpu_vm_update_ptes(params, vm, start, end, dst, flags); + return; + } + + /* handle the 4K area at the beginning */ + if (start != frag_start) { + amdgpu_vm_update_ptes(params, vm, start, frag_start, + dst, flags); + dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE; + } + + /* handle the area in the middle */ + amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst, + flags | frag_flags); + + /* handle the 4K area at the end */ + if (frag_end != end) { + dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE; + amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags); + } } /** @@ -900,14 +941,19 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, void *owner = AMDGPU_FENCE_OWNER_VM; unsigned nptes, ncmds, ndw; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; struct fence *f = NULL; int r; + memset(&params, 0, sizeof(params)); + params.adev = adev; + params.src = src; + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - memset(&vm_update_params, 0, sizeof(vm_update_params)); - vm_update_params.src = src; - vm_update_params.pages_addr = pages_addr; + + memset(&params, 0, sizeof(params)); + params.adev = adev; + params.src = src; /* sync to everything on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) @@ -924,30 +970,53 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc. */ ndw = 64; - if (vm_update_params.src) { + if (src) { /* only copy commands needed */ ndw += ncmds * 7; - } else if (vm_update_params.pages_addr) { - /* header for write data commands */ - ndw += ncmds * 4; + params.func = amdgpu_vm_do_copy_ptes; + + } else if (pages_addr) { + /* copy commands needed */ + ndw += ncmds * 7; - /* body of write data command */ + /* and also PTEs */ ndw += nptes * 2; + params.func = amdgpu_vm_do_copy_ptes; + } else { /* set page commands needed */ ndw += ncmds * 10; /* two extra commands for begin/end of fragment */ ndw += 2 * 10; + + params.func = amdgpu_vm_do_set_ptes; } r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); if (r) return r; - vm_update_params.ib = &job->ibs[0]; + params.ib = &job->ibs[0]; + + if (!src && pages_addr) { + uint64_t *pte; + unsigned i; + + /* Put the PTEs at the end of the IB.
*/ + i = ndw - nptes * 2; + pte= (uint64_t *)&(job->ibs->ptr[i]); + params.src = job->ibs->gpu_addr + i * 4; + + for (i = 0; i < nptes; ++i) { + pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i * + AMDGPU_GPU_PAGE_SIZE); + pte[i] |= flags; + } + addr = 0; + } r = amdgpu_sync_fence(adev, &job->sync, exclusive); if (r) goto error_free; @@ -962,11 +1031,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start, - last + 1, addr, flags); + params.shadow = true; + amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags); + params.shadow = false; + amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags); - amdgpu_ring_pad_ib(ring, vm_update_params.ib); - WARN_ON(vm_update_params.ib->length_dw > ndw); + amdgpu_ring_pad_ib(ring, params.ib); + WARN_ON(params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f); if (r) @@ -1062,28 +1133,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @bo_va: requested BO and VM object - * @mem: ttm mem + * @clear: if true clear the entries * * Fill in the page table entries for @bo_va. * Returns 0 for success, -EINVAL for failure. - * - * Object have to be reserved and mutex must be locked! */ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - struct ttm_mem_reg *mem) + bool clear) { struct amdgpu_vm *vm = bo_va->vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; uint32_t gtt_flags, flags; + struct ttm_mem_reg *mem; struct fence *exclusive; uint64_t addr; int r; - if (mem) { + if (clear) { + mem = NULL; + addr = 0; + exclusive = NULL; + } else { struct ttm_dma_tt *ttm; + mem = &bo_va->bo->tbo.mem; addr = (u64)mem->start << PAGE_SHIFT; switch (mem->mem_type) { case TTM_PL_TT: @@ -1101,13 +1176,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, } exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); - } else { - addr = 0; - exclusive = NULL; } flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); - gtt_flags = (adev == bo_va->bo->adev) ? flags : 0; + gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && + adev == bo_va->bo->adev) ?
flags : 0; spin_lock(&vm->status_lock); if (!list_empty(&bo_va->vm_status)) @@ -1134,7 +1207,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_lock(&vm->status_lock); list_splice_init(&bo_va->invalids, &bo_va->valids); list_del_init(&bo_va->vm_status); - if (!mem) + if (clear) list_add(&bo_va->vm_status, &vm->cleared); spin_unlock(&vm->status_lock); @@ -1197,7 +1270,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_bo_va, vm_status); spin_unlock(&vm->status_lock); - r = amdgpu_vm_bo_update(adev, bo_va, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, true); if (r) return r; @@ -1342,7 +1415,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_NO_CPU_ACCESS, + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_SHADOW, NULL, resv, &pt); if (r) goto error_free; @@ -1354,10 +1428,20 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, r = amdgpu_vm_clear_bo(adev, vm, pt); if (r) { + amdgpu_bo_unref(&pt->shadow); amdgpu_bo_unref(&pt); goto error_free; } + if (pt->shadow) { + r = amdgpu_vm_clear_bo(adev, vm, pt->shadow); + if (r) { + amdgpu_bo_unref(&pt->shadow); + amdgpu_bo_unref(&pt); + goto error_free; + } + } + entry->robj = pt; entry->priority = 0; entry->tv.bo = &entry->robj->tbo; @@ -1541,7 +1625,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) r = amdgpu_bo_create(adev, pd_size, align, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_NO_CPU_ACCESS, + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_SHADOW, NULL, NULL, &vm->page_directory); if (r) goto error_free_sched_entity; @@ -1551,14 +1636,25 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) goto error_free_page_directory; r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory); - amdgpu_bo_unreserve(vm->page_directory); if (r) - goto error_free_page_directory; + goto error_unreserve; + + if (vm->page_directory->shadow) { + r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow); + if (r) + goto error_unreserve; + } + vm->last_eviction_counter = atomic64_read(&adev->num_evictions); + amdgpu_bo_unreserve(vm->page_directory); return 0; +error_unreserve: + amdgpu_bo_unreserve(vm->page_directory); + error_free_page_directory: + amdgpu_bo_unref(&vm->page_directory->shadow); amdgpu_bo_unref(&vm->page_directory); vm->page_directory = NULL; @@ -1600,10 +1696,18 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) kfree(mapping); } - for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) - amdgpu_bo_unref(&vm->page_tables[i].entry.robj); + for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) { + struct amdgpu_bo *pt = vm->page_tables[i].entry.robj; + + if (!pt) + continue; + + amdgpu_bo_unref(&pt->shadow); + amdgpu_bo_unref(&pt); + } drm_free_large(vm->page_tables); + amdgpu_bo_unref(&vm->page_directory->shadow); amdgpu_bo_unref(&vm->page_directory); fence_put(vm->page_directory_fence); } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 49a39b1a0a96..f7d236f95e74 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -497,7 +497,13 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev, * SetPixelClock provides the dividers */ args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk); - args.v6.ucPpll = ATOM_EXT_PLL1; + if (adev->asic_type == CHIP_TAHITI || + adev->asic_type == CHIP_PITCAIRN || + 
adev->asic_type == CHIP_VERDE || + adev->asic_type == CHIP_OLAND) + args.v6.ucPpll = ATOM_PPLL0; + else + args.v6.ucPpll = ATOM_EXT_PLL1; break; default: DRM_ERROR("Unknown table version %d %d\n", frev, crev); diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 7f85c2c1d681..f81068ba4cc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -88,7 +88,6 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan, /* timeout */ if (args.v2.ucReplyStatus == 1) { - DRM_DEBUG_KMS("dp_aux_ch timeout\n"); r = -ETIMEDOUT; goto done; } @@ -339,22 +338,21 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector) { struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv; u8 msg[DP_DPCD_SIZE]; - int ret, i; + int ret; - for (i = 0; i < 7; i++) { - ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg, - DP_DPCD_SIZE); - if (ret == DP_DPCD_SIZE) { - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); + ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, + msg, DP_DPCD_SIZE); + if (ret == DP_DPCD_SIZE) { + memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), - dig_connector->dpcd); + DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), + dig_connector->dpcd); - amdgpu_atombios_dp_probe_oui(amdgpu_connector); + amdgpu_atombios_dp_probe_oui(amdgpu_connector); - return 0; - } + return 0; } + dig_connector->dpcd[0] = 0; return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c index bc56c8a181e6..b374653bd6cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c @@ -27,6 +27,7 @@ #include "amdgpu.h" #include "atom.h" #include "amdgpu_atombios.h" +#include "atombios_i2c.h" #define TARGET_HW_I2C_CLOCK 50 diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index a5c94b482459..1d8c375a3561 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -5396,7 +5396,7 @@ static void ci_dpm_disable(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); - ci_dpm_powergate_uvd(adev, false); + ci_dpm_powergate_uvd(adev, true); if (!amdgpu_ci_is_smc_running(adev)) return; @@ -5874,7 +5874,10 @@ static int ci_dpm_init(struct amdgpu_device *adev) pi->pcie_dpm_key_disabled = 0; pi->thermal_sclk_dpm_enabled = 0; - pi->caps_sclk_ds = true; + if (amdgpu_sclk_deep_sleep_en) + pi->caps_sclk_ds = true; + else + pi->caps_sclk_ds = false; pi->mclk_strobe_mode_threshold = 40000; pi->mclk_stutter_mode_threshold = 40000; @@ -6033,7 +6036,7 @@ static int ci_dpm_init(struct amdgpu_device *adev) pi->caps_dynamic_ac_timing = true; - pi->uvd_power_gated = false; + pi->uvd_power_gated = true; /* make sure dc limits are valid */ if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || @@ -6176,8 +6179,6 @@ static int ci_dpm_late_init(void *handle) if (ret) return ret; - ci_dpm_powergate_uvd(adev, true); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 4efc901f658c..a845b6a93b79 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -67,6 +67,7 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_powerplay.h" +#include "dce_virtual.h" /* * Indirect registers accessor @@ 
-962,12 +963,6 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev, return true; } -static u32 cik_get_virtual_caps(struct amdgpu_device *adev) -{ - /* CIK does not support SR-IOV */ - return 0; -} - static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { {mmGRBM_STATUS, false}, {mmGB_ADDR_CONFIG, false}, @@ -1640,6 +1635,12 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev) >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; } +static void cik_detect_hw_virtualization(struct amdgpu_device *adev) +{ + if (is_virtual_machine()) /* passthrough mode */ + adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; +} + static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1708,6 +1709,74 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 2, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1776,6 +1845,74 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 5, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 3, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version kabini_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1844,6 +1981,74 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 3, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version mullins_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1912,6 +2117,74 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 3, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1980,32 +2253,128 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 1, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + int cik_set_ip_blocks(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_BONAIRE: - adev->ip_blocks = bonaire_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks); - break; - case CHIP_HAWAII: - adev->ip_blocks = hawaii_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks); - break; - case CHIP_KAVERI: - adev->ip_blocks = kaveri_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks); - break; - case CHIP_KABINI: - adev->ip_blocks = kabini_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks); - break; - case CHIP_MULLINS: - adev->ip_blocks = mullins_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks); - break; - default: - /* FIXME: not supported yet */ - return -EINVAL; + if (adev->enable_virtual_display) { + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->ip_blocks = bonaire_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd); + break; + case CHIP_HAWAII: + adev->ip_blocks = hawaii_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd); + break; + case CHIP_KAVERI: + adev->ip_blocks = kaveri_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd); + break; + case CHIP_KABINI: + adev->ip_blocks = kabini_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd); + break; + case CHIP_MULLINS: + adev->ip_blocks = mullins_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + } else { + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->ip_blocks = bonaire_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks); + break; + case CHIP_HAWAII: + adev->ip_blocks = hawaii_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks); + break; + case CHIP_KAVERI: + adev->ip_blocks = kaveri_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks); + break; + case CHIP_KABINI: + adev->ip_blocks = kabini_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks); + break; + case CHIP_MULLINS: + adev->ip_blocks = mullins_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } } return 0; @@ -2015,13 +2384,13 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = { .read_disabled_bios = &cik_read_disabled_bios, .read_bios_from_rom = &cik_read_bios_from_rom, + .detect_hw_virtualization = cik_detect_hw_virtualization, .read_register = 
&cik_read_register, .reset = &cik_asic_reset, .set_vga_state = &cik_vga_set_state, .get_xclk = &cik_get_xclk, .set_uvd_clocks = &cik_set_uvd_clocks, .set_vce_clocks = &cik_set_vce_clocks, - .get_virtual_caps = &cik_get_virtual_caps, }; static int cik_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 77fdd9911c3c..cb952acc7133 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -695,24 +695,16 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; - - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, - SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + unsigned bytes = count * 8; + + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -720,39 +712,27 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). */ -static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, - SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } + unsigned ndw = count * 2; + + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } @@ -768,40 +748,21 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, * * Update the page tables using sDMA (CIK). 
*/ -static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** @@ -887,6 +848,22 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ } +static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring) +{ + return + 7 + 4; /* cik_sdma_ring_emit_ib */ +} + +static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring) +{ + return + 6 + /* cik_sdma_ring_emit_hdp_flush */ + 3 + /* cik_sdma_ring_emit_hdp_invalidate */ + 6 + /* cik_sdma_ring_emit_pipeline_sync */ + 12 + /* cik_sdma_ring_emit_vm_flush */ + 9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */ +} + static void cik_enable_sdma_mgcg(struct amdgpu_device *adev, bool enable) { @@ -1262,6 +1239,8 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { .test_ib = cik_sdma_ring_test_ib, .insert_nop = cik_sdma_ring_insert_nop, .pad_ib = cik_sdma_ring_pad_ib, + .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size, + .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size, }; static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index c4f6f00d62bc..8659852aea9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h @@ -562,4 +562,40 @@ enum { MTYPE_NONCACHED = 3 }; +/* mmPA_SC_RASTER_CONFIG mask */ +#define RB_MAP_PKR0(x) ((x) << 0) +#define RB_MAP_PKR0_MASK (0x3 << 0) +#define RB_MAP_PKR1(x) ((x) << 2) +#define RB_MAP_PKR1_MASK (0x3 << 2) +#define RB_XSEL2(x) ((x) << 4) +#define RB_XSEL2_MASK (0x3 << 4) +#define RB_XSEL (1 << 6) +#define RB_YSEL (1 << 7) +#define PKR_MAP(x) ((x) << 8) +#define PKR_MAP_MASK (0x3 << 8) +#define PKR_XSEL(x) ((x) << 10) +#define PKR_XSEL_MASK (0x3 << 10) +#define PKR_YSEL(x) ((x) << 12) +#define PKR_YSEL_MASK (0x3 << 12) +#define SC_MAP(x) ((x) << 16) +#define SC_MAP_MASK (0x3 << 16) +#define SC_XSEL(x) ((x) << 18) +#define SC_XSEL_MASK (0x3 << 18) +#define 
SC_YSEL(x) ((x) << 20) +#define SC_YSEL_MASK (0x3 << 20) +#define SE_MAP(x) ((x) << 24) +#define SE_MAP_MASK (0x3 << 24) +#define SE_XSEL(x) ((x) << 26) +#define SE_XSEL_MASK (0x3 << 26) +#define SE_YSEL(x) ((x) << 28) +#define SE_YSEL_MASK (0x3 << 28) + +/* mmPA_SC_RASTER_CONFIG_1 mask */ +#define SE_PAIR_MAP(x) ((x) << 0) +#define SE_PAIR_MAP_MASK (0x3 << 0) +#define SE_PAIR_XSEL(x) ((x) << 2) +#define SE_PAIR_XSEL_MASK (0x3 << 2) +#define SE_PAIR_YSEL(x) ((x) << 4) +#define SE_PAIR_YSEL_MASK (0x3 << 4) + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 2a11413ed54a..f80a0834e889 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -44,6 +44,7 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate); static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); +static void cz_dpm_fini(struct amdgpu_device *adev); static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps) { @@ -350,6 +351,8 @@ static int cz_parse_power_table(struct amdgpu_device *adev) ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL); if (ps == NULL) { + for (j = 0; j < i; j++) + kfree(adev->pm.dpm.ps[j].ps_priv); kfree(adev->pm.dpm.ps); return -ENOMEM; } @@ -409,11 +412,11 @@ static int cz_dpm_init(struct amdgpu_device *adev) ret = amdgpu_get_platform_caps(adev); if (ret) - return ret; + goto err; ret = amdgpu_parse_extended_power_table(adev); if (ret) - return ret; + goto err; pi->sram_end = SMC_RAM_END; @@ -435,7 +438,11 @@ static int cz_dpm_init(struct amdgpu_device *adev) pi->caps_td_ramping = true; pi->caps_tcp_ramping = true; } - pi->caps_sclk_ds = true; + if (amdgpu_sclk_deep_sleep_en) + pi->caps_sclk_ds = true; + else + pi->caps_sclk_ds = false; + pi->voting_clients = 0x00c00033; pi->auto_thermal_throttling_enabled = true; pi->bapm_enabled = false; @@ -463,23 +470,26 @@ static int cz_dpm_init(struct amdgpu_device *adev) ret = cz_parse_sys_info_table(adev); if (ret) - return ret; + goto err; cz_patch_voltage_values(adev); cz_construct_boot_state(adev); ret = cz_parse_power_table(adev); if (ret) - return ret; + goto err; ret = cz_process_firmware_header(adev); if (ret) - return ret; + goto err; pi->dpm_enabled = true; pi->uvd_dynamic_pg = false; return 0; +err: + cz_dpm_fini(adev); + return ret; } static void cz_dpm_fini(struct amdgpu_device *adev) @@ -668,17 +678,12 @@ static void cz_reset_ap_mask(struct amdgpu_device *adev) struct cz_power_info *pi = cz_get_pi(adev); pi->active_process_mask = 0; - } static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev, void **table) { - int ret = 0; - - ret = cz_smu_download_pptable(adev, table); - - return ret; + return cz_smu_download_pptable(adev, table); } static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev) @@ -818,9 +823,9 @@ static void cz_init_sclk_limit(struct amdgpu_device *adev) pi->sclk_dpm.hard_min_clk = 0; cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel); level = cz_get_argument(adev); - if (level < table->count) + if (level < table->count) { clock = table->entries[level].clk; - else { + } else { DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n"); clock = table->entries[table->count - 1].clk; } @@ -846,9 +851,9 @@ static void cz_init_uvd_limit(struct amdgpu_device *adev) pi->uvd_dpm.hard_min_clk = 0; cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel); level = cz_get_argument(adev); - if (level < table->count) + if (level < table->count) { clock = table->entries[level].vclk; - else { + } else { 
DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n"); clock = table->entries[table->count - 1].vclk; } @@ -874,9 +879,9 @@ static void cz_init_vce_limit(struct amdgpu_device *adev) pi->vce_dpm.hard_min_clk = table->entries[0].ecclk; cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel); level = cz_get_argument(adev); - if (level < table->count) + if (level < table->count) { clock = table->entries[level].ecclk; - else { + } else { /* future BIOS would fix this error */ DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n"); clock = table->entries[table->count - 1].ecclk; @@ -903,9 +908,9 @@ static void cz_init_acp_limit(struct amdgpu_device *adev) pi->acp_dpm.hard_min_clk = 0; cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel); level = cz_get_argument(adev); - if (level < table->count) + if (level < table->count) { clock = table->entries[level].clk; - else { + } else { DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n"); clock = table->entries[table->count - 1].clk; } @@ -930,7 +935,6 @@ static void cz_init_sclk_threshold(struct amdgpu_device *adev) struct cz_power_info *pi = cz_get_pi(adev); pi->low_sclk_interrupt_threshold = 0; - } static void cz_dpm_setup_asic(struct amdgpu_device *adev) @@ -1203,7 +1207,7 @@ static int cz_enable_didt(struct amdgpu_device *adev, bool enable) int ret; if (pi->caps_sq_ramping || pi->caps_db_ramping || - pi->caps_td_ramping || pi->caps_tcp_ramping) { + pi->caps_td_ramping || pi->caps_tcp_ramping) { if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) { ret = cz_disable_cgpg(adev); if (ret) { @@ -1277,7 +1281,7 @@ static void cz_apply_state_adjust_rules(struct amdgpu_device *adev, ps->force_high = false; ps->need_dfs_bypass = true; pi->video_start = new_rps->dclk || new_rps->vclk || - new_rps->evclk || new_rps->ecclk; + new_rps->evclk || new_rps->ecclk; if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) @@ -1335,7 +1339,6 @@ static int cz_dpm_enable(struct amdgpu_device *adev) } cz_reset_acp_boot_level(adev); - cz_update_current_ps(adev, adev->pm.dpm.boot_ps); return 0; @@ -1665,7 +1668,6 @@ static void cz_dpm_post_set_power_state(struct amdgpu_device *adev) struct amdgpu_ps *ps = &pi->requested_rps; cz_update_current_ps(adev, ps); - } static int cz_dpm_force_highest(struct amdgpu_device *adev) @@ -2108,29 +2110,58 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) /* disable clockgating so we can properly shut down the block */ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n"); + return; + } + /* shutdown the UVD block */ ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE); - /* XXX: check for errors */ + + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n"); + return; + } } cz_update_uvd_dpm(adev, gate); - if (pi->caps_uvd_pg) + if (pi->caps_uvd_pg) { /* power off the UVD block */ - cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF); + ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF); + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n"); + return; + } + } } else { if (pi->caps_uvd_pg) { /* power on the UVD block */ if (pi->uvd_dynamic_pg) - cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1); + ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1); else - cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); + ret 
= cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); + + if (ret) { + DRM_ERROR("UVD DPM Power Gating Failed to send SMU PowerON message\n"); + return; + } + /* re-init the UVD block */ ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_UNGATE); + + if (ret) { + DRM_ERROR("UVD DPM Power Gating Failed to set powergating state\n"); + return; + } + /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_GATE); - /* XXX: check for errors */ + if (ret) { + DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n"); + return; + } } cz_update_uvd_dpm(adev, gate); } @@ -2168,7 +2199,6 @@ static int cz_update_vce_dpm(struct amdgpu_device *adev) /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ if (pi->caps_stable_power_state) { pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk; - } else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */ /* leave it as set by user */ /*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/ diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c index ac7fee7b7eca..aed7033c0973 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c @@ -29,6 +29,8 @@ #include "cz_smumgr.h" #include "smu_ucode_xfer_cz.h" #include "amdgpu_ucode.h" +#include "cz_dpm.h" +#include "vi_dpm.h" #include "smu/smu_8_0_d.h" #include "smu/smu_8_0_sh_mask.h" @@ -48,7 +50,7 @@ static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev) return priv; } -int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg) +static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg) { int i; u32 content = 0, tmp; @@ -99,13 +101,6 @@ int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg) return 0; } -int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev, - u16 msg, u32 parameter) -{ - WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter); - return cz_send_msg_to_smc_async(adev, msg); -} - int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, u16 msg, u32 parameter) { @@ -140,7 +135,7 @@ int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, return 0; } -int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, +static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, u32 value, u32 limit) { int ret; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index c1b04e9aab57..613ebb7ed50f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -221,7 +221,7 @@ static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc) */ static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc) { - unsigned i = 0; + unsigned i = 100; if (crtc >= adev->mode_info.num_crtc) return; @@ -233,14 +233,16 @@ static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc) * wait for another frame. 
*/ while (dce_v10_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v10_0_is_counter_moving(adev, crtc)) break; } } while (!dce_v10_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v10_0_is_counter_moving(adev, crtc)) break; } @@ -425,16 +427,6 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { - /* don't try to enable hpd on eDP or LVDS avoid breaking the - * aux dp channel on imac and help (but not completely fix) - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 - * also avoid interrupt storms during dpms. - */ - continue; - } - switch (amdgpu_connector->hpd.hpd) { case AMDGPU_HPD_1: idx = 0; @@ -458,6 +450,19 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) continue; } + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. + */ + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); + continue; + } + tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); @@ -646,8 +651,8 @@ static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev, if (save->crtc_enabled[i]) { tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); + if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) { + tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0); WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); @@ -712,6 +717,45 @@ static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_FIJI: + case CHIP_TONGA: + num_crtc = 6; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v10_0_disable_dce(struct amdgpu_device *adev) +{ + /*Disable VGA render and enabled crtc, if has DCE engine*/ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v10_0_set_vga_render_state(adev, false); + + /*Disable crtc*/ + for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void dce_v10_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -2063,7 +2107,7 @@ 
static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, struct amdgpu_framebuffer *amdgpu_fb; struct drm_framebuffer *target_fb; struct drm_gem_object *obj; - struct amdgpu_bo *rbo; + struct amdgpu_bo *abo; uint64_t fb_location, tiling_flags; uint32_t fb_format, fb_pitch_pixels; u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); @@ -2071,6 +2115,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; + char *format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -2090,23 +2135,23 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, * just update base pointers */ obj = amdgpu_fb->obj; - rbo = gem_to_amdgpu_bo(obj); - r = amdgpu_bo_reserve(rbo, false); + abo = gem_to_amdgpu_bo(obj); + r = amdgpu_bo_reserve(abo, false); if (unlikely(r != 0)) return r; if (atomic) { - fb_location = amdgpu_bo_gpu_offset(rbo); + fb_location = amdgpu_bo_gpu_offset(abo); } else { - r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); + r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); if (unlikely(r != 0)) { - amdgpu_bo_unreserve(rbo); + amdgpu_bo_unreserve(abo); return -EINVAL; } } - amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); - amdgpu_bo_unreserve(rbo); + amdgpu_bo_get_tiling_flags(abo, &tiling_flags); + amdgpu_bo_unreserve(abo); pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); @@ -2182,8 +2227,9 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, bypass_lut = true; break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format)); + format_name = drm_get_format_name(target_fb->pixel_format); + DRM_ERROR("Unsupported screen format %s\n", format_name); + kfree(format_name); return -EINVAL; } @@ -2275,17 +2321,17 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); - rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); - r = amdgpu_bo_reserve(rbo, false); + abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(abo, false); if (unlikely(r != 0)) return r; - amdgpu_bo_unpin(rbo); - amdgpu_bo_unreserve(rbo); + amdgpu_bo_unpin(abo); + amdgpu_bo_unreserve(abo); } /* Bytes per pixel may have changed */ @@ -2698,7 +2744,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { .gamma_set = dce_v10_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v10_0_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2765,16 +2811,16 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc) if (crtc->primary->fb) { int r; struct amdgpu_framebuffer *amdgpu_fb; - struct amdgpu_bo *rbo; + struct amdgpu_bo *abo; amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); - rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); - r = amdgpu_bo_reserve(rbo, false); + abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(abo, false); if (unlikely(r)) - DRM_ERROR("failed to reserve rbo before unpin\n"); + DRM_ERROR("failed to reserve abo before 
unpin\n"); else { - amdgpu_bo_unpin(rbo); - amdgpu_bo_unreserve(rbo); + amdgpu_bo_unpin(abo); + amdgpu_bo_unreserve(abo); } } /* disable the GRPH */ @@ -2962,10 +3008,11 @@ static int dce_v10_0_early_init(void *handle) dce_v10_0_set_display_funcs(adev); dce_v10_0_set_irq_funcs(adev); + adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_FIJI: case CHIP_TONGA: - adev->mode_info.num_crtc = 6; /* XXX 7??? */ adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; break; @@ -3141,11 +3188,26 @@ static int dce_v10_0_wait_for_idle(void *handle) return 0; } +static int dce_v10_0_check_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (dce_v10_0_is_display_hung(adev)) + adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true; + else + adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false; + + return 0; +} + static int dce_v10_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0, tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) + return 0; + if (dce_v10_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3512,6 +3574,7 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = { .resume = dce_v10_0_resume, .is_idle = dce_v10_0_is_idle, .wait_for_idle = dce_v10_0_wait_for_idle, + .check_soft_reset = dce_v10_0_check_soft_reset, .soft_reset = dce_v10_0_soft_reset, .set_clockgating_state = dce_v10_0_set_clockgating_state, .set_powergating_state = dce_v10_0_set_powergating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h index 1bfa48ddd8a6..e3dc04d293e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v10_0_ip_funcs; +void dce_v10_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index d4bf133908b1..f264b8f17ad1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -443,16 +443,6 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { - /* don't try to enable hpd on eDP or LVDS avoid breaking the - * aux dp channel on imac and help (but not completely fix) - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 - * also avoid interrupt storms during dpms. - */ - continue; - } - switch (amdgpu_connector->hpd.hpd) { case AMDGPU_HPD_1: idx = 0; @@ -476,6 +466,19 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) continue; } + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. 
+ */ + tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); + tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); + WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); + continue; + } + tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); @@ -673,6 +676,53 @@ static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_CARRIZO: + num_crtc = 3; + break; + case CHIP_STONEY: + num_crtc = 2; + break; + case CHIP_POLARIS10: + num_crtc = 6; + break; + case CHIP_POLARIS11: + num_crtc = 5; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v11_0_disable_dce(struct amdgpu_device *adev) +{ + /*Disable VGA render and enabled crtc, if has DCE engine*/ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v11_0_set_vga_render_state(adev, false); + + /*Disable crtc*/ + for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void dce_v11_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -2038,7 +2088,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, struct amdgpu_framebuffer *amdgpu_fb; struct drm_framebuffer *target_fb; struct drm_gem_object *obj; - struct amdgpu_bo *rbo; + struct amdgpu_bo *abo; uint64_t fb_location, tiling_flags; uint32_t fb_format, fb_pitch_pixels; u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); @@ -2046,6 +2096,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; + char *format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -2065,23 +2116,23 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, * just update base pointers */ obj = amdgpu_fb->obj; - rbo = gem_to_amdgpu_bo(obj); - r = amdgpu_bo_reserve(rbo, false); + abo = gem_to_amdgpu_bo(obj); + r = amdgpu_bo_reserve(abo, false); if (unlikely(r != 0)) return r; if (atomic) { - fb_location = amdgpu_bo_gpu_offset(rbo); + fb_location = amdgpu_bo_gpu_offset(abo); } else { - r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); + r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); if (unlikely(r != 0)) { - amdgpu_bo_unreserve(rbo); + amdgpu_bo_unreserve(abo); return -EINVAL; } } - amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); - amdgpu_bo_unreserve(rbo); + amdgpu_bo_get_tiling_flags(abo, &tiling_flags); + amdgpu_bo_unreserve(abo); pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); @@ -2157,8 +2208,9 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, bypass_lut = true; break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format)); + format_name = drm_get_format_name(target_fb->pixel_format); + DRM_ERROR("Unsupported screen format %s\n", format_name); + kfree(format_name); return -EINVAL; } @@ -2250,17 +2302,17 
@@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); - rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); - r = amdgpu_bo_reserve(rbo, false); + abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(abo, false); if (unlikely(r != 0)) return r; - amdgpu_bo_unpin(rbo); - amdgpu_bo_unreserve(rbo); + amdgpu_bo_unpin(abo); + amdgpu_bo_unreserve(abo); } /* Bytes per pixel may have changed */ @@ -2708,7 +2760,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { .gamma_set = dce_v11_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v11_0_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2775,16 +2827,16 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc) if (crtc->primary->fb) { int r; struct amdgpu_framebuffer *amdgpu_fb; - struct amdgpu_bo *rbo; + struct amdgpu_bo *abo; amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); - rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); - r = amdgpu_bo_reserve(rbo, false); + abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(abo, false); if (unlikely(r)) - DRM_ERROR("failed to reserve rbo before unpin\n"); + DRM_ERROR("failed to reserve abo before unpin\n"); else { - amdgpu_bo_unpin(rbo); - amdgpu_bo_unreserve(rbo); + amdgpu_bo_unpin(abo); + amdgpu_bo_unreserve(abo); } } /* disable the GRPH */ @@ -2999,24 +3051,22 @@ static int dce_v11_0_early_init(void *handle) dce_v11_0_set_display_funcs(adev); dce_v11_0_set_irq_funcs(adev); + adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_CARRIZO: - adev->mode_info.num_crtc = 3; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; case CHIP_STONEY: - adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; case CHIP_POLARIS10: - adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_POLARIS11: - adev->mode_info.num_crtc = 5; adev->mode_info.num_hpd = 5; adev->mode_info.num_dig = 5; break; @@ -3109,6 +3159,7 @@ static int dce_v11_0_sw_fini(void *handle) dce_v11_0_afmt_fini(adev); + drm_mode_config_cleanup(adev->ddev); adev->mode_info.mode_config_initialized = false; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h index 84e4618f5253..1f58a65ba2ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v11_0_ip_funcs; +void dce_v11_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c new file mode 100644 index 000000000000..b948d6cb1399 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -0,0 +1,3176 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_i2c.h" +#include "atom.h" +#include "amdgpu_atombios.h" +#include "atombios_crtc.h" +#include "atombios_encoders.h" +#include "amdgpu_pll.h" +#include "amdgpu_connectors.h" +#include "si/si_reg.h" +#include "si/sid.h" + +static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev); +static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev); + +static const u32 crtc_offsets[6] = +{ + SI_CRTC0_REGISTER_OFFSET, + SI_CRTC1_REGISTER_OFFSET, + SI_CRTC2_REGISTER_OFFSET, + SI_CRTC3_REGISTER_OFFSET, + SI_CRTC4_REGISTER_OFFSET, + SI_CRTC5_REGISTER_OFFSET +}; + +static const uint32_t dig_offsets[] = { + SI_CRTC0_REGISTER_OFFSET, + SI_CRTC1_REGISTER_OFFSET, + SI_CRTC2_REGISTER_OFFSET, + SI_CRTC3_REGISTER_OFFSET, + SI_CRTC4_REGISTER_OFFSET, + SI_CRTC5_REGISTER_OFFSET, + (0x13830 - 0x7030) >> 2, +}; + +static const struct { + uint32_t reg; + uint32_t vblank; + uint32_t vline; + uint32_t hpd; + +} interrupt_status_offsets[6] = { { + .reg = DISP_INTERRUPT_STATUS, + .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK +}, { + .reg = DISP_INTERRUPT_STATUS_CONTINUE, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK +}, { + .reg = DISP_INTERRUPT_STATUS_CONTINUE2, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK +}, { + .reg = DISP_INTERRUPT_STATUS_CONTINUE3, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK +}, { + .reg = DISP_INTERRUPT_STATUS_CONTINUE4, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, + .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK +}, { + .reg = DISP_INTERRUPT_STATUS_CONTINUE5, + .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, + .vline = 
DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, + .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK +} }; + +static const uint32_t hpd_int_control_offsets[6] = { + DC_HPD1_INT_CONTROL, + DC_HPD2_INT_CONTROL, + DC_HPD3_INT_CONTROL, + DC_HPD4_INT_CONTROL, + DC_HPD5_INT_CONTROL, + DC_HPD6_INT_CONTROL, +}; + +static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev, + u32 block_offset, u32 reg) +{ + DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n"); + return 0; +} + +static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev, + u32 block_offset, u32 reg, u32 v) +{ + DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n"); +} + +static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc) +{ + if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK) + return true; + else + return false; +} + +static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc) +{ + u32 pos1, pos2; + + pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]); + pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]); + + if (pos1 != pos2) + return true; + else + return false; +} + +/** + * dce_v6_0_wait_for_vblank - vblank wait asic callback. + * + * @crtc: crtc to wait for vblank on + * + * Wait for vblank on the requested crtc (evergreen+). + */ +static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc) +{ + unsigned i = 100; + + if (crtc >= adev->mode_info.num_crtc) + return; + + if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN)) + return; + + /* depending on when we hit vblank, we may be close to active; if so, + * wait for another frame. + */ + while (dce_v6_0_is_in_vblank(adev, crtc)) { + if (i++ == 100) { + i = 0; + if (!dce_v6_0_is_counter_moving(adev, crtc)) + break; + } + } + + while (!dce_v6_0_is_in_vblank(adev, crtc)) { + if (i++ == 100) { + i = 0; + if (!dce_v6_0_is_counter_moving(adev, crtc)) + break; + } + } +} + +static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) + return 0; + else + return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); +} + +static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + +/** + * dce_v6_0_page_flip - pageflip callback. + * + * @adev: amdgpu_device pointer + * @crtc_id: crtc to cleanup pageflip on + * @crtc_base: new address of the crtc (GPU MC address) + * + * Does the actual pageflip (evergreen+). + * During vblank we take the crtc lock and wait for the update_pending + * bit to go high, when it does, we release the lock, and allow the + * double buffered update to take place. + * Returns the current update pending status. + */ +static void dce_v6_0_page_flip(struct amdgpu_device *adev, + int crtc_id, u64 crtc_base, bool async) +{ + struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + + /* flip at hsync for async, default is vsync */ + WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ? 
+ EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0); + /* update the scanout addresses */ + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(crtc_base)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + (u32)crtc_base); + + /* post the write */ + RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset); +} + +static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, + u32 *vbl, u32 *position) +{ + if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) + return -EINVAL; + *vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]); + *position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]); + + return 0; + +} + +/** + * dce_v6_0_hpd_sense - hpd sense callback. + * + * @adev: amdgpu_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Checks if a digital monitor is connected (evergreen+). + * Returns true if connected, false if not connected. + */ +static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + bool connected = false; + + switch (hpd) { + case AMDGPU_HPD_1: + if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case AMDGPU_HPD_2: + if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case AMDGPU_HPD_3: + if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case AMDGPU_HPD_4: + if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case AMDGPU_HPD_5: + if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + case AMDGPU_HPD_6: + if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) + connected = true; + break; + default: + break; + } + + return connected; +} + +/** + * dce_v6_0_hpd_set_polarity - hpd set polarity callback. + * + * @adev: amdgpu_device pointer + * @hpd: hpd (hotplug detect) pin + * + * Set the polarity of the hpd pin (evergreen+). + */ +static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + u32 tmp; + bool connected = dce_v6_0_hpd_sense(adev, hpd); + + switch (hpd) { + case AMDGPU_HPD_1: + tmp = RREG32(DC_HPD1_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD1_INT_CONTROL, tmp); + break; + case AMDGPU_HPD_2: + tmp = RREG32(DC_HPD2_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD2_INT_CONTROL, tmp); + break; + case AMDGPU_HPD_3: + tmp = RREG32(DC_HPD3_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD3_INT_CONTROL, tmp); + break; + case AMDGPU_HPD_4: + tmp = RREG32(DC_HPD4_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD4_INT_CONTROL, tmp); + break; + case AMDGPU_HPD_5: + tmp = RREG32(DC_HPD5_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD5_INT_CONTROL, tmp); + break; + case AMDGPU_HPD_6: + tmp = RREG32(DC_HPD6_INT_CONTROL); + if (connected) + tmp &= ~DC_HPDx_INT_POLARITY; + else + tmp |= DC_HPDx_INT_POLARITY; + WREG32(DC_HPD6_INT_CONTROL, tmp); + break; + default: + break; + } +} + +/** + * dce_v6_0_hpd_init - hpd setup callback. + * + * @adev: amdgpu_device pointer + * + * Setup the hpd pins used by the card (evergreen+). + * Enable the pin, set the polarity, and enable the hpd interrupts. 
+ */ +static void dce_v6_0_hpd_init(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | + DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + switch (amdgpu_connector->hpd.hpd) { + case AMDGPU_HPD_1: + WREG32(DC_HPD1_CONTROL, tmp); + break; + case AMDGPU_HPD_2: + WREG32(DC_HPD2_CONTROL, tmp); + break; + case AMDGPU_HPD_3: + WREG32(DC_HPD3_CONTROL, tmp); + break; + case AMDGPU_HPD_4: + WREG32(DC_HPD4_CONTROL, tmp); + break; + case AMDGPU_HPD_5: + WREG32(DC_HPD5_CONTROL, tmp); + break; + case AMDGPU_HPD_6: + WREG32(DC_HPD6_CONTROL, tmp); + break; + default: + break; + } + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. + */ + u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; + + switch (amdgpu_connector->hpd.hpd) { + case AMDGPU_HPD_1: + dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL; + break; + case AMDGPU_HPD_2: + dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL; + break; + case AMDGPU_HPD_3: + dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL; + break; + case AMDGPU_HPD_4: + dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL; + break; + case AMDGPU_HPD_5: + dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL; + break; + case AMDGPU_HPD_6: + dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL; + break; + default: + continue; + } + + dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); + dc_hpd_int_cntl &= ~DC_HPDx_INT_EN; + WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); + continue; + } + + dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); + amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); + } + +} + +/** + * dce_v6_0_hpd_fini - hpd tear down callback. + * + * @adev: amdgpu_device pointer + * + * Tear down the hpd pins used by the card (evergreen+). + * Disable the hpd interrupts. 
+ */ +static void dce_v6_0_hpd_fini(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev->ddev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + + switch (amdgpu_connector->hpd.hpd) { + case AMDGPU_HPD_1: + WREG32(DC_HPD1_CONTROL, 0); + break; + case AMDGPU_HPD_2: + WREG32(DC_HPD2_CONTROL, 0); + break; + case AMDGPU_HPD_3: + WREG32(DC_HPD3_CONTROL, 0); + break; + case AMDGPU_HPD_4: + WREG32(DC_HPD4_CONTROL, 0); + break; + case AMDGPU_HPD_5: + WREG32(DC_HPD5_CONTROL, 0); + break; + case AMDGPU_HPD_6: + WREG32(DC_HPD6_CONTROL, 0); + break; + default: + break; + } + amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); + } +} + +static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev) +{ + return SI_DC_GPIO_HPD_A; +} + +static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev) +{ + DRM_INFO("xxxx: dce_v6_0_is_display_hung ----no imp!!!!!\n"); + + return true; +} + +static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) + return 0; + else + return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); +} + +static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 crtc_enabled, tmp, frame_count; + int i, j; + + save->vga_render_control = RREG32(VGA_RENDER_CONTROL); + save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); + + /* disable VGA render */ + WREG32(VGA_RENDER_CONTROL, 0); + + /* blank the display controllers */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN; + if (crtc_enabled) { + save->crtc_enabled[i] = true; + tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); + + if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { + dce_v6_0_vblank_wait(adev, i); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; + WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + /* wait for the next frame */ + frame_count = evergreen_get_vblank_counter(adev, i); + for (j = 0; j < adev->usec_timeout; j++) { + if (evergreen_get_vblank_counter(adev, i) != frame_count) + break; + udelay(1); + } + + /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); + tmp &= ~EVERGREEN_CRTC_MASTER_EN; + WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); + save->crtc_enabled[i] = false; + /* ***** */ + } else { + save->crtc_enabled[i] = false; + } + } +} + +static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 tmp; + int i, j; + + /* update crtc base addresses */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + upper_32_bits(adev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + upper_32_bits(adev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], + (u32)adev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], + (u32)adev->mc.vram_start); + } + + 
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); + WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start); + + /* unlock regs and wait for update */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (save->crtc_enabled[i]) { + tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); + if ((tmp & 0x7) != 3) { + tmp &= ~0x7; + tmp |= 0x3; + WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); + } + tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); + if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) { + tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; + WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp); + } + tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]); + if (tmp & 1) { + tmp &= ~1; + WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); + } + for (j = 0; j < adev->usec_timeout; j++) { + tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); + if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0) + break; + udelay(1); + } + } + } + + /* Unlock vga access */ + WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); + mdelay(1); + WREG32(VGA_RENDER_CONTROL, save->vga_render_control); + +} + +static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev, + bool render) +{ + if (!render) + WREG32(R_000300_VGA_RENDER_CONTROL, + RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); + +} + +static void dce_v6_0_program_fmt(struct drm_encoder *encoder) +{ + + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); + int bpc = 0; + u32 tmp = 0; + enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE; + + if (connector) { + struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + bpc = amdgpu_connector_get_monitor_bpc(connector); + dither = amdgpu_connector->dither; + } + + /* LVDS FMT is set up by atom */ + if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) + return; + + if (bpc == 0) + return; + + + switch (bpc) { + case 6: + if (dither == AMDGPU_FMT_DITHER_ENABLE) + /* XXX sort out optimal dither settings */ + tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE | + FMT_SPATIAL_DITHER_EN); + else + tmp |= FMT_TRUNCATE_EN; + break; + case 8: + if (dither == AMDGPU_FMT_DITHER_ENABLE) + /* XXX sort out optimal dither settings */ + tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE | + FMT_RGB_RANDOM_ENABLE | + FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH); + else + tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH); + break; + case 10: + default: + /* not needed */ + break; + } + + WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); +} + +/** + * cik_get_number_of_dram_channels - get the number of dram channels + * + * @adev: amdgpu_device pointer + * + * Look up the number of video ram channels (CIK). 
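+ * e.g. a NOOFCHAN field value of 3 maps to 8 channels.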
+ * Used for display watermark bandwidth calculations + * Returns the number of dram channels + */ +static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(MC_SHARED_CHMAP); + + switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) { + case 0: + default: + return 1; + case 1: + return 2; + case 2: + return 4; + case 3: + return 8; + case 4: + return 3; + case 5: + return 6; + case 6: + return 10; + case 7: + return 12; + case 8: + return 16; + } +} + +struct dce6_wm_params { + u32 dram_channels; /* number of dram channels */ + u32 yclk; /* bandwidth per dram data pin in kHz */ + u32 sclk; /* engine clock in kHz */ + u32 disp_clk; /* display clock in kHz */ + u32 src_width; /* viewport width */ + u32 active_time; /* active display time in ns */ + u32 blank_time; /* blank time in ns */ + bool interlaced; /* mode is interlaced */ + fixed20_12 vsc; /* vertical scale ratio */ + u32 num_heads; /* number of active crtcs */ + u32 bytes_per_pixel; /* bytes per pixel display + overlay */ + u32 lb_size; /* line buffer allocated to pipe */ + u32 vtaps; /* vertical scaler taps */ +}; + +/** + * dce_v6_0_dram_bandwidth - get the dram bandwidth + * + * @wm: watermark calculation data + * + * Calculate the raw dram bandwidth (CIK). + * Used for display watermark bandwidth calculations + * Returns the dram bandwidth in MBytes/s + */ +static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm) +{ + /* Calculate raw DRAM Bandwidth */ + fixed20_12 dram_efficiency; /* 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + dram_efficiency.full = dfixed_const(7); + dram_efficiency.full = dfixed_div(dram_efficiency, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display + * + * @wm: watermark calculation data + * + * Calculate the dram bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the dram bandwidth for display in MBytes/s + */ +static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm) +{ + /* Calculate DRAM Bandwidth and the part allocated to display. */ + fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ + fixed20_12 yclk, dram_channels, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + yclk.full = dfixed_const(wm->yclk); + yclk.full = dfixed_div(yclk, a); + dram_channels.full = dfixed_const(wm->dram_channels * 4); + a.full = dfixed_const(10); + disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */ + disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); + bandwidth.full = dfixed_mul(dram_channels, yclk); + bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v6_0_data_return_bandwidth - get the data return bandwidth + * + * @wm: watermark calculation data + * + * Calculate the data return bandwidth used for display (CIK). 
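+ * e.g. at sclk = 800000 kHz this works out to 32 * 800 * 0.8 = 20480 MBytes/s.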
+ * Used for display watermark bandwidth calculations + * Returns the data return bandwidth in MBytes/s + */ +static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm) +{ + /* Calculate the display Data return Bandwidth */ + fixed20_12 return_efficiency; /* 0.8 */ + fixed20_12 sclk, bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + sclk.full = dfixed_const(wm->sclk); + sclk.full = dfixed_div(sclk, a); + a.full = dfixed_const(10); + return_efficiency.full = dfixed_const(8); + return_efficiency.full = dfixed_div(return_efficiency, a); + a.full = dfixed_const(32); + bandwidth.full = dfixed_mul(a, sclk); + bandwidth.full = dfixed_mul(bandwidth, return_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth + * + * @wm: watermark calculation data + * + * Calculate the dmif bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the dmif bandwidth in MBytes/s + */ +static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm) +{ + /* Calculate the DMIF Request Bandwidth */ + fixed20_12 disp_clk_request_efficiency; /* 0.8 */ + fixed20_12 disp_clk, bandwidth; + fixed20_12 a, b; + + a.full = dfixed_const(1000); + disp_clk.full = dfixed_const(wm->disp_clk); + disp_clk.full = dfixed_div(disp_clk, a); + a.full = dfixed_const(32); + b.full = dfixed_mul(a, disp_clk); + + a.full = dfixed_const(10); + disp_clk_request_efficiency.full = dfixed_const(8); + disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); + + bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v6_0_available_bandwidth - get the min available bandwidth + * + * @wm: watermark calculation data + * + * Calculate the min available bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the min available bandwidth in MBytes/s + */ +static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm) +{ + /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */ + u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm); + u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm); + u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm); + + return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); +} + +/** + * dce_v6_0_average_bandwidth - get the average available bandwidth + * + * @wm: watermark calculation data + * + * Calculate the average available bandwidth used for display (CIK). + * Used for display watermark bandwidth calculations + * Returns the average available bandwidth in MBytes/s + */ +static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm) +{ + /* Calculate the display mode Average Bandwidth + * DisplayMode should contain the source and destination dimensions, + * timing, etc. 
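+ * i.e. average bandwidth = src_width * bytes_per_pixel * vsc / line_time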
+ */ + fixed20_12 bpp; + fixed20_12 line_time; + fixed20_12 src_width; + fixed20_12 bandwidth; + fixed20_12 a; + + a.full = dfixed_const(1000); + line_time.full = dfixed_const(wm->active_time + wm->blank_time); + line_time.full = dfixed_div(line_time, a); + bpp.full = dfixed_const(wm->bytes_per_pixel); + src_width.full = dfixed_const(wm->src_width); + bandwidth.full = dfixed_mul(src_width, bpp); + bandwidth.full = dfixed_mul(bandwidth, wm->vsc); + bandwidth.full = dfixed_div(bandwidth, line_time); + + return dfixed_trunc(bandwidth); +} + +/** + * dce_v6_0_latency_watermark - get the latency watermark + * + * @wm: watermark calculation data + * + * Calculate the latency watermark (CIK). + * Used for display watermark bandwidth calculations + * Returns the latency watermark in ns + */ +static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm) +{ + /* First calculate the latency in ns */ + u32 mc_latency = 2000; /* 2000 ns. */ + u32 available_bandwidth = dce_v6_0_available_bandwidth(wm); + u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; + u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; + u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ + u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + + (wm->num_heads * cursor_line_pair_return_time); + u32 latency = mc_latency + other_heads_data_return_time + dc_latency; + u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; + u32 tmp, dmif_size = 12288; + fixed20_12 a, b, c; + + if (wm->num_heads == 0) + return 0; + + a.full = dfixed_const(2); + b.full = dfixed_const(1); + if ((wm->vsc.full > a.full) || + ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || + (wm->vtaps >= 5) || + ((wm->vsc.full >= a.full) && wm->interlaced)) + max_src_lines_per_dst_line = 4; + else + max_src_lines_per_dst_line = 2; + + a.full = dfixed_const(available_bandwidth); + b.full = dfixed_const(wm->num_heads); + a.full = dfixed_div(a, b); + + b.full = dfixed_const(mc_latency + 512); + c.full = dfixed_const(wm->disp_clk); + b.full = dfixed_div(b, c); + + c.full = dfixed_const(dmif_size); + b.full = dfixed_div(c, b); + + tmp = min(dfixed_trunc(a), dfixed_trunc(b)); + + b.full = dfixed_const(1000); + c.full = dfixed_const(wm->disp_clk); + b.full = dfixed_div(c, b); + c.full = dfixed_const(wm->bytes_per_pixel); + b.full = dfixed_mul(b, c); + + lb_fill_bw = min(tmp, dfixed_trunc(b)); + + a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); + b.full = dfixed_const(1000); + c.full = dfixed_const(lb_fill_bw); + b.full = dfixed_div(c, b); + a.full = dfixed_div(a, b); + line_fill_time = dfixed_trunc(a); + + if (line_fill_time < wm->active_time) + return latency; + else + return latency + (line_fill_time - wm->active_time); + +} + +/** + * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check + * average and available dram bandwidth + * + * @wm: watermark calculation data + * + * Check if the display average bandwidth fits in the display + * dram bandwidth (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. 
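+ * i.e. true when the average bandwidth is at most the display dram
+ * bandwidth divided by the number of heads.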
+ */ +static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm) +{ + if (dce_v6_0_average_bandwidth(wm) <= + (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads)) + return true; + else + return false; +} + +/** + * dce_v6_0_average_bandwidth_vs_available_bandwidth - check + * average and available bandwidth + * + * @wm: watermark calculation data + * + * Check if the display average bandwidth fits in the display + * available bandwidth (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. + */ +static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm) +{ + if (dce_v6_0_average_bandwidth(wm) <= + (dce_v6_0_available_bandwidth(wm) / wm->num_heads)) + return true; + else + return false; +} + +/** + * dce_v6_0_check_latency_hiding - check latency hiding + * + * @wm: watermark calculation data + * + * Check latency hiding (CIK). + * Used for display watermark bandwidth calculations + * Returns true if the display fits, false if not. + */ +static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm) +{ + u32 lb_partitions = wm->lb_size / wm->src_width; + u32 line_time = wm->active_time + wm->blank_time; + u32 latency_tolerant_lines; + u32 latency_hiding; + fixed20_12 a; + + a.full = dfixed_const(1); + if (wm->vsc.full > a.full) + latency_tolerant_lines = 1; + else { + if (lb_partitions <= (wm->vtaps + 1)) + latency_tolerant_lines = 1; + else + latency_tolerant_lines = 2; + } + + latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); + + if (dce_v6_0_latency_watermark(wm) <= latency_hiding) + return true; + else + return false; +} + +/** + * dce_v6_0_program_watermarks - program display watermarks + * + * @adev: amdgpu_device pointer + * @amdgpu_crtc: the selected display controller + * @lb_size: line buffer size + * @num_heads: number of display controllers in use + * + * Calculate and program the display watermarks for the + * selected display controller (CIK). 
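+ * Watermarks are computed for both the high (wm A) and low (wm B) clock
+ * levels and the resulting priority marks are written to the
+ * PRIORITY_A_CNT/PRIORITY_B_CNT registers.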
+ */ +static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, + struct amdgpu_crtc *amdgpu_crtc, + u32 lb_size, u32 num_heads) +{ + struct drm_display_mode *mode = &amdgpu_crtc->base.mode; + struct dce6_wm_params wm_low, wm_high; + u32 dram_channels; + u32 pixel_period; + u32 line_time = 0; + u32 latency_watermark_a = 0, latency_watermark_b = 0; + u32 priority_a_mark = 0, priority_b_mark = 0; + u32 priority_a_cnt = PRIORITY_OFF; + u32 priority_b_cnt = PRIORITY_OFF; + u32 tmp, arb_control3; + fixed20_12 a, b, c; + + if (amdgpu_crtc->base.enabled && num_heads && mode) { + pixel_period = 1000000 / (u32)mode->clock; + line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); + priority_a_cnt = 0; + priority_b_cnt = 0; + + dram_channels = si_get_number_of_dram_channels(adev); + + /* watermark for high clocks */ + if (adev->pm.dpm_enabled) { + wm_high.yclk = + amdgpu_dpm_get_mclk(adev, false) * 10; + wm_high.sclk = + amdgpu_dpm_get_sclk(adev, false) * 10; + } else { + wm_high.yclk = adev->pm.current_mclk * 10; + wm_high.sclk = adev->pm.current_sclk * 10; + } + + wm_high.disp_clk = mode->clock; + wm_high.src_width = mode->crtc_hdisplay; + wm_high.active_time = mode->crtc_hdisplay * pixel_period; + wm_high.blank_time = line_time - wm_high.active_time; + wm_high.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_high.interlaced = true; + wm_high.vsc = amdgpu_crtc->vsc; + wm_high.vtaps = 1; + if (amdgpu_crtc->rmx_type != RMX_OFF) + wm_high.vtaps = 2; + wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_high.lb_size = lb_size; + wm_high.dram_channels = dram_channels; + wm_high.num_heads = num_heads; + + if (adev->pm.dpm_enabled) { + /* watermark for low clocks */ + wm_low.yclk = + amdgpu_dpm_get_mclk(adev, true) * 10; + wm_low.sclk = + amdgpu_dpm_get_sclk(adev, true) * 10; + } else { + wm_low.yclk = adev->pm.current_mclk * 10; + wm_low.sclk = adev->pm.current_sclk * 10; + } + + wm_low.disp_clk = mode->clock; + wm_low.src_width = mode->crtc_hdisplay; + wm_low.active_time = mode->crtc_hdisplay * pixel_period; + wm_low.blank_time = line_time - wm_low.active_time; + wm_low.interlaced = false; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + wm_low.interlaced = true; + wm_low.vsc = amdgpu_crtc->vsc; + wm_low.vtaps = 1; + if (amdgpu_crtc->rmx_type != RMX_OFF) + wm_low.vtaps = 2; + wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ + wm_low.lb_size = lb_size; + wm_low.dram_channels = dram_channels; + wm_low.num_heads = num_heads; + + /* set for high clocks */ + latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535); + /* set for low clocks */ + latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535); + + /* possibly force display priority to high */ + /* should really do this at mode validation time... 
*/ + if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || + !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) || + !dce_v6_0_check_latency_hiding(&wm_high) || + (adev->mode_info.disp_priority == 2)) { + DRM_DEBUG_KMS("force priority to high\n"); + priority_a_cnt |= PRIORITY_ALWAYS_ON; + priority_b_cnt |= PRIORITY_ALWAYS_ON; + } + if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || + !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) || + !dce_v6_0_check_latency_hiding(&wm_low) || + (adev->mode_info.disp_priority == 2)) { + DRM_DEBUG_KMS("force priority to high\n"); + priority_a_cnt |= PRIORITY_ALWAYS_ON; + priority_b_cnt |= PRIORITY_ALWAYS_ON; + } + + a.full = dfixed_const(1000); + b.full = dfixed_const(mode->clock); + b.full = dfixed_div(b, a); + c.full = dfixed_const(latency_watermark_a); + c.full = dfixed_mul(c, b); + c.full = dfixed_mul(c, amdgpu_crtc->hsc); + c.full = dfixed_div(c, a); + a.full = dfixed_const(16); + c.full = dfixed_div(c, a); + priority_a_mark = dfixed_trunc(c); + priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK; + + a.full = dfixed_const(1000); + b.full = dfixed_const(mode->clock); + b.full = dfixed_div(b, a); + c.full = dfixed_const(latency_watermark_b); + c.full = dfixed_mul(c, b); + c.full = dfixed_mul(c, amdgpu_crtc->hsc); + c.full = dfixed_div(c, a); + a.full = dfixed_const(16); + c.full = dfixed_div(c, a); + priority_b_mark = dfixed_trunc(c); + priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; + } + + /* select wm A */ + arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset); + tmp = arb_control3; + tmp &= ~LATENCY_WATERMARK_MASK(3); + tmp |= LATENCY_WATERMARK_MASK(1); + WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp); + WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset, + (LATENCY_LOW_WATERMARK(latency_watermark_a) | + LATENCY_HIGH_WATERMARK(line_time))); + /* select wm B */ + tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset); + tmp &= ~LATENCY_WATERMARK_MASK(3); + tmp |= LATENCY_WATERMARK_MASK(2); + WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp); + WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset, + (LATENCY_LOW_WATERMARK(latency_watermark_b) | + LATENCY_HIGH_WATERMARK(line_time))); + /* restore original selection */ + WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3); + + /* write the priority marks */ + WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt); + WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt); + + /* save values for DPM */ + amdgpu_crtc->line_time = line_time; + amdgpu_crtc->wm_high = latency_watermark_a; +} + +/* watermark setup */ +static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev, + struct amdgpu_crtc *amdgpu_crtc, + struct drm_display_mode *mode, + struct drm_display_mode *other_mode) +{ + u32 tmp, buffer_alloc, i; + u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8; + /* + * Line Buffer Setup + * There are 3 line buffers, each one shared by 2 display controllers. + * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between + * the display controllers. The paritioning is done via one of four + * preset allocations specified in bits 21:20: + * 0 - half lb + * 2 - whole lb, other crtc must be disabled + */ + /* this can get tricky if we have two large displays on a paired group + * of crtcs. 
Ideally for multiple large displays we'd assign them to + * non-linked crtcs for maximum line buffer allocation. + */ + if (amdgpu_crtc->base.enabled && mode) { + if (other_mode) { + tmp = 0; /* 1/2 */ + buffer_alloc = 1; + } else { + tmp = 2; /* whole */ + buffer_alloc = 2; + } + } else { + tmp = 0; + buffer_alloc = 0; + } + + WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset, + DC_LB_MEMORY_CONFIG(tmp)); + + WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset, + DMIF_BUFFERS_ALLOCATED(buffer_alloc)); + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & + DMIF_BUFFERS_ALLOCATED_COMPLETED) + break; + udelay(1); + } + + if (amdgpu_crtc->base.enabled && mode) { + switch (tmp) { + case 0: + default: + return 4096 * 2; + case 2: + return 8192 * 2; + } + } + + /* controller not enabled, so no lb used */ + return 0; +} + + +/** + * + * dce_v6_0_bandwidth_update - program display watermarks + * + * @adev: amdgpu_device pointer + * + * Calculate and program the display watermarks and line + * buffer allocation (CIK). + */ +static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev) +{ + struct drm_display_mode *mode0 = NULL; + struct drm_display_mode *mode1 = NULL; + u32 num_heads = 0, lb_size; + int i; + + if (!adev->mode_info.mode_config_initialized) + return; + + amdgpu_update_display_priority(adev); + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->mode_info.crtcs[i]->base.enabled) + num_heads++; + } + for (i = 0; i < adev->mode_info.num_crtc; i += 2) { + mode0 = &adev->mode_info.crtcs[i]->base.mode; + mode1 = &adev->mode_info.crtcs[i+1]->base.mode; + lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1); + dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads); + lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0); + dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads); + } +} +/* +static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev) +{ + int i; + u32 offset, tmp; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + offset = adev->mode_info.audio.pin[i].offset; + tmp = RREG32_AUDIO_ENDPT(offset, + AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); + if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1) + adev->mode_info.audio.pin[i].connected = false; + else + adev->mode_info.audio.pin[i].connected = true; + } + +} + +static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev) +{ + int i; + + dce_v6_0_audio_get_connected_pins(adev); + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + if (adev->mode_info.audio.pin[i].connected) + return &adev->mode_info.audio.pin[i]; + } + DRM_ERROR("No connected audio pins found!\n"); + return NULL; +} + +static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder) +{ + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + u32 offset; + + if (!dig || !dig->afmt || !dig->afmt->pin) + return; + + offset = dig->afmt->offset; + + WREG32(AFMT_AUDIO_SRC_CONTROL + offset, + AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); + +} + +static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n"); +} + +static void 
dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder) +{ + DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n"); +} + +static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) +{ + DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n"); + +} +*/ +static void dce_v6_0_audio_enable(struct amdgpu_device *adev, + struct amdgpu_audio_pin *pin, + bool enable) +{ + DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n"); +} + +static const u32 pin_offsets[7] = +{ + (0x1780 - 0x1780), + (0x1786 - 0x1780), + (0x178c - 0x1780), + (0x1792 - 0x1780), + (0x1798 - 0x1780), + (0x179d - 0x1780), + (0x17a4 - 0x1780), +}; + +static int dce_v6_0_audio_init(struct amdgpu_device *adev) +{ + return 0; +} + +static void dce_v6_0_audio_fini(struct amdgpu_device *adev) +{ + +} + +/* +static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) +{ + DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n"); +} +*/ +/* + * build a HDMI Video Info Frame + */ +/* +static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, + void *buffer, size_t size) +{ + DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n"); +} + +static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) +{ + DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n"); +} +*/ +/* + * update the info frames with the data from the current display mode + */ +static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n"); +} + +static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + + if (!dig || !dig->afmt) + return; + + /* Silent, r600_hdmi_enable will raise WARN for us */ + if (enable && dig->afmt->enabled) + return; + if (!enable && !dig->afmt->enabled) + return; + + if (!enable && dig->afmt->pin) { + dce_v6_0_audio_enable(adev, dig->afmt->pin, false); + dig->afmt->pin = NULL; + } + + dig->afmt->enabled = enable; + + DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", + enable ? 
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); +} + +static int dce_v6_0_afmt_init(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < adev->mode_info.num_dig; i++) + adev->mode_info.afmt[i] = NULL; + + /* DCE6 has audio blocks tied to DIG encoders */ + for (i = 0; i < adev->mode_info.num_dig; i++) { + adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); + if (adev->mode_info.afmt[i]) { + adev->mode_info.afmt[i]->offset = dig_offsets[i]; + adev->mode_info.afmt[i]->id = i; + } else { + for (j = 0; j < i; j++) { + kfree(adev->mode_info.afmt[j]); + adev->mode_info.afmt[j] = NULL; + } + DRM_ERROR("Out of memory allocating afmt table\n"); + return -ENOMEM; + } + } + return 0; +} + +static void dce_v6_0_afmt_fini(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->mode_info.num_dig; i++) { + kfree(adev->mode_info.afmt[i]); + adev->mode_info.afmt[i] = NULL; + } +} + +static const u32 vga_control_regs[6] = +{ + AVIVO_D1VGA_CONTROL, + AVIVO_D2VGA_CONTROL, + EVERGREEN_D3VGA_CONTROL, + EVERGREEN_D4VGA_CONTROL, + EVERGREEN_D5VGA_CONTROL, + EVERGREEN_D6VGA_CONTROL, +}; + +static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + u32 vga_control; + + vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; + WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0)); +} + +static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + + WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 
1 : 0); +} + +static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, int atomic) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_framebuffer *amdgpu_fb; + struct drm_framebuffer *target_fb; + struct drm_gem_object *obj; + struct amdgpu_bo *abo; + uint64_t fb_location, tiling_flags; + uint32_t fb_format, fb_pitch_pixels, pipe_config; + u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); + u32 viewport_w, viewport_h; + int r; + bool bypass_lut = false; + + /* no fb bound */ + if (!atomic && !crtc->primary->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + if (atomic) { + amdgpu_fb = to_amdgpu_framebuffer(fb); + target_fb = fb; + } else { + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + target_fb = crtc->primary->fb; + } + + /* If atomic, assume fb object is pinned & idle & fenced and + * just update base pointers + */ + obj = amdgpu_fb->obj; + abo = gem_to_amdgpu_bo(obj); + r = amdgpu_bo_reserve(abo, false); + if (unlikely(r != 0)) + return r; + + if (atomic) { + fb_location = amdgpu_bo_gpu_offset(abo); + } else { + r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); + if (unlikely(r != 0)) { + amdgpu_bo_unreserve(abo); + return -EINVAL; + } + } + + amdgpu_bo_get_tiling_flags(abo, &tiling_flags); + amdgpu_bo_unreserve(abo); + + switch (target_fb->pixel_format) { + case DRM_FORMAT_C8: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); + break; + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB4444: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_ARGB1555: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_BGRA5551: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_RGB565: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16); +#endif + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); +#endif + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); +#endif + /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ + bypass_lut = true; + break; + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_BGRA1010102: + fb_format = 
(EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102)); +#ifdef __BIG_ENDIAN + fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32); +#endif + /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ + bypass_lut = true; + break; + default: + DRM_ERROR("Unsupported screen format %s\n", + drm_get_format_name(target_fb->pixel_format)); + return -EINVAL; + } + + if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { + unsigned bankw, bankh, mtaspect, tile_split, num_banks; + + bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); + bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); + mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); + tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); + num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); + + fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); + fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); + fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); + fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); + fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); + fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect); + } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { + fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); + } + + pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); + fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config); + + dce_v6_0_vga_enable(crtc, false); + + /* Make sure surface address is updated at vertical blank rather than + * horizontal blank + */ + WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(fb_location)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(fb_location)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); + WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); + WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap); + + /* + * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT + * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to + * retain the full precision throughout the pipeline. + */ + WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset, + (bypass_lut ? 
EVERGREEN_LUT_10BIT_BYPASS_EN : 0), + ~EVERGREEN_LUT_10BIT_BYPASS_EN); + + if (bypass_lut) + DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); + + WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); + WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); + WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0); + WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0); + WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); + WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); + + fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); + WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); + + dce_v6_0_grph_enable(crtc, true); + + WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, + target_fb->height); + x &= ~3; + y &= ~1; + WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset, + (x << 16) | y); + viewport_w = crtc->mode.hdisplay; + viewport_h = (crtc->mode.vdisplay + 1) & ~1; + + WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset, + (viewport_w << 16) | viewport_h); + + /* set pageflip to happen anywhere in vblank interval */ + WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); + + if (!atomic && fb && fb != crtc->primary->fb) { + amdgpu_fb = to_amdgpu_framebuffer(fb); + abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(abo, false); + if (unlikely(r != 0)) + return r; + amdgpu_bo_unpin(abo); + amdgpu_bo_unreserve(abo); + } + + /* Bytes per pixel may have changed */ + dce_v6_0_bandwidth_update(adev); + + return 0; + +} + +static void dce_v6_0_set_interleave(struct drm_crtc *crtc, + struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, + EVERGREEN_INTERLEAVE_EN); + else + WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0); +} + +static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc) +{ + + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + int i; + + DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); + + WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, + (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | + NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); + WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, + NI_GRPH_PRESCALE_BYPASS); + WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, + NI_OVL_PRESCALE_BYPASS); + WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, + (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) | + NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT))); + + + + WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); + + WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); + WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); + WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); + + WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); + WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); + WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); + + WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); + WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 
0x00000007); + + WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); + for (i = 0; i < 256; i++) { + WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, + (amdgpu_crtc->lut_r[i] << 20) | + (amdgpu_crtc->lut_g[i] << 10) | + (amdgpu_crtc->lut_b[i] << 0)); + } + + WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, + (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | + NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | + NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | + NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS))); + WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, + (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) | + NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS))); + WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset, + (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) | + NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS))); + WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, + (NI_OUTPUT_CSC_GRPH_MODE(0) | + NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); + /* XXX match this to the depth of the crtc fmt block, move to modeset? */ + WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0); + + +} + +static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + + switch (amdgpu_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + return dig->linkb ? 1 : 0; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + return dig->linkb ? 3 : 2; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + return dig->linkb ? 5 : 4; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + return 6; + default: + DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); + return 0; + } +} + +/** + * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc. + * + * @crtc: drm crtc + * + * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors + * a single PPLL can be used for all DP crtcs/encoders. For non-DP + * monitors a dedicated PPLL must be used. If a particular board has + * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming + * as there is no need to program the PLL itself. If we are not able to + * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to + * avoid messing up an existing monitor. 
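+ * On DCE6, DP crtcs use PPLL0 (or skip PLL programming entirely when an
+ * external DP clock is present); non-DP crtcs share a matching PPLL where
+ * possible, otherwise PPLL2 and then PPLL1 are allocated from the in-use
+ * mask.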
+ * + * + */ +static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + u32 pll_in_use; + int pll; + + if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { + if (adev->clock.dp_extclk) + /* skip PPLL programming if using ext clock */ + return ATOM_PPLL_INVALID; + else + return ATOM_PPLL0; + } else { + /* use the same PPLL for all monitors with the same clock */ + pll = amdgpu_pll_get_shared_nondp_ppll(crtc); + if (pll != ATOM_PPLL_INVALID) + return pll; + } + + /* PPLL1, and PPLL2 */ + pll_in_use = amdgpu_pll_get_use_mask(crtc); + if (!(pll_in_use & (1 << ATOM_PPLL2))) + return ATOM_PPLL2; + if (!(pll_in_use & (1 << ATOM_PPLL1))) + return ATOM_PPLL1; + DRM_ERROR("unable to allocate a PPLL\n"); + return ATOM_PPLL_INVALID; +} + +static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock) +{ + struct amdgpu_device *adev = crtc->dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + uint32_t cur_lock; + + cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset); + if (lock) + cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK; + else + cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK; + WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); +} + +static void dce_v6_0_hide_cursor(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + + WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset, + EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | + EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); + + +} + +static void dce_v6_0_show_cursor(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + + WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, + upper_32_bits(amdgpu_crtc->cursor_addr)); + WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, + lower_32_bits(amdgpu_crtc->cursor_addr)); + + WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset, + EVERGREEN_CURSOR_EN | + EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | + EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); + +} + +static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc, + int x, int y) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = crtc->dev->dev_private; + int xorigin = 0, yorigin = 0; + + int w = amdgpu_crtc->cursor_width; + + /* avivo cursor are offset into the total surface */ + x += crtc->x; + y += crtc->y; + DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); + + if (x < 0) { + xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); + x = 0; + } + if (y < 0) { + yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); + y = 0; + } + + WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); + WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset, + ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); + + amdgpu_crtc->cursor_x = x; + amdgpu_crtc->cursor_y = y; + return 0; +} + +static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc, + int x, int y) +{ + int ret; + + dce_v6_0_lock_cursor(crtc, true); + ret = dce_v6_0_cursor_move_locked(crtc, x, y); + dce_v6_0_lock_cursor(crtc, false); + + return ret; +} + 
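+/*
+ * The cursor positions handled by dce_v6_0_cursor_move_locked() above are
+ * crtc relative; negative coordinates are clamped to 0 and compensated by
+ * shifting the hot spot instead, e.g. x = -10 becomes xorigin = 10, x = 0
+ * before CUR_POSITION and CUR_HOT_SPOT are programmed.
+ */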
+static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, + struct drm_file *file_priv, + uint32_t handle, + uint32_t width, + uint32_t height, + int32_t hot_x, + int32_t hot_y) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_gem_object *obj; + struct amdgpu_bo *aobj; + int ret; + + if (!handle) { + /* turn off cursor */ + dce_v6_0_hide_cursor(crtc); + obj = NULL; + goto unpin; + } + + if ((width > amdgpu_crtc->max_cursor_width) || + (height > amdgpu_crtc->max_cursor_height)) { + DRM_ERROR("bad cursor width or height %d x %d\n", width, height); + return -EINVAL; + } + + obj = drm_gem_object_lookup(file_priv, handle); + if (!obj) { + DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); + return -ENOENT; + } + + aobj = gem_to_amdgpu_bo(obj); + ret = amdgpu_bo_reserve(aobj, false); + if (ret != 0) { + drm_gem_object_unreference_unlocked(obj); + return ret; + } + + ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr); + amdgpu_bo_unreserve(aobj); + if (ret) { + DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); + drm_gem_object_unreference_unlocked(obj); + return ret; + } + + amdgpu_crtc->cursor_width = width; + amdgpu_crtc->cursor_height = height; + + dce_v6_0_lock_cursor(crtc, true); + + if (hot_x != amdgpu_crtc->cursor_hot_x || + hot_y != amdgpu_crtc->cursor_hot_y) { + int x, y; + + x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; + y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; + + dce_v6_0_cursor_move_locked(crtc, x, y); + + amdgpu_crtc->cursor_hot_x = hot_x; + amdgpu_crtc->cursor_hot_y = hot_y; + } + + dce_v6_0_show_cursor(crtc); + dce_v6_0_lock_cursor(crtc, false); + +unpin: + if (amdgpu_crtc->cursor_bo) { + struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); + ret = amdgpu_bo_reserve(aobj, false); + if (likely(ret == 0)) { + amdgpu_bo_unpin(aobj); + amdgpu_bo_unreserve(aobj); + } + drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); + } + + amdgpu_crtc->cursor_bo = obj; + return 0; +} + +static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (amdgpu_crtc->cursor_bo) { + dce_v6_0_lock_cursor(crtc, true); + + dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, + amdgpu_crtc->cursor_y); + + dce_v6_0_show_cursor(crtc); + dce_v6_0_lock_cursor(crtc, false); + } +} + +static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + int i; + + /* userspace palettes are always correct as is */ + for (i = 0; i < size; i++) { + amdgpu_crtc->lut_r[i] = red[i] >> 6; + amdgpu_crtc->lut_g[i] = green[i] >> 6; + amdgpu_crtc->lut_b[i] = blue[i] >> 6; + } + dce_v6_0_crtc_load_lut(crtc); + + return 0; +} + +static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(amdgpu_crtc); +} + +static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = { + .cursor_set2 = dce_v6_0_crtc_cursor_set2, + .cursor_move = dce_v6_0_crtc_cursor_move, + .gamma_set = dce_v6_0_crtc_gamma_set, + .set_config = amdgpu_crtc_set_config, + .destroy = dce_v6_0_crtc_destroy, + .page_flip_target = amdgpu_crtc_page_flip_target, +}; + +static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = 
to_amdgpu_crtc(crtc); + unsigned type; + + switch (mode) { + case DRM_MODE_DPMS_ON: + amdgpu_crtc->enabled = true; + amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); + amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); + /* Make sure VBLANK and PFLIP interrupts are still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); + drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); + dce_v6_0_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id); + if (amdgpu_crtc->enabled) + amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); + amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); + amdgpu_crtc->enabled = false; + break; + } + /* adjust pm to dpms */ + amdgpu_pm_compute_clocks(adev); +} + +static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc) +{ + /* disable crtc pair power gating before programming */ + amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); + amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); + dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); +} + +static void dce_v6_0_crtc_commit(struct drm_crtc *crtc) +{ + dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); +} + +static void dce_v6_0_crtc_disable(struct drm_crtc *crtc) +{ + + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_atom_ss ss; + int i; + + dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + if (crtc->primary->fb) { + int r; + struct amdgpu_framebuffer *amdgpu_fb; + struct amdgpu_bo *abo; + + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(abo, false); + if (unlikely(r)) + DRM_ERROR("failed to reserve abo before unpin\n"); + else { + amdgpu_bo_unpin(abo); + amdgpu_bo_unreserve(abo); + } + } + /* disable the GRPH */ + dce_v6_0_grph_enable(crtc, false); + + amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->mode_info.crtcs[i] && + adev->mode_info.crtcs[i]->enabled && + i != amdgpu_crtc->crtc_id && + amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { + /* one other crtc is using this pll don't turn + * off the pll + */ + goto done; + } + } + + switch (amdgpu_crtc->pll_id) { + case ATOM_PPLL1: + case ATOM_PPLL2: + /* disable the ppll */ + amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); + break; + default: + break; + } +done: + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->adjusted_clock = 0; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; +} + +static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (!amdgpu_crtc->adjusted_clock) + return -EINVAL; + + amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); + amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); + dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0); + amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); + amdgpu_atombios_crtc_scaler_setup(crtc); + dce_v6_0_cursor_reset(crtc); + /* update the hw version fpr dpm */ + amdgpu_crtc->hw_mode = 
*adjusted_mode; + + return 0; +} + +static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + + /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + amdgpu_crtc->encoder = encoder; + amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); + break; + } + } + if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + return false; + } + if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) + return false; + if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) + return false; + /* pick pll */ + amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc); + /* if we can't get a PPLL for a non-DP encoder, fail */ + if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && + !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) + return false; + + return true; +} + +static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0); +} + +static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1); +} + +static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = { + .dpms = dce_v6_0_crtc_dpms, + .mode_fixup = dce_v6_0_crtc_mode_fixup, + .mode_set = dce_v6_0_crtc_mode_set, + .mode_set_base = dce_v6_0_crtc_set_base, + .mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic, + .prepare = dce_v6_0_crtc_prepare, + .commit = dce_v6_0_crtc_commit, + .load_lut = dce_v6_0_crtc_load_lut, + .disable = dce_v6_0_crtc_disable, +}; + +static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) +{ + struct amdgpu_crtc *amdgpu_crtc; + int i; + + amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); + if (amdgpu_crtc == NULL) + return -ENOMEM; + + drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs); + + drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); + amdgpu_crtc->crtc_id = index; + adev->mode_info.crtcs[index] = amdgpu_crtc; + + amdgpu_crtc->max_cursor_width = CURSOR_WIDTH; + amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT; + adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; + adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; + + for (i = 0; i < 256; i++) { + amdgpu_crtc->lut_r[i] = i << 2; + amdgpu_crtc->lut_g[i] = i << 2; + amdgpu_crtc->lut_b[i] = i << 2; + } + + amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id]; + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->adjusted_clock = 0; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs); + + return 0; +} + +static int dce_v6_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg; + adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg; + + dce_v6_0_set_display_funcs(adev); + dce_v6_0_set_irq_funcs(adev); + + switch (adev->asic_type) { + 
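+	/* number of display controllers, hpd pins and dig encoders per asic */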
case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + case CHIP_OLAND: + adev->mode_info.num_crtc = 2; + adev->mode_info.num_hpd = 2; + adev->mode_info.num_dig = 2; + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + + return 0; +} + +static int dce_v6_0_sw_init(void *handle) +{ + int r, i; + bool ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); + if (r) + return r; + } + + for (i = 8; i < 20; i += 2) { + r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq); + if (r) + return r; + } + + /* HPD hotplug */ + r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq); + if (r) + return r; + + adev->mode_info.mode_config_initialized = true; + + adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + adev->ddev->mode_config.async_page_flip = true; + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + adev->ddev->mode_config.preferred_depth = 24; + adev->ddev->mode_config.prefer_shadow = 1; + adev->ddev->mode_config.fb_base = adev->mc.aper_base; + + r = amdgpu_modeset_create_props(adev); + if (r) + return r; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + /* allocate crtcs */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = dce_v6_0_crtc_init(adev, i); + if (r) + return r; + } + + ret = amdgpu_atombios_get_connector_info_from_object_table(adev); + if (ret) + amdgpu_print_display_setup(adev->ddev); + else + return -EINVAL; + + /* setup afmt */ + r = dce_v6_0_afmt_init(adev); + if (r) + return r; + + r = dce_v6_0_audio_init(adev); + if (r) + return r; + + drm_kms_helper_poll_init(adev->ddev); + + return r; +} + +static int dce_v6_0_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + kfree(adev->mode_info.bios_hardcoded_edid); + + drm_kms_helper_poll_fini(adev->ddev); + + dce_v6_0_audio_fini(adev); + dce_v6_0_afmt_fini(adev); + + drm_mode_config_cleanup(adev->ddev); + adev->mode_info.mode_config_initialized = false; + + return 0; +} + +static int dce_v6_0_hw_init(void *handle) +{ + int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* init dig PHYs, disp eng pll */ + amdgpu_atombios_encoder_init_dig(adev); + amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); + + /* initialize hpd */ + dce_v6_0_hpd_init(adev); + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + } + + dce_v6_0_pageflip_interrupt_init(adev); + + return 0; +} + +static int dce_v6_0_hw_fini(void *handle) +{ + int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + dce_v6_0_hpd_fini(adev); + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); + } + + dce_v6_0_pageflip_interrupt_fini(adev); + + return 0; +} + +static int dce_v6_0_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_atombios_scratch_regs_save(adev); + + return dce_v6_0_hw_fini(handle); +} + +static int dce_v6_0_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + ret = dce_v6_0_hw_init(handle); + + amdgpu_atombios_scratch_regs_restore(adev); + + /* turn on the BL */ + if 
(adev->mode_info.bl_encoder) { + u8 bl_level = amdgpu_display_backlight_get_level(adev, + adev->mode_info.bl_encoder); + amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, + bl_level); + } + + return ret; +} + +static bool dce_v6_0_is_idle(void *handle) +{ + return true; +} + +static int dce_v6_0_wait_for_idle(void *handle) +{ + return 0; +} + +static int dce_v6_0_soft_reset(void *handle) +{ + DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n"); + return 0; +} + +static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + u32 reg_block, interrupt_mask; + + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + switch (crtc) { + case 0: + reg_block = SI_CRTC0_REGISTER_OFFSET; + break; + case 1: + reg_block = SI_CRTC1_REGISTER_OFFSET; + break; + case 2: + reg_block = SI_CRTC2_REGISTER_OFFSET; + break; + case 3: + reg_block = SI_CRTC3_REGISTER_OFFSET; + break; + case 4: + reg_block = SI_CRTC4_REGISTER_OFFSET; + break; + case 5: + reg_block = SI_CRTC5_REGISTER_OFFSET; + break; + default: + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + interrupt_mask = RREG32(INT_MASK + reg_block); + interrupt_mask &= ~VBLANK_INT_MASK; + WREG32(INT_MASK + reg_block, interrupt_mask); + break; + case AMDGPU_IRQ_STATE_ENABLE: + interrupt_mask = RREG32(INT_MASK + reg_block); + interrupt_mask |= VBLANK_INT_MASK; + WREG32(INT_MASK + reg_block, interrupt_mask); + break; + default: + break; + } +} + +static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + +} + +static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; + + switch (type) { + case AMDGPU_HPD_1: + dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL; + break; + case AMDGPU_HPD_2: + dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL; + break; + case AMDGPU_HPD_3: + dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL; + break; + case AMDGPU_HPD_4: + dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL; + break; + case AMDGPU_HPD_5: + dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL; + break; + case AMDGPU_HPD_6: + dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL; + break; + default: + DRM_DEBUG("invalid hpd %d\n", type); + return 0; + } + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); + dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); + dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); + WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); + break; + default: + break; + } + + return 0; +} + +static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CRTC_IRQ_VBLANK1: + dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK2: + dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK3: + dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK4: + dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK5: + 
dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state); + break; + case AMDGPU_CRTC_IRQ_VBLANK6: + dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state); + break; + case AMDGPU_CRTC_IRQ_VLINE1: + dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state); + break; + case AMDGPU_CRTC_IRQ_VLINE2: + dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state); + break; + case AMDGPU_CRTC_IRQ_VLINE3: + dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state); + break; + case AMDGPU_CRTC_IRQ_VLINE4: + dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state); + break; + case AMDGPU_CRTC_IRQ_VLINE5: + dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state); + break; + case AMDGPU_CRTC_IRQ_VLINE6: + dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state); + break; + default: + break; + } + return 0; +} + +static int dce_v6_0_crtc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned crtc = entry->src_id - 1; + uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); + unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); + + switch (entry->src_data) { + case 0: /* vblank */ + if (disp_int & interrupt_status_offsets[crtc].vblank) + WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK); + else + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + if (amdgpu_irq_enabled(adev, source, irq_type)) { + drm_handle_vblank(adev->ddev, crtc); + } + DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + break; + case 1: /* vline */ + if (disp_int & interrupt_status_offsets[crtc].vline) + WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK); + else + DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); + + DRM_DEBUG("IH: D%d vline\n", crtc + 1); + break; + default: + DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); + break; + } + + return 0; +} + +static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 reg; + + if (type >= adev->mode_info.num_crtc) { + DRM_ERROR("invalid pageflip crtc %d\n", type); + return -EINVAL; + } + + reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]); + if (state == AMDGPU_IRQ_STATE_DISABLE) + WREG32(GRPH_INT_CONTROL + crtc_offsets[type], + reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); + else + WREG32(GRPH_INT_CONTROL + crtc_offsets[type], + reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); + + return 0; +} + +static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned long flags; + unsigned crtc_id; + struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_flip_work *works; + + crtc_id = (entry->src_id - 8) >> 1; + amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + + if (crtc_id >= adev->mode_info.num_crtc) { + DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); + return -EINVAL; + } + + if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) & + GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) + WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id], + GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); + + /* IRQ could occur when in initial stage */ + if (amdgpu_crtc == NULL) + return 0; + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + works = amdgpu_crtc->pflip_works; + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ + DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " + "AMDGPU_FLIP_SUBMITTED(%d)\n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED); + 
spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + return 0; + } + + /* page flip completed. clean up */ + amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; + amdgpu_crtc->pflip_works = NULL; + + /* wakeup userspace */ + if (works->event) + drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + + drm_crtc_vblank_put(&amdgpu_crtc->base); + schedule_work(&works->unpin_work); + + return 0; +} + +static int dce_v6_0_hpd_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t disp_int, mask, int_control, tmp; + unsigned hpd; + + if (entry->src_data >= adev->mode_info.num_hpd) { + DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); + return 0; + } + + hpd = entry->src_data; + disp_int = RREG32(interrupt_status_offsets[hpd].reg); + mask = interrupt_status_offsets[hpd].hpd; + int_control = hpd_int_control_offsets[hpd]; + + if (disp_int & mask) { + tmp = RREG32(int_control); + tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; + WREG32(int_control, tmp); + schedule_work(&adev->hotplug_work); + DRM_INFO("IH: HPD%d\n", hpd + 1); + } + + return 0; + +} + +static int dce_v6_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int dce_v6_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs dce_v6_0_ip_funcs = { + .name = "dce_v6_0", + .early_init = dce_v6_0_early_init, + .late_init = NULL, + .sw_init = dce_v6_0_sw_init, + .sw_fini = dce_v6_0_sw_fini, + .hw_init = dce_v6_0_hw_init, + .hw_fini = dce_v6_0_hw_fini, + .suspend = dce_v6_0_suspend, + .resume = dce_v6_0_resume, + .is_idle = dce_v6_0_is_idle, + .wait_for_idle = dce_v6_0_wait_for_idle, + .soft_reset = dce_v6_0_soft_reset, + .set_clockgating_state = dce_v6_0_set_clockgating_state, + .set_powergating_state = dce_v6_0_set_powergating_state, +}; + +static void +dce_v6_0_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + + amdgpu_encoder->pixel_clock = adjusted_mode->clock; + + /* need to call this here rather than in prepare() since we need some crtc info */ + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + /* set scaler clears this on some chips */ + dce_v6_0_set_interleave(encoder->crtc, mode); + + if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { + dce_v6_0_afmt_enable(encoder, true); + dce_v6_0_afmt_setmode(encoder, adjusted_mode); + } +} + +static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder) +{ + + struct amdgpu_device *adev = encoder->dev->dev_private; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); + + if ((amdgpu_encoder->active_device & + (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || + (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != + ENCODER_OBJECT_ID_NONE)) { + struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + if (dig) { + dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder); + if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) + dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; + } + } + + amdgpu_atombios_scratch_regs_lock(adev, true); + + if (connector) { + struct amdgpu_connector *amdgpu_connector = 
to_amdgpu_connector(connector); + + /* select the clock/data port if it uses a router */ + if (amdgpu_connector->router.cd_valid) + amdgpu_i2c_router_select_cd_port(amdgpu_connector); + + /* turn eDP panel on for mode set */ + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) + amdgpu_atombios_encoder_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_ON); + } + + /* this is needed for the pll/ss setup to work correctly in some cases */ + amdgpu_atombios_encoder_set_crtc_source(encoder); + /* set up the FMT blocks */ + dce_v6_0_program_fmt(encoder); +} + +static void dce_v6_0_encoder_commit(struct drm_encoder *encoder) +{ + + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; + + /* need to call this here as we need the crtc set up */ + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + amdgpu_atombios_scratch_regs_lock(adev, false); +} + +static void dce_v6_0_encoder_disable(struct drm_encoder *encoder) +{ + + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct amdgpu_encoder_atom_dig *dig; + + amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + + if (amdgpu_atombios_encoder_is_digital(encoder)) { + if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) + dce_v6_0_afmt_enable(encoder, false); + dig = amdgpu_encoder->enc_priv; + dig->dig_encoder = -1; + } + amdgpu_encoder->active_device = 0; +} + +/* these are handled by the primary encoders */ +static void dce_v6_0_ext_prepare(struct drm_encoder *encoder) +{ + +} + +static void dce_v6_0_ext_commit(struct drm_encoder *encoder) +{ + +} + +static void +dce_v6_0_ext_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + +} + +static void dce_v6_0_ext_disable(struct drm_encoder *encoder) +{ + +} + +static void +dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode) +{ + +} + +static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = { + .dpms = dce_v6_0_ext_dpms, + .mode_fixup = dce_v6_0_ext_mode_fixup, + .prepare = dce_v6_0_ext_prepare, + .mode_set = dce_v6_0_ext_mode_set, + .commit = dce_v6_0_ext_commit, + .disable = dce_v6_0_ext_disable, + /* no detect for TMDS/LVDS yet */ +}; + +static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = { + .dpms = amdgpu_atombios_encoder_dpms, + .mode_fixup = amdgpu_atombios_encoder_mode_fixup, + .prepare = dce_v6_0_encoder_prepare, + .mode_set = dce_v6_0_encoder_mode_set, + .commit = dce_v6_0_encoder_commit, + .disable = dce_v6_0_encoder_disable, + .detect = amdgpu_atombios_encoder_dig_detect, +}; + +static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = { + .dpms = amdgpu_atombios_encoder_dpms, + .mode_fixup = amdgpu_atombios_encoder_mode_fixup, + .prepare = dce_v6_0_encoder_prepare, + .mode_set = dce_v6_0_encoder_mode_set, + .commit = dce_v6_0_encoder_commit, + .detect = amdgpu_atombios_encoder_dac_detect, +}; + +static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); + kfree(amdgpu_encoder->enc_priv); + drm_encoder_cleanup(encoder); + kfree(amdgpu_encoder); +} + +static const struct drm_encoder_funcs 
dce_v6_0_encoder_funcs = { + .destroy = dce_v6_0_encoder_destroy, +}; + +static void dce_v6_0_encoder_add(struct amdgpu_device *adev, + uint32_t encoder_enum, + uint32_t supported_device, + u16 caps) +{ + struct drm_device *dev = adev->ddev; + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + + /* see if we already added it */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->encoder_enum == encoder_enum) { + amdgpu_encoder->devices |= supported_device; + return; + } + + } + + /* add a new one */ + amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); + if (!amdgpu_encoder) + return; + + encoder = &amdgpu_encoder->base; + switch (adev->mode_info.num_crtc) { + case 1: + encoder->possible_crtcs = 0x1; + break; + case 2: + default: + encoder->possible_crtcs = 0x3; + break; + case 4: + encoder->possible_crtcs = 0xf; + break; + case 6: + encoder->possible_crtcs = 0x3f; + break; + } + + amdgpu_encoder->enc_priv = NULL; + amdgpu_encoder->encoder_enum = encoder_enum; + amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + amdgpu_encoder->devices = supported_device; + amdgpu_encoder->rmx_type = RMX_OFF; + amdgpu_encoder->underscan_type = UNDERSCAN_OFF; + amdgpu_encoder->is_ext_encoder = false; + amdgpu_encoder->caps = caps; + + switch (amdgpu_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, + DRM_MODE_ENCODER_DAC, NULL); + drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs); + break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + amdgpu_encoder->rmx_type = RMX_FULL; + drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, + DRM_MODE_ENCODER_LVDS, NULL); + amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); + } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { + drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, + DRM_MODE_ENCODER_DAC, NULL); + amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); + } else { + drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); + } + drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs); + break; + case ENCODER_OBJECT_ID_SI170B: + case ENCODER_OBJECT_ID_CH7303: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: + case ENCODER_OBJECT_ID_TITFP513: + case ENCODER_OBJECT_ID_VT1623: + case ENCODER_OBJECT_ID_HDMI_SI1930: + case ENCODER_OBJECT_ID_TRAVIS: + case ENCODER_OBJECT_ID_NUTMEG: + /* these are handled by the primary encoders */ + amdgpu_encoder->is_ext_encoder = true; + if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, + DRM_MODE_ENCODER_LVDS, NULL); + else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) + drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, + DRM_MODE_ENCODER_DAC, NULL); + else + drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs); + 
break; + } +} + +static const struct amdgpu_display_funcs dce_v6_0_display_funcs = { + .set_vga_render_state = &dce_v6_0_set_vga_render_state, + .bandwidth_update = &dce_v6_0_bandwidth_update, + .vblank_get_counter = &dce_v6_0_vblank_get_counter, + .vblank_wait = &dce_v6_0_vblank_wait, + .is_display_hung = &dce_v6_0_is_display_hung, + .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, + .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, + .hpd_sense = &dce_v6_0_hpd_sense, + .hpd_set_polarity = &dce_v6_0_hpd_set_polarity, + .hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg, + .page_flip = &dce_v6_0_page_flip, + .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos, + .add_encoder = &dce_v6_0_encoder_add, + .add_connector = &amdgpu_connector_add, + .stop_mc_access = &dce_v6_0_stop_mc_access, + .resume_mc_access = &dce_v6_0_resume_mc_access, +}; + +static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev) +{ + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dce_v6_0_display_funcs; +} + +static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = { + .set = dce_v6_0_set_crtc_interrupt_state, + .process = dce_v6_0_crtc_irq, +}; + +static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = { + .set = dce_v6_0_set_pageflip_interrupt_state, + .process = dce_v6_0_pageflip_irq, +}; + +static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = { + .set = dce_v6_0_set_hpd_interrupt_state, + .process = dce_v6_0_hpd_irq, +}; + +static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs; + + adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; + adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs; + + adev->hpd_irq.num_types = AMDGPU_HPD_LAST; + adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h index c031ff99fe3e..6a5528105bb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h @@ -1,5 +1,5 @@ /* - * Copyright 2014 Advanced Micro Devices, Inc. + * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,22 +21,9 @@ * */ -#ifndef TONGA_SMUMGR_H -#define TONGA_SMUMGR_H +#ifndef __DCE_V6_0_H__ +#define __DCE_V6_0_H__ -#include "tonga_ppsmc.h" - -int tonga_smu_init(struct amdgpu_device *adev); -int tonga_smu_fini(struct amdgpu_device *adev); -int tonga_smu_start(struct amdgpu_device *adev); - -struct tonga_smu_private_data -{ - uint8_t *header; - uint32_t smu_buffer_addr_high; - uint32_t smu_buffer_addr_low; - uint32_t header_addr_high; - uint32_t header_addr_low; -}; +extern const struct amd_ip_funcs dce_v6_0_ip_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 4fdfab1e9200..5966166ec94c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -170,7 +170,7 @@ static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc) */ static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc) { - unsigned i = 0; + unsigned i = 100; if (crtc >= adev->mode_info.num_crtc) return; @@ -182,14 +182,16 @@ static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc) * wait for another frame. 
*/ while (dce_v8_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v8_0_is_counter_moving(adev, crtc)) break; } } while (!dce_v8_0_is_in_vblank(adev, crtc)) { - if (i++ % 100 == 0) { + if (i++ == 100) { + i = 0; if (!dce_v8_0_is_counter_moving(adev, crtc)) break; } @@ -395,15 +397,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { - /* don't try to enable hpd on eDP or LVDS avoid breaking the - * aux dp channel on imac and help (but not completely fix) - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 - * also avoid interrupt storms during dpms. - */ - continue; - } switch (amdgpu_connector->hpd.hpd) { case AMDGPU_HPD_1: WREG32(mmDC_HPD1_CONTROL, tmp); @@ -426,6 +419,45 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) default: break; } + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. + */ + u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; + + switch (amdgpu_connector->hpd.hpd) { + case AMDGPU_HPD_1: + dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL; + break; + case AMDGPU_HPD_2: + dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL; + break; + case AMDGPU_HPD_3: + dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL; + break; + case AMDGPU_HPD_4: + dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL; + break; + case AMDGPU_HPD_5: + dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL; + break; + case AMDGPU_HPD_6: + dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL; + break; + default: + continue; + } + + dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); + dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; + WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); + continue; + } + dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } @@ -604,6 +636,52 @@ static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + num_crtc = 6; + break; + case CHIP_KAVERI: + num_crtc = 4; + break; + case CHIP_KABINI: + case CHIP_MULLINS: + num_crtc = 2; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v8_0_disable_dce(struct amdgpu_device *adev) +{ + /*Disable VGA render and enabled crtc, if has DCE engine*/ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v8_0_set_vga_render_state(adev, false); + + /*Disable crtc*/ + for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void 
dce_v8_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -1501,13 +1579,13 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) if (sad->format == eld_reg_to_type[i][1]) { if (sad->channels > max_channels) { - value = (sad->channels << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) | - (sad->byte2 << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) | - (sad->freq << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT); - max_channels = sad->channels; + value = (sad->channels << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) | + (sad->byte2 << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) | + (sad->freq << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT); + max_channels = sad->channels; } if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) @@ -1613,7 +1691,7 @@ static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; uint32_t offset = dig->afmt->offset; - WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT)); + WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT)); WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz); WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT)); @@ -1693,6 +1771,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, /* Silent, r600_hdmi_enable will raise WARN for us */ if (!dig->afmt->enabled) return; + offset = dig->afmt->offset; /* hdmi deep color mode general control packets setup, if bpc > 8 */ @@ -1817,7 +1896,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset, HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */ - HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK); /* required for audio info values to be updated */ + HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */ WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset, (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */ @@ -1826,13 +1905,12 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset, AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */ - /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */ WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF); WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF); WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001); WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001); - /* enable audio after to setting up hw */ + /* enable audio after setting up hw */ dce_v8_0_audio_enable(adev, dig->afmt->pin, true); } @@ -1944,7 +2022,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, struct amdgpu_framebuffer *amdgpu_fb; struct drm_framebuffer *target_fb; struct drm_gem_object *obj; - struct amdgpu_bo *rbo; + struct amdgpu_bo *abo; uint64_t fb_location, tiling_flags; uint32_t fb_format, fb_pitch_pixels; u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); @@ -1952,6 +2030,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, u32 viewport_w, viewport_h; int r; bool bypass_lut = false; + char *format_name; /* no fb bound */ if (!atomic && 
!crtc->primary->fb) { @@ -1971,23 +2050,23 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, * just update base pointers */ obj = amdgpu_fb->obj; - rbo = gem_to_amdgpu_bo(obj); - r = amdgpu_bo_reserve(rbo, false); + abo = gem_to_amdgpu_bo(obj); + r = amdgpu_bo_reserve(abo, false); if (unlikely(r != 0)) return r; if (atomic) { - fb_location = amdgpu_bo_gpu_offset(rbo); + fb_location = amdgpu_bo_gpu_offset(abo); } else { - r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); + r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); if (unlikely(r != 0)) { - amdgpu_bo_unreserve(rbo); + amdgpu_bo_unreserve(abo); return -EINVAL; } } - amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); - amdgpu_bo_unreserve(rbo); + amdgpu_bo_get_tiling_flags(abo, &tiling_flags); + amdgpu_bo_unreserve(abo); pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); @@ -1999,7 +2078,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, case DRM_FORMAT_XRGB4444: case DRM_FORMAT_ARGB4444: fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | - (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); + (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); #ifdef __BIG_ENDIAN fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif @@ -2056,8 +2135,9 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, bypass_lut = true; break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format)); + format_name = drm_get_format_name(target_fb->pixel_format); + DRM_ERROR("Unsupported screen format %s\n", format_name); + kfree(format_name); return -EINVAL; } @@ -2137,17 +2217,17 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); - rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); - r = amdgpu_bo_reserve(rbo, false); + abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(abo, false); if (unlikely(r != 0)) return r; - amdgpu_bo_unpin(rbo); - amdgpu_bo_unreserve(rbo); + amdgpu_bo_unpin(abo); + amdgpu_bo_unreserve(abo); } /* Bytes per pixel may have changed */ @@ -2552,7 +2632,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { .gamma_set = dce_v8_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v8_0_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2619,16 +2699,16 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc) if (crtc->primary->fb) { int r; struct amdgpu_framebuffer *amdgpu_fb; - struct amdgpu_bo *rbo; + struct amdgpu_bo *abo; amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); - rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); - r = amdgpu_bo_reserve(rbo, false); + abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(abo, false); if (unlikely(r)) - DRM_ERROR("failed to reserve rbo before unpin\n"); + DRM_ERROR("failed to reserve abo before unpin\n"); else { - amdgpu_bo_unpin(rbo); - amdgpu_bo_unreserve(rbo); + amdgpu_bo_unpin(abo); + amdgpu_bo_unreserve(abo); } } /* disable the GRPH 
*/ @@ -2653,7 +2733,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc) case ATOM_PPLL2: /* disable the ppll */ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); break; case ATOM_PPLL0: /* disable the ppll */ @@ -2803,21 +2883,20 @@ static int dce_v8_0_early_init(void *handle) dce_v8_0_set_display_funcs(adev); dce_v8_0_set_irq_funcs(adev); + adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_BONAIRE: case CHIP_HAWAII: - adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_KAVERI: - adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; break; case CHIP_KABINI: case CHIP_MULLINS: - adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; /* ? */ break; @@ -3236,7 +3315,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, drm_handle_vblank(adev->ddev, crtc); } DRM_DEBUG("IH: D%d vblank\n", crtc + 1); - break; case 1: /* vline */ if (disp_int & interrupt_status_offsets[crtc].vline) @@ -3245,7 +3323,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); DRM_DEBUG("IH: D%d vline\n", crtc + 1); - break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h index 77016852b252..7d0770c3a49b 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v8_0_ip_funcs; +void dce_v8_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c new file mode 100644 index 000000000000..c2bd9f045532 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -0,0 +1,802 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_i2c.h" +#include "atom.h" +#include "amdgpu_pll.h" +#include "amdgpu_connectors.h" +#ifdef CONFIG_DRM_AMDGPU_CIK +#include "dce_v8_0.h" +#endif +#include "dce_v10_0.h" +#include "dce_v11_0.h" +#include "dce_virtual.h" + +static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); +static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); +static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); + +/** + * dce_virtual_vblank_wait - vblank wait asic callback. + * + * @adev: amdgpu_device pointer + * @crtc: crtc to wait for vblank on + * + * Wait for vblank on the requested crtc (evergreen+). + */ +static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc) +{ + return; +} + +static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc) +{ + return 0; +} + +static void dce_virtual_page_flip(struct amdgpu_device *adev, + int crtc_id, u64 crtc_base, bool async) +{ + return; +} + +static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, + u32 *vbl, u32 *position) +{ + *vbl = 0; + *position = 0; + + return -EINVAL; +} + +static bool dce_virtual_hpd_sense(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + return true; +} + +static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + return; +} + +static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev) +{ + return 0; +} + +static bool dce_virtual_is_display_hung(struct amdgpu_device *adev) +{ + return false; +} + +static void dce_virtual_stop_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + dce_v8_0_disable_dce(adev); + break; +#endif + case CHIP_FIJI: + case CHIP_TONGA: + dce_v10_0_disable_dce(adev); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS11: + case CHIP_POLARIS10: + dce_v11_0_disable_dce(adev); + break; + case CHIP_TOPAZ: + /* no DCE */ + return; + default: + DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); + } + + return; +} +static void dce_virtual_resume_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + return; +} + +static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev, + bool render) +{ + return; +} + +/** + * dce_virtual_bandwidth_update - program display watermarks + * + * @adev: amdgpu_device pointer + * + * Calculate and program the display watermarks and line + * buffer allocation (CIK). 
+ */ +static void dce_virtual_bandwidth_update(struct amdgpu_device *adev) +{ + return; +} + +static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, + u16 *green, u16 *blue, uint32_t size) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + int i; + + /* userspace palettes are always correct as is */ + for (i = 0; i < size; i++) { + amdgpu_crtc->lut_r[i] = red[i] >> 6; + amdgpu_crtc->lut_g[i] = green[i] >> 6; + amdgpu_crtc->lut_b[i] = blue[i] >> 6; + } + + return 0; +} + +static void dce_virtual_crtc_destroy(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(amdgpu_crtc); +} + +static const struct drm_crtc_funcs dce_virtual_crtc_funcs = { + .cursor_set2 = NULL, + .cursor_move = NULL, + .gamma_set = dce_virtual_crtc_gamma_set, + .set_config = amdgpu_crtc_set_config, + .destroy = dce_virtual_crtc_destroy, + .page_flip_target = amdgpu_crtc_page_flip_target, +}; + +static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; + + switch (mode) { + case DRM_MODE_DPMS_ON: + amdgpu_crtc->enabled = true; + /* Make sure VBLANK and PFLIP interrupts are still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); + drm_vblank_on(dev, amdgpu_crtc->crtc_id); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + drm_vblank_off(dev, amdgpu_crtc->crtc_id); + amdgpu_crtc->enabled = false; + break; + } +} + + +static void dce_virtual_crtc_prepare(struct drm_crtc *crtc) +{ + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); +} + +static void dce_virtual_crtc_commit(struct drm_crtc *crtc) +{ + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON); +} + +static void dce_virtual_crtc_disable(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + if (crtc->primary->fb) { + int r; + struct amdgpu_framebuffer *amdgpu_fb; + struct amdgpu_bo *abo; + + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(abo, false); + if (unlikely(r)) + DRM_ERROR("failed to reserve abo before unpin\n"); + else { + amdgpu_bo_unpin(abo); + amdgpu_bo_unreserve(abo); + } + } + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; +} + +static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + /* update the hw version for dpm */ + amdgpu_crtc->hw_mode = *adjusted_mode; + + return 0; +} + +static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + + /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + amdgpu_crtc->encoder = encoder; + amdgpu_crtc->connector = 
amdgpu_get_connector_for_encoder(encoder); + break; + } + } + if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + return false; + } + + return true; +} + + +static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return 0; +} + +static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc) +{ + return; +} + +static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + return 0; +} + +static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = { + .dpms = dce_virtual_crtc_dpms, + .mode_fixup = dce_virtual_crtc_mode_fixup, + .mode_set = dce_virtual_crtc_mode_set, + .mode_set_base = dce_virtual_crtc_set_base, + .mode_set_base_atomic = dce_virtual_crtc_set_base_atomic, + .prepare = dce_virtual_crtc_prepare, + .commit = dce_virtual_crtc_commit, + .load_lut = dce_virtual_crtc_load_lut, + .disable = dce_virtual_crtc_disable, +}; + +static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index) +{ + struct amdgpu_crtc *amdgpu_crtc; + int i; + + amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); + if (amdgpu_crtc == NULL) + return -ENOMEM; + + drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs); + + drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); + amdgpu_crtc->crtc_id = index; + adev->mode_info.crtcs[index] = amdgpu_crtc; + + for (i = 0; i < 256; i++) { + amdgpu_crtc->lut_r[i] = i << 2; + amdgpu_crtc->lut_g[i] = i << 2; + amdgpu_crtc->lut_b[i] = i << 2; + } + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs); + + return 0; +} + +static int dce_virtual_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; + dce_virtual_set_display_funcs(adev); + dce_virtual_set_irq_funcs(adev); + + adev->mode_info.num_crtc = 1; + adev->mode_info.num_hpd = 1; + adev->mode_info.num_dig = 1; + return 0; +} + +static bool dce_virtual_get_connector_info(struct amdgpu_device *adev) +{ + struct amdgpu_i2c_bus_rec ddc_bus; + struct amdgpu_router router; + struct amdgpu_hpd hpd; + + /* look up gpio for ddc, hpd */ + ddc_bus.valid = false; + hpd.hpd = AMDGPU_HPD_NONE; + /* needed for aux chan transactions */ + ddc_bus.hpd = hpd.hpd; + + memset(&router, 0, sizeof(router)); + router.ddc_valid = false; + router.cd_valid = false; + amdgpu_display_add_connector(adev, + 0, + ATOM_DEVICE_CRT1_SUPPORT, + DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus, + CONNECTOR_OBJECT_ID_VIRTUAL, + &hpd, + &router); + + amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL, + ATOM_DEVICE_CRT1_SUPPORT, + 0); + + amdgpu_link_encoder_connector(adev->ddev); + + return true; +} + +static int dce_virtual_sw_init(void *handle) +{ + int r, i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq); + if (r) + return r; + + adev->ddev->max_vblank_count = 0; + + adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + adev->ddev->mode_config.preferred_depth = 24; + adev->ddev->mode_config.prefer_shadow = 1; + + 
adev->ddev->mode_config.fb_base = adev->mc.aper_base; + + r = amdgpu_modeset_create_props(adev); + if (r) + return r; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + /* allocate crtcs */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = dce_virtual_crtc_init(adev, i); + if (r) + return r; + } + + dce_virtual_get_connector_info(adev); + amdgpu_print_display_setup(adev->ddev); + + drm_kms_helper_poll_init(adev->ddev); + + adev->mode_info.mode_config_initialized = true; + return 0; +} + +static int dce_virtual_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + kfree(adev->mode_info.bios_hardcoded_edid); + + drm_kms_helper_poll_fini(adev->ddev); + + drm_mode_config_cleanup(adev->ddev); + adev->mode_info.mode_config_initialized = false; + return 0; +} + +static int dce_virtual_hw_init(void *handle) +{ + return 0; +} + +static int dce_virtual_hw_fini(void *handle) +{ + return 0; +} + +static int dce_virtual_suspend(void *handle) +{ + return dce_virtual_hw_fini(handle); +} + +static int dce_virtual_resume(void *handle) +{ + return dce_virtual_hw_init(handle); +} + +static bool dce_virtual_is_idle(void *handle) +{ + return true; +} + +static int dce_virtual_wait_for_idle(void *handle) +{ + return 0; +} + +static int dce_virtual_soft_reset(void *handle) +{ + return 0; +} + +static int dce_virtual_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int dce_virtual_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs dce_virtual_ip_funcs = { + .name = "dce_virtual", + .early_init = dce_virtual_early_init, + .late_init = NULL, + .sw_init = dce_virtual_sw_init, + .sw_fini = dce_virtual_sw_fini, + .hw_init = dce_virtual_hw_init, + .hw_fini = dce_virtual_hw_fini, + .suspend = dce_virtual_suspend, + .resume = dce_virtual_resume, + .is_idle = dce_virtual_is_idle, + .wait_for_idle = dce_virtual_wait_for_idle, + .soft_reset = dce_virtual_soft_reset, + .set_clockgating_state = dce_virtual_set_clockgating_state, + .set_powergating_state = dce_virtual_set_powergating_state, +}; + +/* these are handled by the primary encoders */ +static void dce_virtual_encoder_prepare(struct drm_encoder *encoder) +{ + return; +} + +static void dce_virtual_encoder_commit(struct drm_encoder *encoder) +{ + return; +} + +static void +dce_virtual_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return; +} + +static void dce_virtual_encoder_disable(struct drm_encoder *encoder) +{ + return; +} + +static void +dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + return; +} + +static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + + /* set the active encoder to connector routing */ + amdgpu_encoder_set_active_device(encoder); + + return true; +} + +static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = { + .dpms = dce_virtual_encoder_dpms, + .mode_fixup = dce_virtual_encoder_mode_fixup, + .prepare = dce_virtual_encoder_prepare, + .mode_set = dce_virtual_encoder_mode_set, + .commit = dce_virtual_encoder_commit, + .disable = dce_virtual_encoder_disable, +}; + +static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + + 
kfree(amdgpu_encoder->enc_priv); + drm_encoder_cleanup(encoder); + kfree(amdgpu_encoder); +} + +static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { + .destroy = dce_virtual_encoder_destroy, +}; + +static void dce_virtual_encoder_add(struct amdgpu_device *adev, + uint32_t encoder_enum, + uint32_t supported_device, + u16 caps) +{ + struct drm_device *dev = adev->ddev; + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + + /* see if we already added it */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->encoder_enum == encoder_enum) { + amdgpu_encoder->devices |= supported_device; + return; + } + + } + + /* add a new one */ + amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); + if (!amdgpu_encoder) + return; + + encoder = &amdgpu_encoder->base; + encoder->possible_crtcs = 0x1; + amdgpu_encoder->enc_priv = NULL; + amdgpu_encoder->encoder_enum = encoder_enum; + amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + amdgpu_encoder->devices = supported_device; + amdgpu_encoder->rmx_type = RMX_OFF; + amdgpu_encoder->underscan_type = UNDERSCAN_OFF; + amdgpu_encoder->is_ext_encoder = false; + amdgpu_encoder->caps = caps; + + drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs, + DRM_MODE_ENCODER_VIRTUAL, NULL); + drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs); + DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id); +} + +static const struct amdgpu_display_funcs dce_virtual_display_funcs = { + .set_vga_render_state = &dce_virtual_set_vga_render_state, + .bandwidth_update = &dce_virtual_bandwidth_update, + .vblank_get_counter = &dce_virtual_vblank_get_counter, + .vblank_wait = &dce_virtual_vblank_wait, + .is_display_hung = &dce_virtual_is_display_hung, + .backlight_set_level = NULL, + .backlight_get_level = NULL, + .hpd_sense = &dce_virtual_hpd_sense, + .hpd_set_polarity = &dce_virtual_hpd_set_polarity, + .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg, + .page_flip = &dce_virtual_page_flip, + .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, + .add_encoder = &dce_virtual_encoder_add, + .add_connector = &amdgpu_connector_add, + .stop_mc_access = &dce_virtual_stop_mc_access, + .resume_mc_access = &dce_virtual_resume_mc_access, +}; + +static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) +{ + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dce_virtual_display_funcs; +} + +static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer) +{ + struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer); + struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info); + unsigned crtc = 0; + drm_handle_vblank(adev->ddev, crtc); + dce_virtual_pageflip_irq(adev, NULL, NULL); + hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); + return HRTIMER_NORESTART; +} + +static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + if (state && !adev->mode_info.vsync_timer_enabled) { + DRM_DEBUG("Enable software vsync timer\n"); + hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_set_expires(&adev->mode_info.vblank_timer, 
ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD)); + adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle; + hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); + } else if (!state && adev->mode_info.vsync_timer_enabled) { + DRM_DEBUG("Disable software vsync timer\n"); + hrtimer_cancel(&adev->mode_info.vblank_timer); + } + + adev->mode_info.vsync_timer_enabled = state; + DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state); +} + + +static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CRTC_IRQ_VBLANK1: + dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state); + break; + default: + break; + } + return 0; +} + +static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev, + int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } +} + +static int dce_virtual_crtc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned crtc = 0; + unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1; + + dce_virtual_crtc_vblank_int_ack(adev, crtc); + + if (amdgpu_irq_enabled(adev, source, irq_type)) { + drm_handle_vblank(adev->ddev, crtc); + } + dce_virtual_pageflip_irq(adev, NULL, NULL); + DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + return 0; +} + +static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + if (type >= adev->mode_info.num_crtc) { + DRM_ERROR("invalid pageflip crtc %d\n", type); + return -EINVAL; + } + DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state); + + return 0; +} + +static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned long flags; + unsigned crtc_id = 0; + struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_flip_work *works; + + crtc_id = 0; + amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + + if (crtc_id >= adev->mode_info.num_crtc) { + DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); + return -EINVAL; + } + + /* IRQ could occur when in initial stage */ + if (amdgpu_crtc == NULL) + return 0; + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + works = amdgpu_crtc->pflip_works; + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { + DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " + "AMDGPU_FLIP_SUBMITTED(%d)\n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + return 0; + } + + /* page flip completed. 
clean up */ + amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; + amdgpu_crtc->pflip_works = NULL; + + /* wakeup userspace */ + if (works->event) + drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + + drm_crtc_vblank_put(&amdgpu_crtc->base); + schedule_work(&works->unpin_work); + + return 0; +} + +static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { + .set = dce_virtual_set_crtc_irq_state, + .process = dce_virtual_crtc_irq, +}; + +static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = { + .set = dce_virtual_set_pageflip_irq_state, + .process = dce_virtual_pageflip_irq, +}; + +static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; + + adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; + adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h index 5983e3150cc5..e239243f6ebc 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h @@ -21,21 +21,11 @@ * */ -#ifndef ICELAND_SMUM_H -#define ICELAND_SMUM_H +#ifndef __DCE_VIRTUAL_H__ +#define __DCE_VIRTUAL_H__ -#include "ppsmc.h" - -extern int iceland_smu_init(struct amdgpu_device *adev); -extern int iceland_smu_fini(struct amdgpu_device *adev); -extern int iceland_smu_start(struct amdgpu_device *adev); - -struct iceland_smu_private_data -{ - uint8_t *header; - uint8_t *mec_image; - uint32_t header_addr_high; - uint32_t header_addr_low; -}; +extern const struct amd_ip_funcs dce_virtual_ip_funcs; +#define DCE_VIRTUAL_VBLANK_PERIOD 16666666 #endif + diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c deleted file mode 100644 index ed03b75175d4..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include <linux/firmware.h> -#include "drmP.h" -#include "amdgpu.h" -#include "fiji_smum.h" - -MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); - -static void fiji_dpm_set_funcs(struct amdgpu_device *adev); - -static int fiji_dpm_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - fiji_dpm_set_funcs(adev); - - return 0; -} - -static int fiji_dpm_init_microcode(struct amdgpu_device *adev) -{ - char fw_name[30] = "amdgpu/fiji_smc.bin"; - int err; - - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); - -out: - if (err) { - DRM_ERROR("Failed to load firmware \"%s\"", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } - return err; -} - -static int fiji_dpm_sw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ret = fiji_dpm_init_microcode(adev); - if (ret) - return ret; - - return 0; -} - -static int fiji_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - - return 0; -} - -static int fiji_dpm_hw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - ret = fiji_smu_init(adev); - if (ret) { - DRM_ERROR("SMU initialization failed\n"); - goto fail; - } - - ret = fiji_smu_start(adev); - if (ret) { - DRM_ERROR("SMU start failed\n"); - goto fail; - } - - mutex_unlock(&adev->pm.mutex); - return 0; - -fail: - adev->firmware.smu_load = false; - mutex_unlock(&adev->pm.mutex); - return -EINVAL; -} - -static int fiji_dpm_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - mutex_lock(&adev->pm.mutex); - fiji_smu_fini(adev); - mutex_unlock(&adev->pm.mutex); - return 0; -} - -static int fiji_dpm_suspend(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - fiji_dpm_hw_fini(adev); - - return 0; -} - -static int fiji_dpm_resume(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - fiji_dpm_hw_init(adev); - - return 0; -} - -static int fiji_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int fiji_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -const struct amd_ip_funcs fiji_dpm_ip_funcs = { - .name = "fiji_dpm", - .early_init = fiji_dpm_early_init, - .late_init = NULL, - .sw_init = fiji_dpm_sw_init, - .sw_fini = fiji_dpm_sw_fini, - .hw_init = fiji_dpm_hw_init, - .hw_fini = fiji_dpm_hw_fini, - .suspend = fiji_dpm_suspend, - .resume = fiji_dpm_resume, - .is_idle = NULL, - .wait_for_idle = NULL, - .soft_reset = NULL, - .set_clockgating_state = fiji_dpm_set_clockgating_state, - .set_powergating_state = fiji_dpm_set_powergating_state, -}; - -static const struct amdgpu_dpm_funcs fiji_dpm_funcs = { - .get_temperature = NULL, - .pre_set_power_state = NULL, - .set_power_state = NULL, - .post_set_power_state = NULL, - .display_configuration_changed = NULL, - .get_sclk = NULL, - .get_mclk = NULL, - .print_power_state = NULL, - .debugfs_print_current_performance_level = NULL, - .force_performance_level = NULL, - .vblank_too_short = NULL, - .powergate_uvd = NULL, -}; - -static void fiji_dpm_set_funcs(struct amdgpu_device *adev) -{ - if (NULL == adev->pm.funcs) - adev->pm.funcs = &fiji_dpm_funcs; -} diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c 
b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c deleted file mode 100644 index b3e19ba4c57f..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ /dev/null @@ -1,863 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include <linux/firmware.h> -#include "drmP.h" -#include "amdgpu.h" -#include "fiji_ppsmc.h" -#include "fiji_smum.h" -#include "smu_ucode_xfer_vi.h" -#include "amdgpu_ucode.h" - -#include "smu/smu_7_1_3_d.h" -#include "smu/smu_7_1_3_sh_mask.h" - -#define FIJI_SMC_SIZE 0x20000 - -static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit) -{ - uint32_t val; - - if (smc_address & 3) - return -EINVAL; - - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(mmSMC_IND_INDEX_0, smc_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - return 0; -} - -static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) -{ - uint32_t addr; - uint32_t data, orig_data; - int result = 0; - uint32_t extra_shift; - unsigned long flags; - - if (smc_start_address & 3) - return -EINVAL; - - if ((smc_start_address + byte_count) > limit) - return -EINVAL; - - addr = smc_start_address; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - while (byte_count >= 4) { - /* Bytes are written into the SMC addres space with the MSB first */ - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - - result = fiji_set_smc_sram_address(adev, addr, limit); - - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - if (0 != byte_count) { - /* Now write odd bytes left, do a read modify write cycle */ - data = 0; - - result = fiji_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - orig_data = RREG32(mmSMC_IND_DATA_0); - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - data = (data << 8) + *src++; - byte_count--; - } - - data <<= extra_shift; - data |= (orig_data & ~((~0UL) << extra_shift)); - - result = fiji_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - } - -out: - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int fiji_program_jump_on_start(struct amdgpu_device *adev) -{ - 
static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40}; - fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); - - return 0; -} - -static bool fiji_is_smc_ram_running(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable); - - return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C))); -} - -static int wait_smu_response(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32(mmSMC_RESP_0); - if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} - -static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, 0x20000); - WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test); - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send message\n"); - return -EINVAL; - } - - return 0; -} - -static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) -{ - if (!fiji_is_smc_ram_running(adev)) - { - return -EINVAL; - } - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MESSAGE_0, msg); - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send message\n"); - return -EINVAL; - } - - return 0; -} - -static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, - PPSMC_Msg msg) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MESSAGE_0, msg); - - return 0; -} - -static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, - uint32_t parameter) -{ - if (!fiji_is_smc_ram_running(adev)) - return -EINVAL; - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, parameter); - - return fiji_send_msg_to_smc(adev, msg); -} - -static int fiji_send_msg_to_smc_with_parameter_without_waiting( - struct amdgpu_device *adev, - PPSMC_Msg msg, uint32_t parameter) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, parameter); - - return fiji_send_msg_to_smc_without_waiting(adev, msg); -} - -#if 0 /* not used yet */ -static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - if (!fiji_is_smc_ram_running(adev)) - return -EINVAL; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} -#endif - -static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev) -{ - const struct smc_firmware_header_v1_0 *hdr; - uint32_t ucode_size; - uint32_t ucode_start_address; - const uint8_t *src; - uint32_t val; - uint32_t byte_count; - uint32_t *data; - unsigned long flags; - - if (!adev->pm.fw) - return -EINVAL; - - /* Skip SMC ucode loading on SR-IOV capable boards. - * vbios does this for us in asic_init in that case. 
- */ - if (adev->virtualization.supports_sr_iov) - return 0; - - hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; - amdgpu_ucode_print_smc_hdr(&hdr->header); - - adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); - ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); - src = (const uint8_t *) - (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - - if (ucode_size & 3) { - DRM_ERROR("SMC ucode is not 4 bytes aligned\n"); - return -EINVAL; - } - - if (ucode_size > FIJI_SMC_SIZE) { - DRM_ERROR("SMC address is beyond the SMC RAM area\n"); - return -EINVAL; - } - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(mmSMC_IND_INDEX_0, ucode_start_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - byte_count = ucode_size; - data = (uint32_t *)src; - for (; byte_count >= 4; data++, byte_count -= 4) - WREG32(mmSMC_IND_DATA_0, data[0]); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return 0; -} - -#if 0 /* not used yet */ -static int fiji_read_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t *value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = fiji_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - *value = RREG32(mmSMC_IND_DATA_0); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int fiji_write_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = fiji_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - WREG32(mmSMC_IND_DATA_0, value); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int fiji_smu_stop_smc(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - return 0; -} -#endif - -static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case UCODE_ID_SDMA0: - return AMDGPU_UCODE_ID_SDMA0; - case UCODE_ID_SDMA1: - return AMDGPU_UCODE_ID_SDMA1; - case UCODE_ID_CP_CE: - return AMDGPU_UCODE_ID_CP_CE; - case UCODE_ID_CP_PFP: - return AMDGPU_UCODE_ID_CP_PFP; - case UCODE_ID_CP_ME: - return AMDGPU_UCODE_ID_CP_ME; - case UCODE_ID_CP_MEC: - case UCODE_ID_CP_MEC_JT1: - case UCODE_ID_CP_MEC_JT2: - return AMDGPU_UCODE_ID_CP_MEC1; - case UCODE_ID_RLC_G: - return AMDGPU_UCODE_ID_RLC_G; - default: - DRM_ERROR("ucode type is out of range!\n"); - return AMDGPU_UCODE_ID_MAXIMUM; - } -} - -static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev, - uint32_t fw_type, - struct SMU_Entry *entry) -{ - enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type); - struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; - const struct gfx_firmware_header_v1_0 *header = NULL; - uint64_t gpu_addr; - uint32_t data_size; - 
- if (ucode->fw == NULL) - return -EINVAL; - gpu_addr = ucode->mc_addr; - header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; - data_size = le32_to_cpu(header->header.ucode_size_bytes); - - if ((fw_type == UCODE_ID_CP_MEC_JT1) || - (fw_type == UCODE_ID_CP_MEC_JT2)) { - gpu_addr += le32_to_cpu(header->jt_offset) << 2; - data_size = le32_to_cpu(header->jt_size) << 2; - } - - entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); - entry->id = (uint16_t)fw_type; - entry->image_addr_high = upper_32_bits(gpu_addr); - entry->image_addr_low = lower_32_bits(gpu_addr); - entry->meta_data_addr_high = 0; - entry->meta_data_addr_low = 0; - entry->data_size_byte = data_size; - entry->num_register_entries = 0; - - if (fw_type == UCODE_ID_RLC_G) - entry->flags = 1; - else - entry->flags = 0; - - return 0; -} - -static int fiji_smu_request_load_fw(struct amdgpu_device *adev) -{ - struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv; - struct SMU_DRAMData_TOC *toc; - uint32_t fw_to_load; - - WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0); - - fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high); - fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low); - - toc = (struct SMU_DRAMData_TOC *)private->header; - toc->num_entries = 0; - toc->structure_version = 1; - - if (!adev->firmware.smu_load) - return 0; - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for RLC\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for CE\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for PFP\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for ME\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA0\n"); - return -EINVAL; - } - - if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA1\n"); - return -EINVAL; - } - - fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); - fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK | - UCODE_ID_SDMA0_MASK | - UCODE_ID_SDMA1_MASK | - UCODE_ID_CP_CE_MASK | - UCODE_ID_CP_ME_MASK | - UCODE_ID_CP_PFP_MASK | - UCODE_ID_CP_MEC_MASK; - - if 
(fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { - DRM_ERROR("Fail to request SMU load ucode\n"); - return -EINVAL; - } - - return 0; -} - -static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case AMDGPU_UCODE_ID_SDMA0: - return UCODE_ID_SDMA0_MASK; - case AMDGPU_UCODE_ID_SDMA1: - return UCODE_ID_SDMA1_MASK; - case AMDGPU_UCODE_ID_CP_CE: - return UCODE_ID_CP_CE_MASK; - case AMDGPU_UCODE_ID_CP_PFP: - return UCODE_ID_CP_PFP_MASK; - case AMDGPU_UCODE_ID_CP_ME: - return UCODE_ID_CP_ME_MASK; - case AMDGPU_UCODE_ID_CP_MEC1: - return UCODE_ID_CP_MEC_MASK; - case AMDGPU_UCODE_ID_CP_MEC2: - return UCODE_ID_CP_MEC_MASK; - case AMDGPU_UCODE_ID_RLC_G: - return UCODE_ID_RLC_G_MASK; - default: - DRM_ERROR("ucode type is out of range!\n"); - return 0; - } -} - -static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev, - uint32_t fw_type) -{ - uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type); - int i; - - for (i = 0; i < adev->usec_timeout; i++) { - if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("check firmware loading failed\n"); - return -EINVAL; - } - - return 0; -} - -static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev) -{ - int result; - uint32_t val; - int i; - - /* Assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - result = fiji_smu_upload_firmware_image(adev); - if (result) - return result; - - /* Clear status */ - WREG32_SMC(ixSMU_STATUS, 0); - - /* Enable clock */ - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - /* De-assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - /* Set SMU Auto Start */ - val = RREG32_SMC(ixSMU_INPUT_DATA); - val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1); - WREG32_SMC(ixSMU_INPUT_DATA, val); - - /* Clear firmware interrupt enable flag */ - WREG32_SMC(ixFIRMWARE_FLAGS, 0); - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixRCU_UC_EVENTS); - if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Interrupt is not enabled by firmware\n"); - return -EINVAL; - } - - /* Call Test SMU message with 0x20000 offset - * to trigger SMU start - */ - fiji_send_msg_to_smc_offset(adev); - DRM_INFO("[FM]try triger smu start\n"); - /* Wait for done bit to be set */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixSMU_STATUS); - if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Timeout for SMU start\n"); - return -EINVAL; - } - - /* Check pass/failed indicator */ - val = RREG32_SMC(ixSMU_STATUS); - if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) { - DRM_ERROR("SMU Firmware start failed\n"); - return -EINVAL; - } - DRM_INFO("[FM]smu started\n"); - /* Wait for firmware to initialize */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixFIRMWARE_FLAGS); - if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("SMU firmware initialization failed\n"); - 
return -EINVAL; - } - DRM_INFO("[FM]smu initialized\n"); - - return 0; -} - -static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev) -{ - int i, result; - uint32_t val; - - /* wait for smc boot up */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixRCU_UC_EVENTS); - val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done); - if (val) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("SMC boot sequence is not completed\n"); - return -EINVAL; - } - - /* Clear firmware interrupt enable flag */ - WREG32_SMC(ixFIRMWARE_FLAGS, 0); - - /* Assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - result = fiji_smu_upload_firmware_image(adev); - if (result) - return result; - - /* Set smc instruct start point at 0x0 */ - fiji_program_jump_on_start(adev); - - /* Enable clock */ - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - /* De-assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - /* Wait for firmware to initialize */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixFIRMWARE_FLAGS); - if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Timeout for SMC firmware initialization\n"); - return -EINVAL; - } - - return 0; -} - -int fiji_smu_start(struct amdgpu_device *adev) -{ - int result; - uint32_t val; - - if (!fiji_is_smc_ram_running(adev)) { - val = RREG32_SMC(ixSMU_FIRMWARE); - if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) { - DRM_INFO("[FM]start smu in nonprotection mode\n"); - result = fiji_smu_start_in_non_protection_mode(adev); - if (result) - return result; - } else { - DRM_INFO("[FM]start smu in protection mode\n"); - result = fiji_smu_start_in_protection_mode(adev); - if (result) - return result; - } - } - - return fiji_smu_request_load_fw(adev); -} - -static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = { - .check_fw_load_finish = fiji_smu_check_fw_load_finish, - .request_smu_load_fw = NULL, - .request_smu_specific_fw = NULL, -}; - -int fiji_smu_init(struct amdgpu_device *adev) -{ - struct fiji_smu_private_data *private; - uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - uint32_t smu_internal_buffer_size = 200*4096; - struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; - struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; - uint64_t mc_addr; - void *toc_buf_ptr; - void *smu_buf_ptr; - int ret; - - private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL); - if (NULL == private) - return -ENOMEM; - - /* allocate firmware buffers */ - if (adev->firmware.smu_load) - amdgpu_ucode_init_bo(adev); - - adev->smu.priv = private; - adev->smu.fw_flags = 0; - - /* Allocate FW image data structure and header buffer */ - ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, toc_buf); - if (ret) { - DRM_ERROR("Failed to allocate memory for TOC buffer\n"); - return -ENOMEM; - } - - /* Allocate buffer for SMU internal buffer */ - ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, 
smu_buf); - if (ret) { - DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); - return -ENOMEM; - } - - /* Retrieve GPU address for header buffer and internal buffer */ - ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to reserve the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to pin the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to map the TOC buffer\n"); - return -EINVAL; - } - - amdgpu_bo_unreserve(adev->smu.toc_buf); - private->header_addr_low = lower_32_bits(mc_addr); - private->header_addr_high = upper_32_bits(mc_addr); - private->header = toc_buf_ptr; - - ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to reserve the SMU internal buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to pin the SMU internal buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to map the SMU internal buffer\n"); - return -EINVAL; - } - - amdgpu_bo_unreserve(adev->smu.smu_buf); - private->smu_buffer_addr_low = lower_32_bits(mc_addr); - private->smu_buffer_addr_high = upper_32_bits(mc_addr); - - adev->smu.smumgr_funcs = &fiji_smumgr_funcs; - - return 0; -} - -int fiji_smu_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_unref(&adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - kfree(adev->smu.priv); - adev->smu.priv = NULL; - if (adev->firmware.fw_buf) - amdgpu_ucode_fini_bo(adev); - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c new file mode 100644 index 000000000000..40abb6b81c09 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -0,0 +1,3362 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_ih.h" +#include "amdgpu_gfx.h" +#include "amdgpu_ucode.h" +#include "si/clearstate_si.h" +#include "si/sid.h" + +#define GFX6_NUM_GFX_RINGS 1 +#define GFX6_NUM_COMPUTE_RINGS 2 +#define STATIC_PER_CU_PG_ENABLE (1 << 3) +#define DYN_PER_CU_PG_ENABLE (1 << 2) +#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90 +#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D + + +static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev); +static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev); +static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev); + +MODULE_FIRMWARE("radeon/tahiti_pfp.bin"); +MODULE_FIRMWARE("radeon/tahiti_me.bin"); +MODULE_FIRMWARE("radeon/tahiti_ce.bin"); +MODULE_FIRMWARE("radeon/tahiti_rlc.bin"); + +MODULE_FIRMWARE("radeon/pitcairn_pfp.bin"); +MODULE_FIRMWARE("radeon/pitcairn_me.bin"); +MODULE_FIRMWARE("radeon/pitcairn_ce.bin"); +MODULE_FIRMWARE("radeon/pitcairn_rlc.bin"); + +MODULE_FIRMWARE("radeon/verde_pfp.bin"); +MODULE_FIRMWARE("radeon/verde_me.bin"); +MODULE_FIRMWARE("radeon/verde_ce.bin"); +MODULE_FIRMWARE("radeon/verde_rlc.bin"); + +MODULE_FIRMWARE("radeon/oland_pfp.bin"); +MODULE_FIRMWARE("radeon/oland_me.bin"); +MODULE_FIRMWARE("radeon/oland_ce.bin"); +MODULE_FIRMWARE("radeon/oland_rlc.bin"); + +MODULE_FIRMWARE("radeon/hainan_pfp.bin"); +MODULE_FIRMWARE("radeon/hainan_me.bin"); +MODULE_FIRMWARE("radeon/hainan_ce.bin"); +MODULE_FIRMWARE("radeon/hainan_rlc.bin"); + +static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev); +static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); +//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev); +static void gfx_v6_0_init_pg(struct amdgpu_device *adev); + + +static const u32 verde_rlc_save_restore_register_list[] = +{ + (0x8000 << 16) | (0x98f4 >> 2), + 0x00000000, + (0x8040 << 16) | (0x98f4 >> 2), + 0x00000000, + (0x8000 << 16) | (0xe80 >> 2), + 0x00000000, + (0x8040 << 16) | (0xe80 >> 2), + 0x00000000, + (0x8000 << 16) | (0x89bc >> 2), + 0x00000000, + (0x8040 << 16) | (0x89bc >> 2), + 0x00000000, + (0x8000 << 16) | (0x8c1c >> 2), + 0x00000000, + (0x8040 << 16) | (0x8c1c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x98f0 >> 2), + 0x00000000, + (0x9c00 << 16) | (0xe7c >> 2), + 0x00000000, + (0x8000 << 16) | (0x9148 >> 2), + 0x00000000, + (0x8040 << 16) | (0x9148 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9150 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x897c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8d8c >> 2), + 0x00000000, + (0x9c00 << 16) | (0xac54 >> 2), + 0X00000000, + 0x3, + (0x9c00 << 16) | (0x98f8 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9910 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9914 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9918 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x991c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9920 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9924 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9928 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x992c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9930 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9934 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9938 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x993c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9940 >> 2), + 
0x00000000, + (0x9c00 << 16) | (0x9944 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9948 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x994c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9950 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9954 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9958 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x995c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9960 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9964 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9968 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x996c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9970 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9974 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9978 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x997c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9980 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9984 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9988 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x998c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8c00 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8c14 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8c04 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8c08 >> 2), + 0x00000000, + (0x8000 << 16) | (0x9b7c >> 2), + 0x00000000, + (0x8040 << 16) | (0x9b7c >> 2), + 0x00000000, + (0x8000 << 16) | (0xe84 >> 2), + 0x00000000, + (0x8040 << 16) | (0xe84 >> 2), + 0x00000000, + (0x8000 << 16) | (0x89c0 >> 2), + 0x00000000, + (0x8040 << 16) | (0x89c0 >> 2), + 0x00000000, + (0x8000 << 16) | (0x914c >> 2), + 0x00000000, + (0x8040 << 16) | (0x914c >> 2), + 0x00000000, + (0x8000 << 16) | (0x8c20 >> 2), + 0x00000000, + (0x8040 << 16) | (0x8c20 >> 2), + 0x00000000, + (0x8000 << 16) | (0x9354 >> 2), + 0x00000000, + (0x8040 << 16) | (0x9354 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9060 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9364 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9100 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x913c >> 2), + 0x00000000, + (0x8000 << 16) | (0x90e0 >> 2), + 0x00000000, + (0x8000 << 16) | (0x90e4 >> 2), + 0x00000000, + (0x8000 << 16) | (0x90e8 >> 2), + 0x00000000, + (0x8040 << 16) | (0x90e0 >> 2), + 0x00000000, + (0x8040 << 16) | (0x90e4 >> 2), + 0x00000000, + (0x8040 << 16) | (0x90e8 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8bcc >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8b24 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x88c4 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8e50 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8c0c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8e58 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8e5c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9508 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x950c >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9494 >> 2), + 0x00000000, + (0x9c00 << 16) | (0xac0c >> 2), + 0x00000000, + (0x9c00 << 16) | (0xac10 >> 2), + 0x00000000, + (0x9c00 << 16) | (0xac14 >> 2), + 0x00000000, + (0x9c00 << 16) | (0xae00 >> 2), + 0x00000000, + (0x9c00 << 16) | (0xac08 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x88d4 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x88c8 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x88cc >> 2), + 0x00000000, + (0x9c00 << 16) | (0x89b0 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8b10 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x8a14 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9830 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9834 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9838 >> 2), + 0x00000000, + (0x9c00 << 16) | (0x9a10 >> 2), + 0x00000000, + (0x8000 << 16) | (0x9870 >> 2), + 0x00000000, + (0x8000 << 16) | (0x9874 >> 2), + 0x00000000, + (0x8001 << 16) | (0x9870 >> 2), + 0x00000000, + (0x8001 << 16) | (0x9874 >> 2), + 
0x00000000, + (0x8040 << 16) | (0x9870 >> 2), + 0x00000000, + (0x8040 << 16) | (0x9874 >> 2), + 0x00000000, + (0x8041 << 16) | (0x9870 >> 2), + 0x00000000, + (0x8041 << 16) | (0x9874 >> 2), + 0x00000000, + 0x00000000 +}; + +static int gfx_v6_0_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err; + const struct gfx_firmware_header_v1_0 *cp_hdr; + const struct rlc_firmware_header_v1_0 *rlc_hdr; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_TAHITI: + chip_name = "tahiti"; + break; + case CHIP_PITCAIRN: + chip_name = "pitcairn"; + break; + case CHIP_VERDE: + chip_name = "verde"; + break; + case CHIP_OLAND: + chip_name = "oland"; + break; + case CHIP_HAINAN: + chip_name = "hainan"; + break; + default: BUG(); + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); + err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.pfp_fw); + if (err) + goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); + err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.me_fw); + if (err) + goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); + err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.ce_fw); + if (err) + goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); + err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); + +out: + if (err) { + printk(KERN_ERR + "gfx6: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; + } + return err; +} + +static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev) +{ + const u32 num_tile_mode_states = 32; + u32 reg_offset, gb_tile_moden, split_equal_to_row_size; + + switch (adev->gfx.config.mem_row_size_in_kb) { + case 1: + split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB; + break; + case 2: + default: + split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB; + break; + case 4: + split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB; + break; + } + + if (adev->asic_type == CHIP_VERDE || + adev->asic_type == CHIP_OLAND || + adev->asic_type == CHIP_HAINAN) { + for (reg_offset = 0; 
reg_offset < num_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + break; + case 1: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + break; + case 2: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + break; + case 3: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + break; + case 4: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 5: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 6: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 7: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + break; + case 8: + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 9: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + 
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 10: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + break; + case 11: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 12: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 13: + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 14: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 15: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 16: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 17: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P4_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 21: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 22: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + 
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + break; + case 23: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 24: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 25: + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + NUM_BANKS(ADDR_SURF_8_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + break; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; + WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden); + } + } else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) { + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { + switch (reg_offset) { + case 0: /* non-AA compressed depth or any compressed stencil */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 1: /* 2xAA/4xAA compressed depth only */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 2: /* 8xAA compressed depth only */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + 
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + break; + case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 8: /* 1D and 1D Array Surfaces */ + gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | + MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 9: /* Displayable maps. */ + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 10: /* Display 8bpp. */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 11: /* Display 16bpp. */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 12: /* Display 32bpp. */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + break; + case 13: /* Thin. 
*/ + gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 14: /* Thin 8 bpp. */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + break; + case 15: /* Thin 16 bpp. */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + break; + case 16: /* Thin 32 bpp. */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + break; + case 17: /* Thin 64 bpp. */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(split_equal_to_row_size) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + break; + case 21: /* 8 bpp PRT. 
*/ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 22: /* 16 bpp PRT */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); + break; + case 23: /* 32 bpp PRT */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 24: /* 64 bpp PRT */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | + NUM_BANKS(ADDR_SURF_16_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); + break; + case 25: /* 128 bpp PRT */ + gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | + MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | + NUM_BANKS(ADDR_SURF_8_BANK) | + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); + break; + default: + gb_tile_moden = 0; + break; + } + adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; + WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden); + } + } else{ + + DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type); + } + +} + +static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 instance) +{ + u32 data; + + if (instance == 0xffffffff) + data = INSTANCE_BROADCAST_WRITES; + else + data = INSTANCE_INDEX(instance); + + if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) + data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; + else if (se_num == 0xffffffff) + data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); + else if (sh_num == 0xffffffff) + data |= SH_BROADCAST_WRITES | SE_INDEX(se_num); + else + data |= SH_INDEX(sh_num) | SE_INDEX(se_num); + WREG32(GRBM_GFX_INDEX, data); +} + +static u32 gfx_v6_0_create_bitmask(u32 bit_width) +{ + return (u32)(((u64)1 << bit_width) - 1); +} + +static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev, + u32 max_rb_num_per_se, + u32 sh_per_se) +{ + u32 data, mask; + + data = RREG32(CC_RB_BACKEND_DISABLE); + data &= BACKEND_DISABLE_MASK; + data |= RREG32(GC_USER_RB_BACKEND_DISABLE); + + data >>= BACKEND_DISABLE_SHIFT; + + mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se); + + return data & mask; +} + +static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf) +{ + switch (adev->asic_type) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + *rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) | + SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2); + break; + case CHIP_VERDE: + *rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1); + break; + 
case CHIP_OLAND: + *rconf |= RB_YSEL; + break; + case CHIP_HAINAN: + *rconf |= 0x0; + break; + default: + DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type); + break; + } +} + +static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev, + u32 raster_config, unsigned rb_mask, + unsigned num_rb) +{ + unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1); + unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1); + unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2); + unsigned rb_per_se = num_rb / num_se; + unsigned se_mask[4]; + unsigned se; + + se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask; + se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask; + se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask; + se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask; + + WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4)); + WARN_ON(!(sh_per_se == 1 || sh_per_se == 2)); + WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2)); + + for (se = 0; se < num_se; se++) { + unsigned raster_config_se = raster_config; + unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se); + unsigned pkr1_mask = pkr0_mask << rb_per_pkr; + int idx = (se / 2) * 2; + + if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) { + raster_config_se &= ~SE_MAP_MASK; + + if (!se_mask[idx]) { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3); + } else { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0); + } + } + + pkr0_mask &= rb_mask; + pkr1_mask &= rb_mask; + if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) { + raster_config_se &= ~PKR_MAP_MASK; + + if (!pkr0_mask) { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3); + } else { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0); + } + } + + if (rb_per_se >= 2) { + unsigned rb0_mask = 1 << (se * rb_per_se); + unsigned rb1_mask = rb0_mask << 1; + + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR0_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0); + } + } + + if (rb_per_se > 2) { + rb0_mask = 1 << (se * rb_per_se + rb_per_pkr); + rb1_mask = rb0_mask << 1; + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR1_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0); + } + } + } + } + + /* GRBM_GFX_INDEX has a different offset on SI */ + gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + WREG32(PA_SC_RASTER_CONFIG, raster_config_se); + } + + /* GRBM_GFX_INDEX has a different offset on SI */ + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); +} + +static void gfx_v6_0_setup_rb(struct amdgpu_device *adev, + u32 se_num, u32 sh_per_se, + u32 max_rb_num_per_se) +{ + int i, j; + u32 data, mask; + u32 disabled_rbs = 0; + u32 enabled_rbs = 0; + unsigned num_rb_pipes; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < se_num; i++) { + for (j = 0; j < sh_per_se; j++) { + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se); + disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); + } + } + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + mask = 1; + for (i = 0; i < max_rb_num_per_se * se_num; i++) { + if 
(!(disabled_rbs & mask)) + enabled_rbs |= mask; + mask <<= 1; + } + + adev->gfx.config.backend_enable_mask = enabled_rbs; + adev->gfx.config.num_rbs = hweight32(enabled_rbs); + + num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se * + adev->gfx.config.max_shader_engines, 16); + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < se_num; i++) { + gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff); + data = 0; + for (j = 0; j < sh_per_se; j++) { + switch (enabled_rbs & 3) { + case 1: + data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); + break; + case 2: + data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); + break; + case 3: + default: + data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); + break; + } + enabled_rbs >>= 2; + } + gfx_v6_0_raster_config(adev, &data); + + if (!adev->gfx.config.backend_enable_mask || + adev->gfx.config.num_rbs >= num_rb_pipes) + WREG32(PA_SC_RASTER_CONFIG, data); + else + gfx_v6_0_write_harvested_raster_configs(adev, data, + adev->gfx.config.backend_enable_mask, + num_rb_pipes); + } + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); +} +/* +static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev) +{ +} +*/ + +static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh) +{ + u32 data, mask; + + data = RREG32(CC_GC_SHADER_ARRAY_CONFIG); + data &= INACTIVE_CUS_MASK; + data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG); + + data >>= INACTIVE_CUS_SHIFT; + + mask = gfx_v6_0_create_bitmask(cu_per_sh); + + return ~data & mask; +} + + +static void gfx_v6_0_setup_spi(struct amdgpu_device *adev, + u32 se_num, u32 sh_per_se, + u32 cu_per_sh) +{ + int i, j, k; + u32 data, mask; + u32 active_cu = 0; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < se_num; i++) { + for (j = 0; j < sh_per_se; j++) { + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + data = RREG32(SPI_STATIC_THREAD_MGMT_3); + active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh); + + mask = 1; + for (k = 0; k < 16; k++) { + mask <<= k; + if (active_cu & mask) { + data &= ~mask; + WREG32(SPI_STATIC_THREAD_MGMT_3, data); + break; + } + } + } + } + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); +} + +static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) +{ + u32 gb_addr_config = 0; + u32 mc_shared_chmap, mc_arb_ramcfg; + u32 sx_debug_1; + u32 hdp_host_path_cntl; + u32 tmp; + + switch (adev->asic_type) { + case CHIP_TAHITI: + adev->gfx.config.max_shader_engines = 2; + adev->gfx.config.max_tile_pipes = 12; + adev->gfx.config.max_cu_per_sh = 8; + adev->gfx.config.max_sh_per_se = 2; + adev->gfx.config.max_backends_per_se = 4; + adev->gfx.config.max_texture_channel_caches = 12; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_PITCAIRN: + adev->gfx.config.max_shader_engines = 2; + adev->gfx.config.max_tile_pipes = 8; + adev->gfx.config.max_cu_per_sh = 5; + adev->gfx.config.max_sh_per_se = 2; + adev->gfx.config.max_backends_per_se = 4; + adev->gfx.config.max_texture_channel_caches = 8; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + 
adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; + break; + + case CHIP_VERDE: + adev->gfx.config.max_shader_engines = 1; + adev->gfx.config.max_tile_pipes = 4; + adev->gfx.config.max_cu_per_sh = 5; + adev->gfx.config.max_sh_per_se = 2; + adev->gfx.config.max_backends_per_se = 4; + adev->gfx.config.max_texture_channel_caches = 4; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 32; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x40; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_OLAND: + adev->gfx.config.max_shader_engines = 1; + adev->gfx.config.max_tile_pipes = 4; + adev->gfx.config.max_cu_per_sh = 6; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; + adev->gfx.config.max_texture_channel_caches = 4; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 16; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x40; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; + break; + case CHIP_HAINAN: + adev->gfx.config.max_shader_engines = 1; + adev->gfx.config.max_tile_pipes = 4; + adev->gfx.config.max_cu_per_sh = 5; + adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 1; + adev->gfx.config.max_texture_channel_caches = 2; + adev->gfx.config.max_gprs = 256; + adev->gfx.config.max_gs_threads = 16; + adev->gfx.config.max_hw_contexts = 8; + + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x40; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN; + break; + default: + BUG(); + break; + } + + WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + WREG32(SRBM_INT_CNTL, 1); + WREG32(SRBM_INT_ACK, 1); + + WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); + + mc_shared_chmap = RREG32(MC_SHARED_CHMAP); + mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); + + adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; + adev->gfx.config.mem_max_burst_length_bytes = 256; + tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; + adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; + if (adev->gfx.config.mem_row_size_in_kb > 4) + adev->gfx.config.mem_row_size_in_kb = 4; + adev->gfx.config.shader_engine_tile_size = 32; + adev->gfx.config.num_gpus = 1; + adev->gfx.config.multi_gpu_tile_size = 64; + + gb_addr_config &= ~ROW_SIZE_MASK; + switch (adev->gfx.config.mem_row_size_in_kb) { + case 1: + default: + gb_addr_config |= ROW_SIZE(0); + break; + case 2: + gb_addr_config |= ROW_SIZE(1); + break; + case 4: + gb_addr_config |= ROW_SIZE(2); + break; + } + adev->gfx.config.gb_addr_config = gb_addr_config; + + WREG32(GB_ADDR_CONFIG, gb_addr_config); + WREG32(DMIF_ADDR_CONFIG, gb_addr_config); + WREG32(DMIF_ADDR_CALC, gb_addr_config); + WREG32(HDP_ADDR_CONFIG, gb_addr_config); + WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, 
gb_addr_config); + WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); +#if 0 + if (adev->has_uvd) { + WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config); + WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); + WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); + } +#endif + gfx_v6_0_tiling_mode_table_init(adev); + + gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines, + adev->gfx.config.max_sh_per_se, + adev->gfx.config.max_backends_per_se); + + gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines, + adev->gfx.config.max_sh_per_se, + adev->gfx.config.max_cu_per_sh); + + gfx_v6_0_get_cu_info(adev); + + WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | + ROQ_IB2_START(0x2b))); + WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60)); + + sx_debug_1 = RREG32(SX_DEBUG_1); + WREG32(SX_DEBUG_1, sx_debug_1); + + WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); + + WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_frontend) | + SC_BACKEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_backend) | + SC_HIZ_TILE_FIFO_SIZE(adev->gfx.config.sc_hiz_tile_fifo_size) | + SC_EARLYZ_TILE_FIFO_SIZE(adev->gfx.config.sc_earlyz_tile_fifo_size))); + + WREG32(VGT_NUM_INSTANCES, 1); + WREG32(CP_PERFMON_CNTL, 0); + WREG32(SQ_CONFIG, 0); + WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | + FORCE_EOV_MAX_REZ_CNT(255))); + + WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) | + AUTO_INVLD_EN(ES_AND_GS_AUTO)); + + WREG32(VGT_GS_VERTEX_REUSE, 16); + WREG32(PA_SC_LINE_STIPPLE_STATE, 0); + + WREG32(CB_PERFCOUNTER0_SELECT0, 0); + WREG32(CB_PERFCOUNTER0_SELECT1, 0); + WREG32(CB_PERFCOUNTER1_SELECT0, 0); + WREG32(CB_PERFCOUNTER1_SELECT1, 0); + WREG32(CB_PERFCOUNTER2_SELECT0, 0); + WREG32(CB_PERFCOUNTER2_SELECT1, 0); + WREG32(CB_PERFCOUNTER3_SELECT0, 0); + WREG32(CB_PERFCOUNTER3_SELECT1, 0); + + hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); + WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); + + WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); + + udelay(50); +} + + +static void gfx_v6_0_scratch_init(struct amdgpu_device *adev) +{ + int i; + + adev->gfx.scratch.num_reg = 7; + adev->gfx.scratch.reg_base = SCRATCH_REG0; + for (i = 0; i < adev->gfx.scratch.num_reg; i++) { + adev->gfx.scratch.free[i] = true; + adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i; + } +} + +static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t scratch; + uint32_t tmp = 0; + unsigned i; + int r; + + r = amdgpu_gfx_scratch_get(adev, &scratch); + if (r) { + DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); + return r; + } + WREG32(scratch, 0xCAFEDEAD); + + r = amdgpu_ring_alloc(ring, 3); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); + amdgpu_gfx_scratch_free(adev, scratch); + return r; + } + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + if (i < adev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + } else { + DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", + ring->idx, scratch, tmp); + r = -EINVAL; + } + amdgpu_gfx_scratch_free(adev, scratch); + return r; +} + +static void 
gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + /* flush hdp cache */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0x1); +} + +/** + * gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp + * + * @ring: amdgpu_ring structure holding ring information + * + * Emits an hdp invalidate on the cp. + */ +static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, HDP_DEBUG0); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0x1); +} + +static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, + u64 seq, unsigned flags) +{ + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; + bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; + /* flush read cache over gart */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + amdgpu_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); + amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA | + PACKET3_TC_ACTION_ENA | + PACKET3_SH_KCACHE_ACTION_ENA | + PACKET3_SH_ICACHE_ACTION_ENA); + amdgpu_ring_write(ring, 0xFFFFFFFF); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 10); /* poll interval */ + /* EVENT_WRITE_EOP - flush caches, send int */ + amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5)); + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | + DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + amdgpu_ring_write(ring, upper_32_bits(seq)); +} + +static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) +{ + u32 header, control = 0; + + /* insert SWITCH_BUFFER packet before first IB in the ring frame */ + if (ctx_switch) { + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + } + + if (ib->flags & AMDGPU_IB_FLAG_CE) + header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); + else + header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); + + control |= ib->length_dw | (vm_id << 24); + + amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + (ib->gpu_addr & 0xFFFFFFFC)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); + amdgpu_ring_write(ring, control); +} + +/** + * gfx_v6_0_ring_test_ib - basic ring IB test + * + * @ring: amdgpu_ring structure holding ring information + * + * Allocate an IB and execute it on the gfx ring (SI). + * Provides a basic gfx ring test to verify that IBs are working. + * Returns 0 on success, error on failure. 
+ */ +static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ib ib; + struct fence *f = NULL; + uint32_t scratch; + uint32_t tmp = 0; + long r; + + r = amdgpu_gfx_scratch_get(adev, &scratch); + if (r) { + DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); + return r; + } + WREG32(scratch, 0xCAFEDEAD); + memset(&ib, 0, sizeof(ib)); + r = amdgpu_ib_get(adev, NULL, 256, &ib); + if (r) { + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + goto err1; + } + ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); + ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START)); + ib.ptr[2] = 0xDEADBEEF; + ib.length_dw = 3; + + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); + if (r) + goto err2; + + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; + goto err2; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + goto err2; + } + tmp = RREG32(scratch); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; + } else { + DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", + scratch, tmp); + r = -EINVAL; + } + +err2: + amdgpu_ib_free(adev, &ib, NULL); + fence_put(f); +err1: + amdgpu_gfx_scratch_free(adev, scratch); + return r; +} + +static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) +{ + int i; + if (enable) + WREG32(CP_ME_CNTL, 0); + else { + WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); + WREG32(SCRATCH_UMSK, 0); + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + adev->gfx.gfx_ring[i].ready = false; + for (i = 0; i < adev->gfx.num_compute_rings; i++) + adev->gfx.compute_ring[i].ready = false; + } + udelay(50); +} + +static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev) +{ + unsigned i; + const struct gfx_firmware_header_v1_0 *pfp_hdr; + const struct gfx_firmware_header_v1_0 *ce_hdr; + const struct gfx_firmware_header_v1_0 *me_hdr; + const __le32 *fw_data; + u32 fw_size; + + if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw) + return -EINVAL; + + gfx_v6_0_cp_gfx_enable(adev, false); + pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + + amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); + amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); + amdgpu_ucode_print_gfx_hdr(&me_hdr->header); + + /* PFP */ + fw_data = (const __le32 *) + (adev->gfx.pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_PFP_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_PFP_UCODE_ADDR, 0); + + /* CE */ + fw_data = (const __le32 *) + (adev->gfx.ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_CE_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_CE_UCODE_ADDR, 0); + + /* ME */ + fw_data = (const __le32 *) + (adev->gfx.me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_ME_RAM_WADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++)); + 
WREG32(CP_ME_RAM_WADDR, 0); + + + WREG32(CP_PFP_UCODE_ADDR, 0); + WREG32(CP_CE_UCODE_ADDR, 0); + WREG32(CP_ME_RAM_WADDR, 0); + WREG32(CP_ME_RAM_RADDR, 0); + return 0; +} + +static int gfx_v6_0_cp_gfx_start(struct amdgpu_device *adev) +{ + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; + int r, i; + + r = amdgpu_ring_alloc(ring, 7 + 4); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); + return r; + } + amdgpu_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); + amdgpu_ring_write(ring, 0x1); + amdgpu_ring_write(ring, 0x0); + amdgpu_ring_write(ring, adev->gfx.config.max_hw_contexts - 1); + amdgpu_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); + amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); + amdgpu_ring_write(ring, 0xc000); + amdgpu_ring_write(ring, 0xe000); + amdgpu_ring_commit(ring); + + gfx_v6_0_cp_gfx_enable(adev, true); + + r = amdgpu_ring_alloc(ring, gfx_v6_0_get_csb_size(adev) + 10); + if (r) { + DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); + return r; + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + amdgpu_ring_write(ring, + PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); + amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START); + for (i = 0; i < ext->reg_count; i++) + amdgpu_ring_write(ring, ext->extent[i]); + } + } + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); + + amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); + amdgpu_ring_write(ring, 0); + + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + amdgpu_ring_write(ring, 0x00000316); + amdgpu_ring_write(ring, 0x0000000e); + amdgpu_ring_write(ring, 0x00000010); + + amdgpu_ring_commit(ring); + + return 0; +} + +static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + u32 tmp; + u32 rb_bufsz; + int r; + u64 rptr_addr; + + WREG32(CP_SEM_WAIT_TIMER, 0x0); + WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); + + /* Set the write pointer delay */ + WREG32(CP_RB_WPTR_DELAY, 0); + + WREG32(CP_DEBUG, 0); + WREG32(SCRATCH_ADDR, 0); + + /* ring 0 - compute and gfx */ + /* Set ring buffer size */ + ring = &adev->gfx.gfx_ring[0]; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; + +#ifdef __BIG_ENDIAN + tmp |= BUF_SWAP_32BIT; +#endif + WREG32(CP_RB0_CNTL, tmp); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA); + ring->wptr = 0; + WREG32(CP_RB0_WPTR, ring->wptr); + + /* set the wb address whether it's enabled or not */ + rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); + WREG32(CP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); + WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); + + WREG32(SCRATCH_UMSK, 0); + + mdelay(1); + WREG32(CP_RB0_CNTL, tmp); + + WREG32(CP_RB0_BASE, ring->gpu_addr >> 8); + + /* start the rings */ + gfx_v6_0_cp_gfx_start(adev); + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + + 
return 0; +} + +static u32 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring) +{ + return ring->adev->wb.wb[ring->rptr_offs]; +} + +static u32 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->gfx.gfx_ring[0]) + return RREG32(CP_RB0_WPTR); + else if (ring == &adev->gfx.compute_ring[0]) + return RREG32(CP_RB1_WPTR); + else if (ring == &adev->gfx.compute_ring[1]) + return RREG32(CP_RB2_WPTR); + else + BUG(); +} + +static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + WREG32(CP_RB0_WPTR, ring->wptr); + (void)RREG32(CP_RB0_WPTR); +} + +static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->gfx.compute_ring[0]) { + WREG32(CP_RB1_WPTR, ring->wptr); + (void)RREG32(CP_RB1_WPTR); + } else if (ring == &adev->gfx.compute_ring[1]) { + WREG32(CP_RB2_WPTR, ring->wptr); + (void)RREG32(CP_RB2_WPTR); + } else { + BUG(); + } + +} + +static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + u32 tmp; + u32 rb_bufsz; + int r; + u64 rptr_addr; + + /* ring1 - compute only */ + /* Set ring buffer size */ + + ring = &adev->gfx.compute_ring[0]; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; +#ifdef __BIG_ENDIAN + tmp |= BUF_SWAP_32BIT; +#endif + WREG32(CP_RB1_CNTL, tmp); + + WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA); + ring->wptr = 0; + WREG32(CP_RB1_WPTR, ring->wptr); + + rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); + WREG32(CP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr)); + WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); + + mdelay(1); + WREG32(CP_RB1_CNTL, tmp); + WREG32(CP_RB1_BASE, ring->gpu_addr >> 8); + + ring = &adev->gfx.compute_ring[1]; + rb_bufsz = order_base_2(ring->ring_size / 8); + tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; +#ifdef __BIG_ENDIAN + tmp |= BUF_SWAP_32BIT; +#endif + WREG32(CP_RB2_CNTL, tmp); + + WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA); + ring->wptr = 0; + WREG32(CP_RB2_WPTR, ring->wptr); + rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); + WREG32(CP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr)); + WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); + + mdelay(1); + WREG32(CP_RB2_CNTL, tmp); + WREG32(CP_RB2_BASE, ring->gpu_addr >> 8); + + adev->gfx.compute_ring[0].ready = true; + adev->gfx.compute_ring[1].ready = true; + + r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[0]); + if (r) { + adev->gfx.compute_ring[0].ready = false; + return r; + } + + r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[1]); + if (r) { + adev->gfx.compute_ring[1].ready = false; + return r; + } + + return 0; +} + +static void gfx_v6_0_cp_enable(struct amdgpu_device *adev, bool enable) +{ + gfx_v6_0_cp_gfx_enable(adev, enable); +} + +static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev) +{ + return gfx_v6_0_cp_gfx_load_microcode(adev); +} + +static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp = RREG32(CP_INT_CNTL_RING0); + u32 mask; + int i; + + if (enable) + tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + else + tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); + WREG32(CP_INT_CNTL_RING0, tmp); + + if (!enable) { + /* read a gfx register */ + tmp = RREG32(DB_DEPTH_INFO); + + mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS; + for (i = 0; i < 
adev->usec_timeout; i++) { + if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS)) + break; + udelay(1); + } + } +} + +static int gfx_v6_0_cp_resume(struct amdgpu_device *adev) +{ + int r; + + gfx_v6_0_enable_gui_idle_interrupt(adev, false); + + r = gfx_v6_0_cp_load_microcode(adev); + if (r) + return r; + + r = gfx_v6_0_cp_gfx_resume(adev); + if (r) + return r; + r = gfx_v6_0_cp_compute_resume(adev); + if (r) + return r; + + gfx_v6_0_enable_gui_idle_interrupt(adev, true); + + return 0; +} + +static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ + WAIT_REG_MEM_FUNCTION(3) | /* equal */ + WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */ + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + amdgpu_ring_write(ring, seq); + amdgpu_ring_write(ring, 0xffffffff); + amdgpu_ring_write(ring, 4); /* poll interval */ + + if (usepfp) { + /* sync CE with ME to prevent CE fetch CEIB before context switch done */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + } +} + +static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) +{ + int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + + /* write new base address */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | + WRITE_DATA_DST_SEL(0))); + if (vm_id < 8) { + amdgpu_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + } else { + amdgpu_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8))); + } + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, pd_addr >> 12); + + /* bits 0-15 are the VM contexts0-15 */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | + WRITE_DATA_DST_SEL(0))); + amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 1 << vm_id); + + /* wait for the invalidate to complete */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); /* ref */ + amdgpu_ring_write(ring, 0); /* mask */ + amdgpu_ring_write(ring, 0x20); /* poll interval */ + + if (usepfp) { + /* sync PFP to ME, otherwise we might get invalid PFP reads */ + amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); + amdgpu_ring_write(ring, 0x0); + + /* sync CE with ME to prevent CE fetch CEIB before context switch done */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); + } +} + + +static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) +{ + int r; + + if (adev->gfx.rlc.save_restore_obj) { + r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); + 
amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); + + amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj); + adev->gfx.rlc.save_restore_obj = NULL; + } + + if (adev->gfx.rlc.clear_state_obj) { + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + + amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); + adev->gfx.rlc.clear_state_obj = NULL; + } + + if (adev->gfx.rlc.cp_table_obj) { + r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); + adev->gfx.rlc.cp_table_obj = NULL; + } +} + +static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) +{ + const u32 *src_ptr; + volatile u32 *dst_ptr; + u32 dws, i; + u64 reg_list_mc_addr; + const struct cs_section_def *cs_data; + int r; + + adev->gfx.rlc.reg_list = verde_rlc_save_restore_register_list; + adev->gfx.rlc.reg_list_size = + (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list); + + adev->gfx.rlc.cs_data = si_cs_data; + src_ptr = adev->gfx.rlc.reg_list; + dws = adev->gfx.rlc.reg_list_size; + cs_data = adev->gfx.rlc.cs_data; + + if (src_ptr) { + /* save restore block */ + if (adev->gfx.rlc.save_restore_obj == NULL) { + + r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, + &adev->gfx.rlc.save_restore_obj); + + if (r) { + dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); + return r; + } + } + + r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); + if (unlikely(r != 0)) { + gfx_v6_0_rlc_fini(adev); + return r; + } + r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.save_restore_gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); + dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r); + gfx_v6_0_rlc_fini(adev); + return r; + } + + r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr); + if (r) { + dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r); + gfx_v6_0_rlc_fini(adev); + return r; + } + /* write the sr buffer */ + dst_ptr = adev->gfx.rlc.sr_ptr; + for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) + dst_ptr[i] = cpu_to_le32(src_ptr[i]); + amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); + } + + if (cs_data) { + /* clear state block */ + adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev); + dws = adev->gfx.rlc.clear_state_size + (256 / 4); + + if (adev->gfx.rlc.clear_state_obj == NULL) { + r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, + &adev->gfx.rlc.clear_state_obj); + + if (r) { + dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); + gfx_v6_0_rlc_fini(adev); + return r; + } + } + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (unlikely(r != 0)) { + gfx_v6_0_rlc_fini(adev); + return r; + } + r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.clear_state_gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + 
gfx_v6_0_rlc_fini(adev); + return r; + } + + r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); + if (r) { + dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); + gfx_v6_0_rlc_fini(adev); + return r; + } + /* set up the cs buffer */ + dst_ptr = adev->gfx.rlc.cs_ptr; + reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256; + dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr)); + dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr)); + dst_ptr[2] = cpu_to_le32(adev->gfx.rlc.clear_state_size); + gfx_v6_0_get_csb_buffer(adev, &dst_ptr[(256/4)]); + amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + } + + return 0; +} + +static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable) +{ + u32 tmp; + + tmp = RREG32(RLC_LB_CNTL); + if (enable) + tmp |= LOAD_BALANCE_ENABLE; + else + tmp &= ~LOAD_BALANCE_ENABLE; + WREG32(RLC_LB_CNTL, tmp); + + if (!enable) { + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + WREG32(SPI_LB_CU_MASK, 0x00ff); + } + +} + +static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0) + break; + udelay(1); + } + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0) + break; + udelay(1); + } +} + +static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc) +{ + u32 tmp; + + tmp = RREG32(RLC_CNTL); + if (tmp != rlc) + WREG32(RLC_CNTL, rlc); +} + +static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev) +{ + u32 data, orig; + + orig = data = RREG32(RLC_CNTL); + + if (data & RLC_ENABLE) { + data &= ~RLC_ENABLE; + WREG32(RLC_CNTL, data); + + gfx_v6_0_wait_for_rlc_serdes(adev); + } + + return orig; +} + +static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev) +{ + WREG32(RLC_CNTL, 0); + + gfx_v6_0_enable_gui_idle_interrupt(adev, false); + gfx_v6_0_wait_for_rlc_serdes(adev); +} + +static void gfx_v6_0_rlc_start(struct amdgpu_device *adev) +{ + WREG32(RLC_CNTL, RLC_ENABLE); + + gfx_v6_0_enable_gui_idle_interrupt(adev, true); + + udelay(50); +} + +static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(GRBM_SOFT_RESET); + + tmp |= SOFT_RESET_RLC; + WREG32(GRBM_SOFT_RESET, tmp); + udelay(50); + tmp &= ~SOFT_RESET_RLC; + WREG32(GRBM_SOFT_RESET, tmp); + udelay(50); +} + +static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev) +{ + u32 tmp; + + /* Enable LBPW only for DDR3 */ + tmp = RREG32(MC_SEQ_MISC0); + if ((tmp & 0xF0000000) == 0xB0000000) + return true; + return false; +} +static void gfx_v6_0_init_cg(struct amdgpu_device *adev) +{ +} + +static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev) +{ + u32 i; + const struct rlc_firmware_header_v1_0 *hdr; + const __le32 *fw_data; + u32 fw_size; + + + if (!adev->gfx.rlc_fw) + return -EINVAL; + + gfx_v6_0_rlc_stop(adev); + gfx_v6_0_rlc_reset(adev); + gfx_v6_0_init_pg(adev); + gfx_v6_0_init_cg(adev); + + WREG32(RLC_RL_BASE, 0); + WREG32(RLC_RL_SIZE, 0); + WREG32(RLC_LB_CNTL, 0); + WREG32(RLC_LB_CNTR_MAX, 0xffffffff); + WREG32(RLC_LB_CNTR_INIT, 0); + WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff); + + WREG32(RLC_MC_CNTL, 0); + WREG32(RLC_UCODE_CNTL, 0); + + hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + fw_data = (const __le32 *) + (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + 
amdgpu_ucode_print_rlc_hdr(&hdr->header); + + for (i = 0; i < fw_size; i++) { + WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++)); + } + WREG32(RLC_UCODE_ADDR, 0); + + gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev)); + gfx_v6_0_rlc_start(adev); + + return 0; +} + +static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable) +{ + u32 data, orig, tmp; + + orig = data = RREG32(RLC_CGCG_CGLS_CTRL); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { + gfx_v6_0_enable_gui_idle_interrupt(adev, true); + + WREG32(RLC_GCPM_GENERAL_3, 0x00000080); + + tmp = gfx_v6_0_halt_rlc(adev); + + WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff); + WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff); + WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff); + + gfx_v6_0_wait_for_rlc_serdes(adev); + gfx_v6_0_update_rlc(adev, tmp); + + WREG32(RLC_SERDES_WR_CTRL, 0x007000ff); + + data |= CGCG_EN | CGLS_EN; + } else { + gfx_v6_0_enable_gui_idle_interrupt(adev, false); + + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + + data &= ~(CGCG_EN | CGLS_EN); + } + + if (orig != data) + WREG32(RLC_CGCG_CGLS_CTRL, data); + +} + +static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable) +{ + + u32 data, orig, tmp = 0; + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { + orig = data = RREG32(CGTS_SM_CTRL_REG); + data = 0x96940200; + if (orig != data) + WREG32(CGTS_SM_CTRL_REG, data); + + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { + orig = data = RREG32(CP_MEM_SLP_CNTL); + data |= CP_MEM_LS_EN; + if (orig != data) + WREG32(CP_MEM_SLP_CNTL, data); + } + + orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); + data &= 0xffffffc0; + if (orig != data) + WREG32(RLC_CGTT_MGCG_OVERRIDE, data); + + tmp = gfx_v6_0_halt_rlc(adev); + + WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff); + WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff); + WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff); + + gfx_v6_0_update_rlc(adev, tmp); + } else { + orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); + data |= 0x00000003; + if (orig != data) + WREG32(RLC_CGTT_MGCG_OVERRIDE, data); + + data = RREG32(CP_MEM_SLP_CNTL); + if (data & CP_MEM_LS_EN) { + data &= ~CP_MEM_LS_EN; + WREG32(CP_MEM_SLP_CNTL, data); + } + orig = data = RREG32(CGTS_SM_CTRL_REG); + data |= LS_OVERRIDE | OVERRIDE; + if (orig != data) + WREG32(CGTS_SM_CTRL_REG, data); + + tmp = gfx_v6_0_halt_rlc(adev); + + WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff); + WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff); + WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff); + + gfx_v6_0_update_rlc(adev, tmp); + } +} +/* +static void gfx_v6_0_update_cg(struct amdgpu_device *adev, + bool enable) +{ + gfx_v6_0_enable_gui_idle_interrupt(adev, false); + if (enable) { + gfx_v6_0_enable_mgcg(adev, true); + gfx_v6_0_enable_cgcg(adev, true); + } else { + gfx_v6_0_enable_cgcg(adev, false); + gfx_v6_0_enable_mgcg(adev, false); + } + gfx_v6_0_enable_gui_idle_interrupt(adev, true); +} +*/ +static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev, + bool enable) +{ +} + +static void gfx_v6_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev, + bool enable) +{ +} + +static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable) +{ + u32 data, orig; + + orig = data = RREG32(RLC_PG_CNTL); + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP)) + data &= ~0x8000; + else + data |= 0x8000; + if (orig != data) + WREG32(RLC_PG_CNTL, data); +} + +static void gfx_v6_0_enable_gds_pg(struct 
amdgpu_device *adev, bool enable) +{ +} +/* +static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev) +{ + const __le32 *fw_data; + volatile u32 *dst_ptr; + int me, i, max_me = 4; + u32 bo_offset = 0; + u32 table_offset, table_size; + + if (adev->asic_type == CHIP_KAVERI) + max_me = 5; + + if (adev->gfx.rlc.cp_table_ptr == NULL) + return; + + dst_ptr = adev->gfx.rlc.cp_table_ptr; + for (me = 0; me < max_me; me++) { + if (me == 0) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + fw_data = (const __le32 *) + (adev->gfx.ce_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 1) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + fw_data = (const __le32 *) + (adev->gfx.pfp_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 2) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + fw_data = (const __le32 *) + (adev->gfx.me_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 3) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec2_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } + + for (i = 0; i < table_size; i ++) { + dst_ptr[bo_offset + i] = + cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); + } + + bo_offset += table_size; + } +} +*/ +static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev, + bool enable) +{ + + u32 tmp; + + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { + tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); + WREG32(RLC_TTOP_D, tmp); + + tmp = RREG32(RLC_PG_CNTL); + tmp |= GFX_PG_ENABLE; + WREG32(RLC_PG_CNTL, tmp); + + tmp = RREG32(RLC_AUTO_PG_CTRL); + tmp |= AUTO_PG_EN; + WREG32(RLC_AUTO_PG_CTRL, tmp); + } else { + tmp = RREG32(RLC_AUTO_PG_CTRL); + tmp &= ~AUTO_PG_EN; + WREG32(RLC_AUTO_PG_CTRL, tmp); + + tmp = RREG32(DB_RENDER_CONTROL); + } +} + +static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev, + u32 se, u32 sh) +{ + + u32 mask = 0, tmp, tmp1; + int i; + + mutex_lock(&adev->grbm_idx_mutex); + gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff); + tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG); + tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG); + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + tmp &= 0xffff0000; + + tmp |= tmp1; + tmp >>= 16; + + for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { + mask <<= 1; + mask |= 1; + } + + return (~tmp) & mask; +} + +static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev) +{ + u32 i, j, k, active_cu_number = 0; + + u32 mask, 
counter, cu_bitmap; + u32 tmp = 0; + + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + mask = 1; + cu_bitmap = 0; + counter = 0; + for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { + if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) { + if (counter < 2) + cu_bitmap |= mask; + counter++; + } + mask <<= 1; + } + + active_cu_number += counter; + tmp |= (cu_bitmap << (i * 16 + j * 8)); + } + } + + WREG32(RLC_PG_AO_CU_MASK, tmp); + + tmp = RREG32(RLC_MAX_PG_CU); + tmp &= ~MAX_PU_CU_MASK; + tmp |= MAX_PU_CU(active_cu_number); + WREG32(RLC_MAX_PG_CU, tmp); +} + +static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(RLC_PG_CNTL); + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)) + data |= STATIC_PER_CU_PG_ENABLE; + else + data &= ~STATIC_PER_CU_PG_ENABLE; + if (orig != data) + WREG32(RLC_PG_CNTL, data); +} + +static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(RLC_PG_CNTL); + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)) + data |= DYN_PER_CU_PG_ENABLE; + else + data &= ~DYN_PER_CU_PG_ENABLE; + if (orig != data) + WREG32(RLC_PG_CNTL, data); +} + +static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev) +{ + u32 tmp; + + WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8); + + tmp = RREG32(RLC_PG_CNTL); + tmp |= GFX_PG_SRC; + WREG32(RLC_PG_CNTL, tmp); + + WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8); + + tmp = RREG32(RLC_AUTO_PG_CTRL); + + tmp &= ~GRBM_REG_SGIT_MASK; + tmp |= GRBM_REG_SGIT(0x700); + tmp &= ~PG_AFTER_GRBM_REG_ST_MASK; + WREG32(RLC_AUTO_PG_CTRL, tmp); +} + +static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable) +{ + gfx_v6_0_enable_gfx_cgpg(adev, enable); + gfx_v6_0_enable_gfx_static_mgpg(adev, enable); + gfx_v6_0_enable_gfx_dynamic_mgpg(adev, enable); +} + +static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev) +{ + u32 count = 0; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (adev->gfx.rlc.cs_data == NULL) + return 0; + + /* begin clear state */ + count += 2; + /* context control state */ + count += 3; + + for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) + count += 2 + ext->reg_count; + else + return 0; + } + } + /* pa_sc_raster_config */ + count += 3; + /* end clear state */ + count += 2; + /* clear state */ + count += 2; + + return count; +} + +static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, + volatile u32 *buffer) +{ + u32 count = 0, i; + const struct cs_section_def *sect = NULL; + const struct cs_extent_def *ext = NULL; + + if (adev->gfx.rlc.cs_data == NULL) + return; + if (buffer == NULL) + return; + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + buffer[count++] = cpu_to_le32(0x80000000); + buffer[count++] = cpu_to_le32(0x80000000); + + for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { + for (ext = sect->section; ext->extent != NULL; ++ext) { + if (sect->id == SECT_CONTEXT) { + buffer[count++] = + cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); + buffer[count++] = 
cpu_to_le32(ext->reg_index - 0xa000); + for (i = 0; i < ext->reg_count; i++) + buffer[count++] = cpu_to_le32(ext->extent[i]); + } else { + return; + } + } + } + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1)); + buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); + + switch (adev->asic_type) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + buffer[count++] = cpu_to_le32(0x2a00126a); + break; + case CHIP_VERDE: + buffer[count++] = cpu_to_le32(0x0000124a); + break; + case CHIP_OLAND: + buffer[count++] = cpu_to_le32(0x00000082); + break; + case CHIP_HAINAN: + buffer[count++] = cpu_to_le32(0x00000000); + break; + default: + buffer[count++] = cpu_to_le32(0x00000000); + break; + } + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); + buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); + + buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); + buffer[count++] = cpu_to_le32(0); +} + +static void gfx_v6_0_init_pg(struct amdgpu_device *adev) +{ + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS)) { + gfx_v6_0_enable_sclk_slowdown_on_pu(adev, true); + gfx_v6_0_enable_sclk_slowdown_on_pd(adev, true); + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { + gfx_v6_0_init_gfx_cgpg(adev); + gfx_v6_0_enable_cp_pg(adev, true); + gfx_v6_0_enable_gds_pg(adev, true); + } else { + WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8); + WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8); + + } + gfx_v6_0_init_ao_cu_mask(adev); + gfx_v6_0_update_gfx_pg(adev, true); + } else { + + WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8); + WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8); + } +} + +static void gfx_v6_0_fini_pg(struct amdgpu_device *adev) +{ + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS)) { + gfx_v6_0_update_gfx_pg(adev, false); + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { + gfx_v6_0_enable_cp_pg(adev, false); + gfx_v6_0_enable_gds_pg(adev, false); + } + } +} + +static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev) +{ + uint64_t clock; + + mutex_lock(&adev->gfx.gpu_clock_mutex); + WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&adev->gfx.gpu_clock_mutex); + return clock; +} + +static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + amdgpu_ring_write(ring, 0x80000000); + amdgpu_ring_write(ring, 0); +} + +static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) +{ + return + 6; /* gfx_v6_0_ring_emit_ib */ +} + +static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring) +{ + return + 5 + /* gfx_v6_0_ring_emit_hdp_flush */ + 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */ + 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ + 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */ + 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */ + 3; /* gfx_v6_ring_emit_cntxcntl */ +} + +static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring) +{ + return + 5 + /* gfx_v6_0_ring_emit_hdp_flush 
*/ + 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */ + 7 + /* gfx_v6_0_ring_emit_pipeline_sync */ + 17 + /* gfx_v6_0_ring_emit_vm_flush */ + 14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ +} + +static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v6_0_select_se_sh, +}; + +static int gfx_v6_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS; + adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS; + adev->gfx.funcs = &gfx_v6_0_gfx_funcs; + gfx_v6_0_set_ring_funcs(adev); + gfx_v6_0_set_irq_funcs(adev); + + return 0; +} + +static int gfx_v6_0_sw_init(void *handle) +{ + struct amdgpu_ring *ring; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, r; + + r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq); + if (r) + return r; + + gfx_v6_0_scratch_init(adev); + + r = gfx_v6_0_init_microcode(adev); + if (r) { + DRM_ERROR("Failed to load gfx firmware!\n"); + return r; + } + + r = gfx_v6_0_rlc_init(adev); + if (r) { + DRM_ERROR("Failed to init rlc BOs!\n"); + return r; + } + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { + ring = &adev->gfx.gfx_ring[i]; + ring->ring_obj = NULL; + sprintf(ring->name, "gfx"); + r = amdgpu_ring_init(adev, ring, 1024, + 0x80000000, 0xf, + &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, + AMDGPU_RING_TYPE_GFX); + if (r) + return r; + } + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + unsigned irq_type; + + if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) { + DRM_ERROR("Too many (%d) compute rings!\n", i); + break; + } + ring = &adev->gfx.compute_ring[i]; + ring->ring_obj = NULL; + ring->use_doorbell = false; + ring->doorbell_index = 0; + ring->me = 1; + ring->pipe = i; + ring->queue = i; + sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); + irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; + r = amdgpu_ring_init(adev, ring, 1024, + 0x80000000, 0xf, + &adev->gfx.eop_irq, irq_type, + AMDGPU_RING_TYPE_COMPUTE); + if (r) + return r; + } + + return r; +} + +static int gfx_v6_0_sw_fini(void *handle) +{ + int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_bo_unref(&adev->gds.oa_gfx_bo); + amdgpu_bo_unref(&adev->gds.gws_gfx_bo); + amdgpu_bo_unref(&adev->gds.gds_gfx_bo); + + for (i = 0; i < adev->gfx.num_gfx_rings; i++) + amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); + for (i = 0; i < adev->gfx.num_compute_rings; i++) + amdgpu_ring_fini(&adev->gfx.compute_ring[i]); + + gfx_v6_0_rlc_fini(adev); + + return 0; +} + +static int gfx_v6_0_hw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + gfx_v6_0_gpu_init(adev); + + r = gfx_v6_0_rlc_resume(adev); + if (r) + return r; + + r = gfx_v6_0_cp_resume(adev); + if (r) + return r; + + adev->gfx.ce_ram_size = 0x8000; + + return r; +} + +static int gfx_v6_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + gfx_v6_0_cp_enable(adev, false); + gfx_v6_0_rlc_stop(adev); + gfx_v6_0_fini_pg(adev); + + return 0; +} + +static int gfx_v6_0_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return gfx_v6_0_hw_fini(adev); +} + +static int gfx_v6_0_resume(void *handle) +{ + 
struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return gfx_v6_0_hw_init(adev); +} + +static bool gfx_v6_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (RREG32(GRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK) + return false; + else + return true; +} + +static int gfx_v6_0_wait_for_idle(void *handle) +{ + unsigned i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->usec_timeout; i++) { + if (gfx_v6_0_is_idle(handle)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static int gfx_v6_0_soft_reset(void *handle) +{ + return 0; +} + +static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, + enum amdgpu_interrupt_state state) +{ + u32 cp_int_cntl; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cp_int_cntl = RREG32(CP_INT_CNTL_RING0); + cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK; + WREG32(CP_INT_CNTL_RING0, cp_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cp_int_cntl = RREG32(CP_INT_CNTL_RING0); + cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK; + WREG32(CP_INT_CNTL_RING0, cp_int_cntl); + break; + default: + break; + } +} + +static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, + int ring, + enum amdgpu_interrupt_state state) +{ + u32 cp_int_cntl; + switch (state){ + case AMDGPU_IRQ_STATE_DISABLE: + if (ring == 0) { + cp_int_cntl = RREG32(CP_INT_CNTL_RING1); + cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK; + WREG32(CP_INT_CNTL_RING1, cp_int_cntl); + break; + } else { + cp_int_cntl = RREG32(CP_INT_CNTL_RING2); + cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK; + WREG32(CP_INT_CNTL_RING2, cp_int_cntl); + break; + + } + case AMDGPU_IRQ_STATE_ENABLE: + if (ring == 0) { + cp_int_cntl = RREG32(CP_INT_CNTL_RING1); + cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK; + WREG32(CP_INT_CNTL_RING1, cp_int_cntl); + break; + } else { + cp_int_cntl = RREG32(CP_INT_CNTL_RING2); + cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK; + WREG32(CP_INT_CNTL_RING2, cp_int_cntl); + break; + + } + + default: + BUG(); + break; + + } +} + +static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cp_int_cntl; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cp_int_cntl = RREG32(CP_INT_CNTL_RING0); + cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK; + WREG32(CP_INT_CNTL_RING0, cp_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cp_int_cntl = RREG32(CP_INT_CNTL_RING0); + cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK; + WREG32(CP_INT_CNTL_RING0, cp_int_cntl); + break; + default: + break; + } + + return 0; +} + +static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cp_int_cntl; + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cp_int_cntl = RREG32(CP_INT_CNTL_RING0); + cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK; + WREG32(CP_INT_CNTL_RING0, cp_int_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cp_int_cntl = RREG32(CP_INT_CNTL_RING0); + cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK; + WREG32(CP_INT_CNTL_RING0, cp_int_cntl); + break; + default: + break; + } + + return 0; +} + +static int gfx_v6_0_set_eop_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned 
type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CP_IRQ_GFX_EOP: + gfx_v6_0_set_gfx_eop_interrupt_state(adev, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: + gfx_v6_0_set_compute_eop_interrupt_state(adev, 0, state); + break; + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: + gfx_v6_0_set_compute_eop_interrupt_state(adev, 1, state); + break; + default: + break; + } + return 0; +} + +static int gfx_v6_0_eop_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + switch (entry->ring_id) { + case 0: + amdgpu_fence_process(&adev->gfx.gfx_ring[0]); + break; + case 1: + case 2: + amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id -1]); + break; + default: + break; + } + return 0; +} + +static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal register access in command stream\n"); + schedule_work(&adev->reset_work); + return 0; +} + +static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal instruction in command stream\n"); + schedule_work(&adev->reset_work); + return 0; +} + +static int gfx_v6_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + bool gate = false; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (state == AMD_CG_STATE_GATE) + gate = true; + + gfx_v6_0_enable_gui_idle_interrupt(adev, false); + if (gate) { + gfx_v6_0_enable_mgcg(adev, true); + gfx_v6_0_enable_cgcg(adev, true); + } else { + gfx_v6_0_enable_cgcg(adev, false); + gfx_v6_0_enable_mgcg(adev, false); + } + gfx_v6_0_enable_gui_idle_interrupt(adev, true); + + return 0; +} + +static int gfx_v6_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + bool gate = false; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (state == AMD_PG_STATE_GATE) + gate = true; + + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS)) { + gfx_v6_0_update_gfx_pg(adev, gate); + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { + gfx_v6_0_enable_cp_pg(adev, gate); + gfx_v6_0_enable_gds_pg(adev, gate); + } + } + + return 0; +} + +const struct amd_ip_funcs gfx_v6_0_ip_funcs = { + .name = "gfx_v6_0", + .early_init = gfx_v6_0_early_init, + .late_init = NULL, + .sw_init = gfx_v6_0_sw_init, + .sw_fini = gfx_v6_0_sw_fini, + .hw_init = gfx_v6_0_hw_init, + .hw_fini = gfx_v6_0_hw_fini, + .suspend = gfx_v6_0_suspend, + .resume = gfx_v6_0_resume, + .is_idle = gfx_v6_0_is_idle, + .wait_for_idle = gfx_v6_0_wait_for_idle, + .soft_reset = gfx_v6_0_soft_reset, + .set_clockgating_state = gfx_v6_0_set_clockgating_state, + .set_powergating_state = gfx_v6_0_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { + .get_rptr = gfx_v6_0_ring_get_rptr, + .get_wptr = gfx_v6_0_ring_get_wptr, + .set_wptr = gfx_v6_0_ring_set_wptr_gfx, + .parse_cs = NULL, + .emit_ib = gfx_v6_0_ring_emit_ib, + .emit_fence = gfx_v6_0_ring_emit_fence, + .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync, + .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush, + .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate, + .test_ring = gfx_v6_0_ring_test_ring, + .test_ib = gfx_v6_0_ring_test_ib, + .insert_nop = 
amdgpu_ring_insert_nop,
+	.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
+	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+	.get_rptr = gfx_v6_0_ring_get_rptr,
+	.get_wptr = gfx_v6_0_ring_get_wptr,
+	.set_wptr = gfx_v6_0_ring_set_wptr_compute,
+	.parse_cs = NULL,
+	.emit_ib = gfx_v6_0_ring_emit_ib,
+	.emit_fence = gfx_v6_0_ring_emit_fence,
+	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+	.emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+	.emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+	.emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+	.test_ring = gfx_v6_0_ring_test_ring,
+	.test_ib = gfx_v6_0_ring_test_ib,
+	.insert_nop = amdgpu_ring_insert_nop,
+	.get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+	.get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
+};
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+		adev->gfx.gfx_ring[i].funcs = &gfx_v6_0_ring_funcs_gfx;
+	for (i = 0; i < adev->gfx.num_compute_rings; i++)
+		adev->gfx.compute_ring[i].funcs = &gfx_v6_0_ring_funcs_compute;
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_eop_irq_funcs = {
+	.set = gfx_v6_0_set_eop_interrupt_state,
+	.process = gfx_v6_0_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_reg_irq_funcs = {
+	.set = gfx_v6_0_set_priv_reg_fault_state,
+	.process = gfx_v6_0_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_inst_irq_funcs = {
+	.set = gfx_v6_0_set_priv_inst_fault_state,
+	.process = gfx_v6_0_priv_inst_irq,
+};
+
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+	adev->gfx.eop_irq.funcs = &gfx_v6_0_eop_irq_funcs;
+
+	adev->gfx.priv_reg_irq.num_types = 1;
+	adev->gfx.priv_reg_irq.funcs = &gfx_v6_0_priv_reg_irq_funcs;
+
+	adev->gfx.priv_inst_irq.num_types = 1;
+	adev->gfx.priv_inst_irq.funcs = &gfx_v6_0_priv_inst_irq_funcs;
+}
+
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
+{
+	int i, j, k, counter, active_cu_number = 0;
+	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+
+	memset(cu_info, 0, sizeof(*cu_info));
+
+	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+			mask = 1;
+			ao_bitmap = 0;
+			counter = 0;
+			bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+			cu_info->bitmap[i][j] = bitmap;
+
+			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
+				if (bitmap & mask) {
+					if (counter < 2)
+						ao_bitmap |= mask;
+					counter ++;
+				}
+				mask <<= 1;
+			}
+			active_cu_number += counter;
+			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+		}
+	}
+
+	cu_info->number = active_cu_number;
+	cu_info->ao_cu_mask = ao_cu_mask;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
new file mode 100644
index 000000000000..b9657e72b248
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V6_0_H__
+#define __GFX_V6_0_H__
+
+extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 425413fcaf02..71116da9e782 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1645,6 +1645,147 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
 	return (~data) & mask;
 }
 
+static void
+gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+{
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+			  SE_XSEL(1) | SE_YSEL(1);
+		*rconf1 |= 0x0;
+		break;
+	case CHIP_HAWAII:
+		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
+			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
+			  SE_YSEL(3);
+		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
+			   SE_PAIR_YSEL(2);
+		break;
+	case CHIP_KAVERI:
+		*rconf |= RB_MAP_PKR0(2);
+		*rconf1 |= 0x0;
+		break;
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+		*rconf |= 0x0;
+		*rconf1 |= 0x0;
+		break;
+	default:
+		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+		break;
+	}
+}
+
+static void
+gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+					u32 raster_config, u32 raster_config_1,
+					unsigned rb_mask, unsigned num_rb)
+{
+	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+	unsigned rb_per_se = num_rb / num_se;
+	unsigned se_mask[4];
+	unsigned se;
+
+	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
+			     (!se_mask[2] && !se_mask[3]))) {
+		raster_config_1 &= ~SE_PAIR_MAP_MASK;
+
+		if (!se_mask[0] && !se_mask[1]) {
+			raster_config_1 |=
+				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
+		} else {
+			raster_config_1 |=
+				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
+		}
+	}
+
+	for (se = 0; se < num_se; se++) {
+		unsigned raster_config_se = raster_config;
+		unsigned pkr0_mask = ((1
<< rb_per_pkr) - 1) << (se * rb_per_se); + unsigned pkr1_mask = pkr0_mask << rb_per_pkr; + int idx = (se / 2) * 2; + + if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) { + raster_config_se &= ~SE_MAP_MASK; + + if (!se_mask[idx]) { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3); + } else { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0); + } + } + + pkr0_mask &= rb_mask; + pkr1_mask &= rb_mask; + if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) { + raster_config_se &= ~PKR_MAP_MASK; + + if (!pkr0_mask) { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3); + } else { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0); + } + } + + if (rb_per_se >= 2) { + unsigned rb0_mask = 1 << (se * rb_per_se); + unsigned rb1_mask = rb0_mask << 1; + + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR0_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0); + } + } + + if (rb_per_se > 2) { + rb0_mask = 1 << (se * rb_per_se + rb_per_pkr); + rb1_mask = rb0_mask << 1; + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR1_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0); + } + } + } + } + + /* GRBM_GFX_INDEX has a different offset on CI+ */ + gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); + WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); + } + + /* GRBM_GFX_INDEX has a different offset on CI+ */ + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); +} + /** * gfx_v7_0_setup_rb - setup the RBs on the asic * @@ -1658,9 +1799,11 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) { int i, j; u32 data; + u32 raster_config = 0, raster_config_1 = 0; u32 active_rbs = 0; u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / adev->gfx.config.max_sh_per_se; + unsigned num_rb_pipes; mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { @@ -1672,10 +1815,25 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) } } gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); + + num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se * + adev->gfx.config.max_shader_engines, 16); + + gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1); + + if (!adev->gfx.config.backend_enable_mask || + adev->gfx.config.num_rbs >= num_rb_pipes) { + WREG32(mmPA_SC_RASTER_CONFIG, raster_config); + WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); + } else { + gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1, + adev->gfx.config.backend_enable_mask, + num_rb_pipes); + } + mutex_unlock(&adev->grbm_idx_mutex); } /** @@ -2096,6 +2254,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, amdgpu_ring_write(ring, control); } +static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) +{ + uint32_t dw2 = 0; + + dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ + if (flags & AMDGPU_HAVE_CTX_SWITCH) { + /* set load_global_config & load_global_uconfig */ + dw2 |= 0x8001; + /* set load_cs_sh_regs */ + 
dw2 |= 0x01000000; + /* set load_per_context_state & load_gfx_sh_regs */ + dw2 |= 0x10002; + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + amdgpu_ring_write(ring, dw2); + amdgpu_ring_write(ring, 0); +} + /** * gfx_v7_0_ring_test_ib - basic ring IB test * @@ -2443,7 +2620,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) return 0; } -static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) +static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring) { return ring->adev->wb.wb[ring->rptr_offs]; } @@ -2463,11 +2640,6 @@ static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) (void)RREG32(mmCP_RB0_WPTR); } -static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring) -{ - return ring->adev->wb.wb[ring->rptr_offs]; -} - static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring) { /* XXX check if swapping is necessary on BE */ @@ -4182,6 +4354,41 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } +static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring) +{ + return + 4; /* gfx_v7_0_ring_emit_ib_gfx */ +} + +static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring) +{ + return + 20 + /* gfx_v7_0_ring_emit_gds_switch */ + 7 + /* gfx_v7_0_ring_emit_hdp_flush */ + 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ + 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */ + 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */ + 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */ + 3; /* gfx_v7_ring_emit_cntxcntl */ +} + +static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring) +{ + return + 4; /* gfx_v7_0_ring_emit_ib_compute */ +} + +static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring) +{ + return + 20 + /* gfx_v7_0_ring_emit_gds_switch */ + 7 + /* gfx_v7_0_ring_emit_hdp_flush */ + 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ + 7 + /* gfx_v7_0_ring_emit_pipeline_sync */ + 17 + /* gfx_v7_0_ring_emit_vm_flush */ + 7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */ +} + static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, .select_se_sh = &gfx_v7_0_select_se_sh, @@ -4471,24 +4678,21 @@ static int gfx_v7_0_sw_init(void *handle) } /* reserve GDS, GWS and OA resource for gfx */ - r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GDS, 0, - NULL, NULL, &adev->gds.gds_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, + &adev->gds.gds_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GWS, 0, - NULL, NULL, &adev->gds.gws_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, + &adev->gds.gws_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_OA, 0, - NULL, NULL, &adev->gds.oa_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, + &adev->gds.oa_gfx_bo, NULL, NULL); if (r) return r; @@ -4504,9 +4708,9 @@ static int gfx_v7_0_sw_fini(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_bo_unref(&adev->gds.oa_gfx_bo); - 
amdgpu_bo_unref(&adev->gds.gws_gfx_bo); - amdgpu_bo_unref(&adev->gds.gds_gfx_bo); + amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL); for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -4937,7 +5141,7 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = { }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { - .get_rptr = gfx_v7_0_ring_get_rptr_gfx, + .get_rptr = gfx_v7_0_ring_get_rptr, .get_wptr = gfx_v7_0_ring_get_wptr_gfx, .set_wptr = gfx_v7_0_ring_set_wptr_gfx, .parse_cs = NULL, @@ -4952,10 +5156,13 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .test_ib = gfx_v7_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl, + .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx, + .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { - .get_rptr = gfx_v7_0_ring_get_rptr_compute, + .get_rptr = gfx_v7_0_ring_get_rptr, .get_wptr = gfx_v7_0_ring_get_wptr_compute, .set_wptr = gfx_v7_0_ring_set_wptr_compute, .parse_cs = NULL, @@ -4970,6 +5177,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { .test_ib = gfx_v7_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute, + .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute, }; static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index b8184617ca25..6c6ff57b1c95 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -703,7 +703,10 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) polaris10_golden_common_all, (const u32)ARRAY_SIZE(polaris10_golden_common_all)); WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C); - if (adev->pdev->revision == 0xc7) { + if (adev->pdev->revision == 0xc7 && + ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) || + (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) || + (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) { amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD); amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0); } @@ -1233,10 +1236,9 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) if (adev->gfx.rlc.clear_state_obj) { r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r); amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); adev->gfx.rlc.clear_state_obj = NULL; } @@ -1248,7 +1250,6 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); adev->gfx.rlc.cp_table_obj = NULL; } @@ -1290,14 +1291,14 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) &adev->gfx.rlc.clear_state_gpu_addr); 
if (r) { amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r); gfx_v8_0_rlc_fini(adev); return r; } r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); if (r) { - dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r); gfx_v8_0_rlc_fini(adev); return r; } @@ -1332,7 +1333,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) &adev->gfx.rlc.cp_table_gpu_addr); if (r) { amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); + dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r); return r; } r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); @@ -1345,7 +1346,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - } return 0; @@ -1361,7 +1361,6 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); adev->gfx.mec.hpd_eop_obj = NULL; } @@ -2082,24 +2081,21 @@ static int gfx_v8_0_sw_init(void *handle) } /* reserve GDS, GWS and OA resource for gfx */ - r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GDS, 0, NULL, - NULL, &adev->gds.gds_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, + &adev->gds.gds_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GWS, 0, NULL, - NULL, &adev->gds.gws_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, + &adev->gds.gws_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_OA, 0, NULL, - NULL, &adev->gds.oa_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, + &adev->gds.oa_gfx_bo, NULL, NULL); if (r) return r; @@ -2117,9 +2113,9 @@ static int gfx_v8_0_sw_fini(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_bo_unref(&adev->gds.oa_gfx_bo); - amdgpu_bo_unref(&adev->gds.gws_gfx_bo); - amdgpu_bo_unref(&adev->gds.gds_gfx_bo); + amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL); + amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL); for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -2127,9 +2123,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); gfx_v8_0_mec_fini(adev); - gfx_v8_0_rlc_fini(adev); - gfx_v8_0_free_microcode(adev); return 0; @@ -3465,19 +3459,16 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, else data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); - if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + if (se_num == 0xffffffff) data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (se_num == 0xffffffff) { - data = 
REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (sh_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } else { + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } + WREG32(mmGRBM_GFX_INDEX, data); } @@ -3490,11 +3481,10 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = RREG32(mmCC_RB_BACKEND_DISABLE); - data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + data = RREG32(mmCC_RB_BACKEND_DISABLE) | + RREG32(mmGC_USER_RB_BACKEND_DISABLE); - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; + data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE); mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se / adev->gfx.config.max_sh_per_se); @@ -3502,13 +3492,163 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev) return (~data) & mask; } +static void +gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1) +{ + switch (adev->asic_type) { + case CHIP_FIJI: + *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) | + RB_XSEL2(1) | PKR_MAP(2) | + PKR_XSEL(1) | PKR_YSEL(1) | + SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3); + *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) | + SE_PAIR_YSEL(2); + break; + case CHIP_TONGA: + case CHIP_POLARIS10: + *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) | + SE_XSEL(1) | SE_YSEL(1); + *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) | + SE_PAIR_YSEL(2); + break; + case CHIP_TOPAZ: + case CHIP_CARRIZO: + *rconf |= RB_MAP_PKR0(2); + *rconf1 |= 0x0; + break; + case CHIP_POLARIS11: + *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) | + SE_XSEL(1) | SE_YSEL(1); + *rconf1 |= 0x0; + break; + case CHIP_STONEY: + *rconf |= 0x0; + *rconf1 |= 0x0; + break; + default: + DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type); + break; + } +} + +static void +gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev, + u32 raster_config, u32 raster_config_1, + unsigned rb_mask, unsigned num_rb) +{ + unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1); + unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1); + unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2); + unsigned rb_per_se = num_rb / num_se; + unsigned se_mask[4]; + unsigned se; + + se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask; + se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask; + se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask; + se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask; + + WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4)); + WARN_ON(!(sh_per_se == 1 || sh_per_se == 2)); + WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2)); + + if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) || + (!se_mask[2] && !se_mask[3]))) { + raster_config_1 &= ~SE_PAIR_MAP_MASK; + + if (!se_mask[0] && !se_mask[1]) { + raster_config_1 |= + SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3); + } else { + raster_config_1 |= + SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0); + } + } + + for (se = 0; se < num_se; se++) { + unsigned raster_config_se = raster_config; + unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se); + unsigned pkr1_mask = pkr0_mask << 
rb_per_pkr; + int idx = (se / 2) * 2; + + if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) { + raster_config_se &= ~SE_MAP_MASK; + + if (!se_mask[idx]) { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3); + } else { + raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0); + } + } + + pkr0_mask &= rb_mask; + pkr1_mask &= rb_mask; + if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) { + raster_config_se &= ~PKR_MAP_MASK; + + if (!pkr0_mask) { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3); + } else { + raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0); + } + } + + if (rb_per_se >= 2) { + unsigned rb0_mask = 1 << (se * rb_per_se); + unsigned rb1_mask = rb0_mask << 1; + + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR0_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0); + } + } + + if (rb_per_se > 2) { + rb0_mask = 1 << (se * rb_per_se + rb_per_pkr); + rb1_mask = rb0_mask << 1; + rb0_mask &= rb_mask; + rb1_mask &= rb_mask; + if (!rb0_mask || !rb1_mask) { + raster_config_se &= ~RB_MAP_PKR1_MASK; + + if (!rb0_mask) { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3); + } else { + raster_config_se |= + RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0); + } + } + } + } + + /* GRBM_GFX_INDEX has a different offset on VI */ + gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff); + WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); + WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); + } + + /* GRBM_GFX_INDEX has a different offset on VI */ + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); +} + static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) { int i, j; u32 data; + u32 raster_config = 0, raster_config_1 = 0; u32 active_rbs = 0; u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / adev->gfx.config.max_sh_per_se; + unsigned num_rb_pipes; mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { @@ -3520,10 +3660,26 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) } } gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; adev->gfx.config.num_rbs = hweight32(active_rbs); + + num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se * + adev->gfx.config.max_shader_engines, 16); + + gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1); + + if (!adev->gfx.config.backend_enable_mask || + adev->gfx.config.num_rbs >= num_rb_pipes) { + WREG32(mmPA_SC_RASTER_CONFIG, raster_config); + WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); + } else { + gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1, + adev->gfx.config.backend_enable_mask, + num_rb_pipes); + } + + mutex_unlock(&adev->grbm_idx_mutex); } /** @@ -3576,16 +3732,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) u32 tmp; int i; - tmp = RREG32(mmGRBM_CNTL); - tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff); - WREG32(mmGRBM_CNTL, tmp); - + WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF); WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); gfx_v8_0_tiling_mode_table_init(adev); - gfx_v8_0_setup_rb(adev); gfx_v8_0_get_cu_info(adev); @@ -3769,9 +3921,7 @@ static int gfx_v8_0_init_save_restore_list(struct 
amdgpu_device *adev) sizeof(indirect_start_offsets)/sizeof(int)); /* save and restore list */ - temp = RREG32(mmRLC_SRM_CNTL); - temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; - WREG32(mmRLC_SRM_CNTL, temp); + WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1); WREG32(mmRLC_SRM_ARAM_ADDR, 0); for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++) @@ -3808,11 +3958,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev) static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev) { - uint32_t data; - - data = RREG32(mmRLC_SRM_CNTL); - data |= RLC_SRM_CNTL__SRM_ENABLE_MASK; - WREG32(mmRLC_SRM_CNTL, data); + WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1); } static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev) @@ -3822,75 +3968,34 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev) if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_DMG)) { - data = RREG32(mmCP_RB_WPTR_POLL_CNTL); - data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; - data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); - WREG32(mmCP_RB_WPTR_POLL_CNTL, data); - - data = 0; - data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT); - WREG32(mmRLC_PG_DELAY, data); + WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60); - data = RREG32(mmRLC_PG_DELAY_2); - data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK; - data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT); - WREG32(mmRLC_PG_DELAY_2, data); + data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10); + WREG32(mmRLC_PG_DELAY, data); - data = RREG32(mmRLC_AUTO_PG_CTRL); - data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK; - data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); - WREG32(mmRLC_AUTO_PG_CTRL, data); + WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3); + WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0); } } static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0); } static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0); } static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK; - else - data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 
1 : 0); } static void gfx_v8_0_init_pg(struct amdgpu_device *adev) @@ -3927,36 +4032,26 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev) } } -void gfx_v8_0_rlc_stop(struct amdgpu_device *adev) +static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmRLC_CNTL); - - tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); - WREG32(mmRLC_CNTL, tmp); + WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0); gfx_v8_0_enable_gui_idle_interrupt(adev, false); - gfx_v8_0_wait_for_rlc_serdes(adev); } static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmGRBM_SOFT_RESET); - - tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); - WREG32(mmGRBM_SOFT_RESET, tmp); + WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); udelay(50); - tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); - WREG32(mmGRBM_SOFT_RESET, tmp); + + WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); udelay(50); } static void gfx_v8_0_rlc_start(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmRLC_CNTL); - - tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1); - WREG32(mmRLC_CNTL, tmp); + WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1); /* carrizo do enable cp interrupt after cp inited */ if (!(adev->flags & AMD_IS_APU)) @@ -3998,14 +4093,13 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) /* disable CG */ WREG32(mmRLC_CGCG_CGLS_CTRL, 0); if (adev->asic_type == CHIP_POLARIS11 || - adev->asic_type == CHIP_POLARIS10) + adev->asic_type == CHIP_POLARIS10) WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0); /* disable PG */ WREG32(mmRLC_PG_CNTL, 0); gfx_v8_0_rlc_reset(adev); - gfx_v8_0_init_pg(adev); if (!adev->pp_enabled) { @@ -4300,12 +4394,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) gfx_v8_0_cp_gfx_start(adev); ring->ready = true; r = amdgpu_ring_test_ring(ring); - if (r) { + if (r) ring->ready = false; - return r; - } - return 0; + return r; } static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) @@ -4980,7 +5072,6 @@ static int gfx_v8_0_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfx_v8_0_init_golden_registers(adev); - gfx_v8_0_gpu_init(adev); r = gfx_v8_0_rlc_resume(adev); @@ -4988,8 +5079,6 @@ static int gfx_v8_0_hw_init(void *handle) return r; r = gfx_v8_0_cp_resume(adev); - if (r) - return r; return r; } @@ -5037,25 +5126,22 @@ static bool gfx_v8_0_is_idle(void *handle) static int gfx_v8_0_wait_for_idle(void *handle) { unsigned i; - u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK; - - if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) + if (gfx_v8_0_is_idle(handle)) return 0; + udelay(1); } return -ETIMEDOUT; } -static int gfx_v8_0_soft_reset(void *handle) +static int gfx_v8_0_check_soft_reset(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* GRBM_STATUS */ tmp = RREG32(mmGRBM_STATUS); @@ -5064,16 +5150,12 @@ static int gfx_v8_0_soft_reset(void *handle) GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | - GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { + GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK | + GRBM_STATUS__CP_BUSY_MASK | 
GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); - } - - if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { - grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, - GRBM_SOFT_RESET, SOFT_RESET_CP, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); } @@ -5084,73 +5166,199 @@ static int gfx_v8_0_soft_reset(void *handle) grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) || + REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) || + REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) { + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, + SOFT_RESET_CPF, 1); + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, + SOFT_RESET_CPC, 1); + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, + SOFT_RESET_CPG, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, + SOFT_RESET_GRBM, 1); + } + /* SRBM_STATUS */ tmp = RREG32(mmSRBM_STATUS); if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING)) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); + if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + SRBM_SOFT_RESET, SOFT_RESET_SEM, 1); if (grbm_soft_reset || srbm_soft_reset) { - /* stop the rlc */ - gfx_v8_0_rlc_stop(adev); + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true; + adev->gfx.grbm_soft_reset = grbm_soft_reset; + adev->gfx.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false; + adev->gfx.grbm_soft_reset = 0; + adev->gfx.srbm_soft_reset = 0; + } + + return 0; +} + +static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + int i; + + vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) { + u32 tmp; + tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST); + tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST, + DEQUEUE_REQ, 2); + WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp); + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK)) + break; + udelay(1); + } + } +} + +static int gfx_v8_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + return 0; + + grbm_soft_reset = adev->gfx.grbm_soft_reset; + srbm_soft_reset = adev->gfx.srbm_soft_reset; + + /* stop the rlc */ + gfx_v8_0_rlc_stop(adev); + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) /* Disable GFX parsing/prefetching */ gfx_v8_0_cp_gfx_enable(adev, false); + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) { + int i; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + gfx_v8_0_inactive_hqd(adev, ring); + } /* Disable MEC parsing/prefetching */ gfx_v8_0_cp_compute_enable(adev, false); + } - if 
(grbm_soft_reset || srbm_soft_reset) { - tmp = RREG32(mmGMCON_DEBUG); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_STALL, 1); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_CLEAR, 1); - WREG32(mmGMCON_DEBUG, tmp); + return 0; +} - udelay(50); - } +static int gfx_v8_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + u32 tmp; - if (grbm_soft_reset) { - tmp = RREG32(mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + return 0; - udelay(50); + grbm_soft_reset = adev->gfx.grbm_soft_reset; + srbm_soft_reset = adev->gfx.srbm_soft_reset; - tmp &= ~grbm_soft_reset; - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - } + if (grbm_soft_reset || srbm_soft_reset) { + tmp = RREG32(mmGMCON_DEBUG); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1); + WREG32(mmGMCON_DEBUG, tmp); + udelay(50); + } - if (srbm_soft_reset) { - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); + if (grbm_soft_reset) { + tmp = RREG32(mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); - udelay(50); + udelay(50); - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - } + tmp &= ~grbm_soft_reset; + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + } - if (grbm_soft_reset || srbm_soft_reset) { - tmp = RREG32(mmGMCON_DEBUG); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_STALL, 0); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_CLEAR, 0); - WREG32(mmGMCON_DEBUG, tmp); - } + if (srbm_soft_reset) { + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); - /* Wait a little for things to settle down */ udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + } + + if (grbm_soft_reset || srbm_soft_reset) { + tmp = RREG32(mmGMCON_DEBUG); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0); + WREG32(mmGMCON_DEBUG, tmp); } + + /* Wait a little for things to settle down */ + udelay(50); + + return 0; +} + +static void gfx_v8_0_init_hqd(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0); + WREG32(mmCP_HQD_PQ_RPTR, 0); + WREG32(mmCP_HQD_PQ_WPTR, 0); + vi_srbm_select(adev, 0, 0, 0, 0); +} + +static int gfx_v8_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + return 0; + + grbm_soft_reset = adev->gfx.grbm_soft_reset; + srbm_soft_reset = adev->gfx.srbm_soft_reset; + + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) + gfx_v8_0_cp_gfx_resume(adev); + + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 
SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) { + int i; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + gfx_v8_0_init_hqd(adev, ring); + } + gfx_v8_0_cp_compute_resume(adev); + } + gfx_v8_0_rlc_start(adev); + return 0; } @@ -5269,8 +5477,6 @@ static int gfx_v8_0_late_init(void *handle) static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - if (adev->asic_type == CHIP_POLARIS11) /* Send msg to SMU via Powerplay */ amdgpu_set_powergating_state(adev, @@ -5278,83 +5484,35 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable static MGPG */ - if (enable) - data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0); } static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable dynamic MGPG */ - if (enable) - data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0); } static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable quick PG */ - if (enable) - data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0); } static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0); } static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0); /* Read any GFX register to wake up GFX. 
*/ if (!enable) - data = RREG32(mmDB_RENDER_CONTROL); + RREG32(mmDB_RENDER_CONTROL); } static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev, @@ -5430,15 +5588,15 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, data = RREG32(mmRLC_SERDES_WR_CTRL); if (adev->asic_type == CHIP_STONEY) - data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | - RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | - RLC_SERDES_WR_CTRL__P1_SELECT_MASK | - RLC_SERDES_WR_CTRL__P2_SELECT_MASK | - RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK | - RLC_SERDES_WR_CTRL__POWER_DOWN_MASK | - RLC_SERDES_WR_CTRL__POWER_UP_MASK | - RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK | - RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK); + data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | + RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | + RLC_SERDES_WR_CTRL__P1_SELECT_MASK | + RLC_SERDES_WR_CTRL__P2_SELECT_MASK | + RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK | + RLC_SERDES_WR_CTRL__POWER_DOWN_MASK | + RLC_SERDES_WR_CTRL__POWER_UP_MASK | + RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK | + RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK); else data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | @@ -5461,10 +5619,10 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, #define MSG_ENTER_RLC_SAFE_MODE 1 #define MSG_EXIT_RLC_SAFE_MODE 0 - -#define RLC_GPR_REG2__REQ_MASK 0x00000001 -#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 -#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e +#define RLC_GPR_REG2__REQ_MASK 0x00000001 +#define RLC_GPR_REG2__REQ__SHIFT 0 +#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 +#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev) { @@ -5494,7 +5652,7 @@ static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) break; udelay(1); } @@ -5522,7 +5680,7 @@ static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) break; udelay(1); } @@ -5554,7 +5712,7 @@ static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) break; udelay(1); } @@ -5581,7 +5739,7 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) break; udelay(1); } @@ -5622,21 +5780,12 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { - if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) /* 1 - RLC memory Light sleep */ - temp = data = RREG32(mmRLC_MEM_SLP_CNTL); - data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; - if (temp != data) - WREG32(mmRLC_MEM_SLP_CNTL, data); - } + WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1); - if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { - /* 2 - CP memory Light sleep */ - temp = data = RREG32(mmCP_MEM_SLP_CNTL); - 
data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; - if (temp != data) - WREG32(mmCP_MEM_SLP_CNTL, data); - } + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) + WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1); } /* 3 - RLC_CGTT_MGCG_OVERRIDE */ @@ -5834,6 +5983,76 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } +static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev, + enum amd_clockgating_state state) +{ + uint32_t msg_id, pp_state; + void *pp_handle = adev->powerplay.pp_handle; + + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_CG | PP_STATE_LS; + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_CG, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_MG, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + return 0; +} + +static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev, + enum amd_clockgating_state state) +{ + uint32_t msg_id, pp_state; + void *pp_handle = adev->powerplay.pp_handle; + + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_CG | PP_STATE_LS; + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_CG, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_3D, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_MG, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_RLC, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, + PP_BLOCK_GFX_CP, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + return 0; +} + static int gfx_v8_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -5846,33 +6065,33 @@ static int gfx_v8_0_set_clockgating_state(void *handle, gfx_v8_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); break; + case CHIP_TONGA: + gfx_v8_0_tonga_update_gfx_clock_gating(adev, state); + break; + case CHIP_POLARIS10: + case CHIP_POLARIS11: + gfx_v8_0_polaris_update_gfx_clock_gating(adev, state); + break; default: break; } return 0; } -static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) +static u32 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring) { - u32 rptr; - - rptr = ring->adev->wb.wb[ring->rptr_offs]; - - return rptr; + return ring->adev->wb.wb[ring->rptr_offs]; } static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - u32 wptr; if (ring->use_doorbell) /* XXX check if swapping is necessary on BE */ - wptr = ring->adev->wb.wb[ring->wptr_offs]; + return ring->adev->wb.wb[ring->wptr_offs]; else - wptr = RREG32(mmCP_RB0_WPTR); - - return wptr; + return RREG32(mmCP_RB0_WPTR); } static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) @@ -5939,12 +6158,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, { u32 header, control = 0; - /* insert SWITCH_BUFFER packet before first IB in the ring frame */ - if (ctx_switch) { - amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); - amdgpu_ring_write(ring, 0); - } - if (ib->flags & AMDGPU_IB_FLAG_CE) header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); else @@ -5971,9 +6184,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN - (2 << 0) | + (2 << 0) | #endif - (ib->gpu_addr & 0xFFFFFFFC)); + (ib->gpu_addr & 0xFFFFFFFC)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); amdgpu_ring_write(ring, control); } @@ -6014,14 +6227,6 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, seq); amdgpu_ring_write(ring, 0xffffffff); amdgpu_ring_write(ring, 4); /* poll interval */ - - if (usepfp) { - /* synce CE with ME to prevent CE fetch CEIB before context switch done */ - amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); - amdgpu_ring_write(ring, 0); - } } static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, @@ -6029,6 +6234,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, { int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); + /* GFX8 emits 128 dw nop to prevent DE do vm_flush before CE finish CEIB */ + if (usepfp) + amdgpu_ring_insert_nop(ring, 128); + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | WRITE_DATA_DST_SEL(0)) | @@ -6068,18 +6277,11 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, /* sync PFP to ME, otherwise we might get invalid PFP reads */ amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); amdgpu_ring_write(ring, 0x0); - amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); - amdgpu_ring_write(ring, 0); + /* GFX8 emits 128 dw nop to prevent CE access VM before vm_flush finish */ + amdgpu_ring_insert_nop(ring, 128); } } -static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring) -{ - return ring->adev->wb.wb[ring->rptr_offs]; -} - static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring) { return ring->adev->wb.wb[ring->wptr_offs]; @@ -6115,36 +6317,88 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring 
*ring, amdgpu_ring_write(ring, upper_32_bits(seq)); } -static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, - enum amdgpu_interrupt_state state) +static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring) { - u32 cp_int_cntl; + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); + amdgpu_ring_write(ring, 0); +} - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - TIME_STAMP_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = - REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - TIME_STAMP_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; +static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) +{ + uint32_t dw2 = 0; + + dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ + if (flags & AMDGPU_HAVE_CTX_SWITCH) { + /* set load_global_config & load_global_uconfig */ + dw2 |= 0x8001; + /* set load_cs_sh_regs */ + dw2 |= 0x01000000; + /* set load_per_context_state & load_gfx_sh_regs for GFX */ + dw2 |= 0x10002; + + /* set load_ce_ram if preamble presented */ + if (AMDGPU_PREAMBLE_IB_PRESENT & flags) + dw2 |= 0x10000000; + } else { + /* still load_ce_ram if this is the first time preamble presented + * although there is no context switch happens. + */ + if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags) + dw2 |= 0x10000000; } + + amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); + amdgpu_ring_write(ring, dw2); + amdgpu_ring_write(ring, 0); +} + +static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring) +{ + return + 4; /* gfx_v8_0_ring_emit_ib_gfx */ +} + +static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring) +{ + return + 20 + /* gfx_v8_0_ring_emit_gds_switch */ + 7 + /* gfx_v8_0_ring_emit_hdp_flush */ + 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ + 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */ + 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ + 256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */ + 2 + /* gfx_v8_ring_emit_sb */ + 3; /* gfx_v8_ring_emit_cntxcntl */ +} + +static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring) +{ + return + 4; /* gfx_v8_0_ring_emit_ib_compute */ +} + +static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring) +{ + return + 20 + /* gfx_v8_0_ring_emit_gds_switch */ + 7 + /* gfx_v8_0_ring_emit_hdp_flush */ + 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ + 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ + 17 + /* gfx_v8_0_ring_emit_vm_flush */ + 7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */ +} + +static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, + enum amdgpu_interrupt_state state) +{ + WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); } static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, int me, int pipe, enum amdgpu_interrupt_state state) { - u32 mec_int_cntl, mec_int_cntl_reg; - /* * amdgpu controls only pipe 0 of MEC1. That's why this function only * handles the setting of interrupts for this specific pipe. 
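A recurring simplification in these hunks replaces an open-coded read/modify/write switch on the interrupt state with a single WREG32_FIELD() call. A minimal sketch of what such a field helper boils down to, using stand-in register access and made-up mask/shift values rather than the generated register headers:

#include <stdint.h>

/* Stand-ins for the generated <REG>__<FIELD>_MASK / __SHIFT constants;
 * the values are assumed, for illustration only. */
#define TIME_STAMP_INT_ENABLE_MASK	0x04000000u
#define TIME_STAMP_INT_ENABLE_SHIFT	26u

/* Read the register, replace one field, write it back: the pattern the
 * removed switch statements spelled out separately for enable and disable. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

static void write_field(volatile uint32_t *reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	*reg = set_field(*reg, mask, shift, val);
}

With a helper of that shape, the interrupt-state callbacks only need to pass state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1 as the field value, which is exactly the form used in the hunks above and below.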
All other @@ -6154,7 +6408,6 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, if (me == 1) { switch (pipe) { case 0: - mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL; break; default: DRM_DEBUG("invalid pipe %d\n", pipe); @@ -6165,22 +6418,8 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, return; } - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); - mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, - TIME_STAMP_INT_ENABLE, 0); - WREG32(mec_int_cntl_reg, mec_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); - mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, - TIME_STAMP_INT_ENABLE, 1); - WREG32(mec_int_cntl_reg, mec_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); } static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev, @@ -6188,24 +6427,8 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); return 0; } @@ -6215,24 +6438,8 @@ static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 
0 : 1); return 0; } @@ -6338,13 +6545,16 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .resume = gfx_v8_0_resume, .is_idle = gfx_v8_0_is_idle, .wait_for_idle = gfx_v8_0_wait_for_idle, + .check_soft_reset = gfx_v8_0_check_soft_reset, + .pre_soft_reset = gfx_v8_0_pre_soft_reset, .soft_reset = gfx_v8_0_soft_reset, + .post_soft_reset = gfx_v8_0_post_soft_reset, .set_clockgating_state = gfx_v8_0_set_clockgating_state, .set_powergating_state = gfx_v8_0_set_powergating_state, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { - .get_rptr = gfx_v8_0_ring_get_rptr_gfx, + .get_rptr = gfx_v8_0_ring_get_rptr, .get_wptr = gfx_v8_0_ring_get_wptr_gfx, .set_wptr = gfx_v8_0_ring_set_wptr_gfx, .parse_cs = NULL, @@ -6359,10 +6569,14 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .emit_switch_buffer = gfx_v8_ring_emit_sb, + .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl, + .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx, + .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { - .get_rptr = gfx_v8_0_ring_get_rptr_compute, + .get_rptr = gfx_v8_0_ring_get_rptr, .get_wptr = gfx_v8_0_ring_get_wptr_compute, .set_wptr = gfx_v8_0_ring_set_wptr_compute, .parse_cs = NULL, @@ -6377,6 +6591,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute, + .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute, }; static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) @@ -6479,15 +6695,12 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - - data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; - data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) | + RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh); - return (~data) & mask; + return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask; } static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h index bc82c794312c..ebed1f829297 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h @@ -26,6 +26,4 @@ extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; -void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c new file mode 100644 index 000000000000..b13c8aaec078 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -0,0 +1,1071 @@ + +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
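The gfx_v8_0_get_cu_active_bitmap() rewrite above keeps the same arithmetic but lets REG_GET_FIELD() do the mask-and-shift: the hardware reports which CUs are inactive, so the driver inverts that field and clips the result to max_cu_per_sh bits. A small stand-alone model with made-up values:

#include <stdio.h>
#include <stdint.h>

static uint32_t create_bitmask(unsigned int bits)
{
	return bits >= 32 ? 0xffffffffu : (1u << bits) - 1;
}

int main(void)
{
	uint32_t inactive_cus = 0x3;		/* pretend CUs 0 and 1 are fused off */
	unsigned int max_cu_per_sh = 8;		/* example CU count per shader array */
	uint32_t active;

	active = ~inactive_cus & create_bitmask(max_cu_per_sh);
	printf("active CU bitmap: 0x%02x\n", active);	/* prints 0xfc */
	return 0;
}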
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include <linux/firmware.h> +#include "drmP.h" +#include "amdgpu.h" +#include "gmc_v6_0.h" +#include "amdgpu_ucode.h" +#include "si/sid.h" + +static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev); +static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev); +static int gmc_v6_0_wait_for_idle(void *handle); + +MODULE_FIRMWARE("radeon/tahiti_mc.bin"); +MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); +MODULE_FIRMWARE("radeon/verde_mc.bin"); +MODULE_FIRMWARE("radeon/oland_mc.bin"); + +static const u32 crtc_offsets[6] = +{ + SI_CRTC0_REGISTER_OFFSET, + SI_CRTC1_REGISTER_OFFSET, + SI_CRTC2_REGISTER_OFFSET, + SI_CRTC3_REGISTER_OFFSET, + SI_CRTC4_REGISTER_OFFSET, + SI_CRTC5_REGISTER_OFFSET +}; + +static void gmc_v6_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 blackout; + + if (adev->mode_info.num_crtc) + amdgpu_display_stop_mc_access(adev, save); + + gmc_v6_0_wait_for_idle((void *)adev); + + blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); + if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) { + /* Block CPU access */ + WREG32(BIF_FB_EN, 0); + /* blackout the MC */ + blackout = REG_SET_FIELD(blackout, + mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0); + WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); + } + /* wait for the MC to settle */ + udelay(100); + +} + +static void gmc_v6_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + u32 tmp; + + /* unblackout the MC */ + tmp = RREG32(MC_SHARED_BLACKOUT_CNTL); + tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0); + WREG32(MC_SHARED_BLACKOUT_CNTL, tmp); + /* allow CPU access */ + tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1); + tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1); + WREG32(BIF_FB_EN, tmp); + + if (adev->mode_info.num_crtc) + amdgpu_display_resume_mc_access(adev, save); + +} + +static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_TAHITI: + chip_name = "tahiti"; + break; + case CHIP_PITCAIRN: + chip_name = "pitcairn"; + break; + case CHIP_VERDE: + chip_name = "verde"; + break; + case CHIP_OLAND: + chip_name = "oland"; + break; + case CHIP_HAINAN: + chip_name = "hainan"; + break; + default: BUG(); + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = 
request_firmware(&adev->mc.fw, fw_name, adev->dev); + if (err) + goto out; + + err = amdgpu_ucode_validate(adev->mc.fw); + +out: + if (err) { + dev_err(adev->dev, + "si_mc: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->mc.fw); + adev->mc.fw = NULL; + } + return err; +} + +static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev) +{ + const __le32 *new_fw_data = NULL; + u32 running; + const __le32 *new_io_mc_regs = NULL; + int i, regs_size, ucode_size; + const struct mc_firmware_header_v1_0 *hdr; + + if (!adev->mc.fw) + return -EINVAL; + + hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; + + amdgpu_ucode_print_mc_hdr(&hdr->header); + + adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version); + regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); + new_io_mc_regs = (const __le32 *) + (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + new_fw_data = (const __le32 *) + (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; + + if (running == 0) { + + /* reset the engine and set to writable */ + WREG32(MC_SEQ_SUP_CNTL, 0x00000008); + WREG32(MC_SEQ_SUP_CNTL, 0x00000010); + + /* load mc io regs */ + for (i = 0; i < regs_size; i++) { + WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++)); + WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++)); + } + /* load the MC ucode */ + for (i = 0; i < ucode_size; i++) { + WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++)); + } + + /* put the engine back into the active state */ + WREG32(MC_SEQ_SUP_CNTL, 0x00000008); + WREG32(MC_SEQ_SUP_CNTL, 0x00000004); + WREG32(MC_SEQ_SUP_CNTL, 0x00000001); + + /* wait for training to complete */ + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0) + break; + udelay(1); + } + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1) + break; + udelay(1); + } + + } + + return 0; +} + +static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, + struct amdgpu_mc *mc) +{ + if (mc->mc_vram_size > 0xFFC0000000ULL) { + dev_warn(adev->dev, "limiting VRAM\n"); + mc->real_vram_size = 0xFFC0000000ULL; + mc->mc_vram_size = 0xFFC0000000ULL; + } + amdgpu_vram_location(adev, &adev->mc, 0); + adev->mc.gtt_base_align = 0; + amdgpu_gtt_location(adev, mc); +} + +static void gmc_v6_0_mc_program(struct amdgpu_device *adev) +{ + struct amdgpu_mode_mc_save save; + u32 tmp; + int i, j; + + /* Initialize HDP */ + for (i = 0, j = 0; i < 32; i++, j += 0x6) { + WREG32((0xb05 + j), 0x00000000); + WREG32((0xb06 + j), 0x00000000); + WREG32((0xb07 + j), 0x00000000); + WREG32((0xb08 + j), 0x00000000); + WREG32((0xb09 + j), 0x00000000); + } + WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); + + gmc_v6_0_mc_stop(adev, &save); + + if (gmc_v6_0_wait_for_idle((void *)adev)) { + dev_warn(adev->dev, "Wait for MC idle timedout !\n"); + } + + WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); + /* Update configuration */ + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, + adev->mc.vram_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + adev->mc.vram_end >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, + adev->vram_scratch.gpu_addr >> 12); + tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; + tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); + WREG32(MC_VM_FB_LOCATION, tmp); + /* XXX double check these! 
*/ + WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); + WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); + WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); + WREG32(MC_VM_AGP_BASE, 0); + WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); + WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); + + if (gmc_v6_0_wait_for_idle((void *)adev)) { + dev_warn(adev->dev, "Wait for MC idle timedout !\n"); + } + gmc_v6_0_mc_resume(adev, &save); + amdgpu_display_set_vga_render_state(adev, false); +} + +static int gmc_v6_0_mc_init(struct amdgpu_device *adev) +{ + + u32 tmp; + int chansize, numchan; + + tmp = RREG32(MC_ARB_RAMCFG); + if (tmp & CHANSIZE_OVERRIDE) { + chansize = 16; + } else if (tmp & CHANSIZE_MASK) { + chansize = 64; + } else { + chansize = 32; + } + tmp = RREG32(MC_SHARED_CHMAP); + switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { + case 0: + default: + numchan = 1; + break; + case 1: + numchan = 2; + break; + case 2: + numchan = 4; + break; + case 3: + numchan = 8; + break; + case 4: + numchan = 3; + break; + case 5: + numchan = 6; + break; + case 6: + numchan = 10; + break; + case 7: + numchan = 12; + break; + case 8: + numchan = 16; + break; + } + adev->mc.vram_width = numchan * chansize; + /* Could aper size report 0 ? */ + adev->mc.aper_base = pci_resource_start(adev->pdev, 0); + adev->mc.aper_size = pci_resource_len(adev->pdev, 0); + /* size in MB on si */ + adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; + adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; + adev->mc.visible_vram_size = adev->mc.aper_size; + + /* unless the user had overridden it, set the gart + * size equal to the 1024 or vram, whichever is larger. + */ + if (amdgpu_gart_size == -1) + adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev); + else + adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; + + gmc_v6_0_vram_gtt_location(adev, &adev->mc); + + return 0; +} + +static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, + uint32_t vmid) +{ + WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0); + + WREG32(VM_INVALIDATE_REQUEST, 1 << vmid); +} + +static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev, + void *cpu_pt_addr, + uint32_t gpu_page_idx, + uint64_t addr, + uint32_t flags) +{ + void __iomem *ptr = (void *)cpu_pt_addr; + uint64_t value; + + value = addr & 0xFFFFFFFFFFFFF000ULL; + value |= flags; + writeq(value, ptr + (gpu_page_idx * 8)); + + return 0; +} + +static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev, + bool value) +{ + u32 tmp; + + tmp = RREG32(VM_CONTEXT1_CNTL); + tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL, + xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL, + xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL, + xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL, + xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL, + xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value); + tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL, + xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); + WREG32(VM_CONTEXT1_CNTL, tmp); +} + +static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) +{ + int r, i; + + if (adev->gart.robj == NULL) { + dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); + return -EINVAL; + } + r = amdgpu_gart_table_vram_pin(adev); + if (r) + return r; + /* Setup TLB control */ + WREG32(MC_VM_MX_L1_TLB_CNTL, + (0xA << 7) | + ENABLE_L1_TLB | + ENABLE_L1_FRAGMENT_PROCESSING | + 
SYSTEM_ACCESS_MODE_NOT_IN_SYS | + ENABLE_ADVANCED_DRIVER_MODEL | + SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); + /* Setup L2 cache */ + WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | + ENABLE_L2_FRAGMENT_PROCESSING | + ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | + ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | + EFFECTIVE_L2_QUEUE_SIZE(7) | + CONTEXT1_IDENTITY_ACCESS_MODE(1)); + WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE); + WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | + BANK_SELECT(4) | + L2_CACHE_BIGK_FRAGMENT_SIZE(4)); + /* setup context0 */ + WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); + WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, + (u32)(adev->dummy_page.addr >> 12)); + WREG32(VM_CONTEXT0_CNTL2, 0); + WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT)); + + WREG32(0x575, 0); + WREG32(0x576, 0); + WREG32(0x577, 0); + + /* empty context1-15 */ + /* set vm size, must be a multiple of 4 */ + WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); + WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1); + /* Assign the pt base to something valid for now; the pts used for + * the VMs are determined by the application and setup and assigned + * on the fly in the vm part of radeon_gart.c + */ + for (i = 1; i < 16; i++) { + if (i < 8) + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, + adev->gart.table_addr >> 12); + else + WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8, + adev->gart.table_addr >> 12); + } + + /* enable context1-15 */ + WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, + (u32)(adev->dummy_page.addr >> 12)); + WREG32(VM_CONTEXT1_CNTL2, 4); + WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | + PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) | + RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT | + PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT | + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT | + VALID_PROTECTION_FAULT_ENABLE_INTERRUPT | + VALID_PROTECTION_FAULT_ENABLE_DEFAULT | + READ_PROTECTION_FAULT_ENABLE_INTERRUPT | + READ_PROTECTION_FAULT_ENABLE_DEFAULT | + WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT | + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT); + + gmc_v6_0_gart_flush_gpu_tlb(adev, 0); + dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", + (unsigned)(adev->mc.gtt_size >> 20), + (unsigned long long)adev->gart.table_addr); + adev->gart.ready = true; + return 0; +} + +static int gmc_v6_0_gart_init(struct amdgpu_device *adev) +{ + int r; + + if (adev->gart.robj) { + dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n"); + return 0; + } + r = amdgpu_gart_init(adev); + if (r) + return r; + adev->gart.table_size = adev->gart.num_gpu_pages * 8; + return amdgpu_gart_table_vram_alloc(adev); +} + +static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) +{ + /*unsigned i; + + for (i = 1; i < 16; ++i) { + uint32_t reg; + if (i < 8) + reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ; + else + reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8); + adev->vm_manager.saved_table_addr[i] = RREG32(reg); + }*/ + + /* Disable all tables */ + WREG32(VM_CONTEXT0_CNTL, 0); + WREG32(VM_CONTEXT1_CNTL, 0); + /* Setup TLB control */ + WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS | + 
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU); + /* Setup L2 cache */ + WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | + ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE | + EFFECTIVE_L2_QUEUE_SIZE(7) | + CONTEXT1_IDENTITY_ACCESS_MODE(1)); + WREG32(VM_L2_CNTL2, 0); + WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | + L2_CACHE_BIGK_FRAGMENT_SIZE(0)); + amdgpu_gart_table_vram_unpin(adev); +} + +static void gmc_v6_0_gart_fini(struct amdgpu_device *adev) +{ + amdgpu_gart_table_vram_free(adev); + amdgpu_gart_fini(adev); +} + +static int gmc_v6_0_vm_init(struct amdgpu_device *adev) +{ + /* + * number of VMs + * VMID 0 is reserved for System + * amdgpu graphics/compute will use VMIDs 1-7 + * amdkfd will use VMIDs 8-15 + */ + adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; + amdgpu_vm_manager_init(adev); + + /* base offset of vram pages */ + if (adev->flags & AMD_IS_APU) { + u64 tmp = RREG32(MC_VM_FB_OFFSET); + tmp <<= 22; + adev->vm_manager.vram_base_offset = tmp; + } else + adev->vm_manager.vram_base_offset = 0; + + return 0; +} + +static void gmc_v6_0_vm_fini(struct amdgpu_device *adev) +{ +} + +static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, + u32 status, u32 addr, u32 mc_client) +{ + u32 mc_id; + u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID); + u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, + xxPROTECTIONS); + char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, + (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; + + mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, + xxMEMORY_CLIENT_ID); + + dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", + protections, vmid, addr, + REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, + xxMEMORY_CLIENT_RW) ? 
+ "write" : "read", block, mc_client, mc_id); +} + +/* +static const u32 mc_cg_registers[] = { + MC_HUB_MISC_HUB_CG, + MC_HUB_MISC_SIP_CG, + MC_HUB_MISC_VM_CG, + MC_XPB_CLK_GAT, + ATC_MISC_CG, + MC_CITF_MISC_WR_CG, + MC_CITF_MISC_RD_CG, + MC_CITF_MISC_VM_CG, + VM_L2_CG, +}; + +static const u32 mc_cg_ls_en[] = { + MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK, + MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK, + MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK, + MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK, + ATC_MISC_CG__MEM_LS_ENABLE_MASK, + MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK, + MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK, + MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK, + VM_L2_CG__MEM_LS_ENABLE_MASK, +}; + +static const u32 mc_cg_en[] = { + MC_HUB_MISC_HUB_CG__ENABLE_MASK, + MC_HUB_MISC_SIP_CG__ENABLE_MASK, + MC_HUB_MISC_VM_CG__ENABLE_MASK, + MC_XPB_CLK_GAT__ENABLE_MASK, + ATC_MISC_CG__ENABLE_MASK, + MC_CITF_MISC_WR_CG__ENABLE_MASK, + MC_CITF_MISC_RD_CG__ENABLE_MASK, + MC_CITF_MISC_VM_CG__ENABLE_MASK, + VM_L2_CG__ENABLE_MASK, +}; + +static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev, + bool enable) +{ + int i; + u32 orig, data; + + for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { + orig = data = RREG32(mc_cg_registers[i]); + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) + data |= mc_cg_ls_en[i]; + else + data &= ~mc_cg_ls_en[i]; + if (data != orig) + WREG32(mc_cg_registers[i], data); + } +} + +static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev, + bool enable) +{ + int i; + u32 orig, data; + + for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { + orig = data = RREG32(mc_cg_registers[i]); + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) + data |= mc_cg_en[i]; + else + data &= ~mc_cg_en[i]; + if (data != orig) + WREG32(mc_cg_registers[i], data); + } +} + +static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32_PCIE(ixPCIE_CNTL2); + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { + data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); + data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); + data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); + data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1); + } else { + data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0); + data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0); + data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0); + data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0); + } + + if (orig != data) + WREG32_PCIE(ixPCIE_CNTL2, data); +} + +static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32(HDP_HOST_PATH_CNTL); + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) + data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); + else + data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); + + if (orig != data) + WREG32(HDP_HOST_PATH_CNTL, data); +} + +static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev, + bool enable) +{ + u32 orig, data; + + orig = data = RREG32(HDP_MEM_POWER_LS); + + if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) + data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); + else + data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); + + if (orig != data) + WREG32(HDP_MEM_POWER_LS, data); +} +*/ + +static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type) +{ + switch (mc_seq_vram_type) { + case MC_SEQ_MISC0__MT__GDDR1: + return 
AMDGPU_VRAM_TYPE_GDDR1; + case MC_SEQ_MISC0__MT__DDR2: + return AMDGPU_VRAM_TYPE_DDR2; + case MC_SEQ_MISC0__MT__GDDR3: + return AMDGPU_VRAM_TYPE_GDDR3; + case MC_SEQ_MISC0__MT__GDDR4: + return AMDGPU_VRAM_TYPE_GDDR4; + case MC_SEQ_MISC0__MT__GDDR5: + return AMDGPU_VRAM_TYPE_GDDR5; + case MC_SEQ_MISC0__MT__DDR3: + return AMDGPU_VRAM_TYPE_DDR3; + default: + return AMDGPU_VRAM_TYPE_UNKNOWN; + } +} + +static int gmc_v6_0_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + gmc_v6_0_set_gart_funcs(adev); + gmc_v6_0_set_irq_funcs(adev); + + if (adev->flags & AMD_IS_APU) { + adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; + } else { + u32 tmp = RREG32(MC_SEQ_MISC0); + tmp &= MC_SEQ_MISC0__MT__MASK; + adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp); + } + + return 0; +} + +static int gmc_v6_0_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); +} + +static int gmc_v6_0_sw_init(void *handle) +{ + int r; + int dma_bits; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault); + if (r) + return r; + + adev->vm_manager.max_pfn = amdgpu_vm_size << 18; + + adev->mc.mc_mask = 0xffffffffffULL; + + adev->need_dma32 = false; + dma_bits = adev->need_dma32 ? 32 : 40; + r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); + if (r) { + adev->need_dma32 = true; + dma_bits = 32; + dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); + } + r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); + if (r) { + pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); + dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n"); + } + + r = gmc_v6_0_init_microcode(adev); + if (r) { + dev_err(adev->dev, "Failed to load mc firmware!\n"); + return r; + } + + r = amdgpu_ttm_global_init(adev); + if (r) { + return r; + } + + r = gmc_v6_0_mc_init(adev); + if (r) + return r; + + r = amdgpu_bo_init(adev); + if (r) + return r; + + r = gmc_v6_0_gart_init(adev); + if (r) + return r; + + if (!adev->vm_manager.enabled) { + r = gmc_v6_0_vm_init(adev); + if (r) { + dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); + return r; + } + adev->vm_manager.enabled = true; + } + + return r; +} + +static int gmc_v6_0_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->vm_manager.enabled) { + gmc_v6_0_vm_fini(adev); + adev->vm_manager.enabled = false; + } + gmc_v6_0_gart_fini(adev); + amdgpu_gem_force_release(adev); + amdgpu_bo_fini(adev); + + return 0; +} + +static int gmc_v6_0_hw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + gmc_v6_0_mc_program(adev); + + if (!(adev->flags & AMD_IS_APU)) { + r = gmc_v6_0_mc_load_microcode(adev); + if (r) { + dev_err(adev->dev, "Failed to load MC firmware!\n"); + return r; + } + } + + r = gmc_v6_0_gart_enable(adev); + if (r) + return r; + + return r; +} + +static int gmc_v6_0_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_irq_put(adev, &adev->mc.vm_fault, 0); + gmc_v6_0_gart_disable(adev); + + return 0; +} + +static int gmc_v6_0_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->vm_manager.enabled) { + gmc_v6_0_vm_fini(adev); + adev->vm_manager.enabled = false; + } + 
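gmc_v6_0_gart_set_pte_pde() earlier in this file builds each GART entry by keeping the 4 KiB-aligned address bits and OR-ing in the flag bits before a single 64-bit write. A compilable model of that packing; the flag values below are placeholders, not the real SI PTE bit definitions:

#include <stdio.h>
#include <stdint.h>

#define PTE_ADDR_MASK	0xFFFFFFFFFFFFF000ULL	/* drop the 4 KiB page offset bits */
#define PTE_VALID	(1ULL << 0)		/* placeholder flag bits */
#define PTE_READABLE	(1ULL << 5)
#define PTE_WRITEABLE	(1ULL << 6)

static uint64_t make_pte(uint64_t addr, uint64_t flags)
{
	return (addr & PTE_ADDR_MASK) | flags;
}

int main(void)
{
	uint64_t pte = make_pte(0x12345678abcULL, PTE_VALID | PTE_READABLE | PTE_WRITEABLE);

	printf("pte = 0x%016llx\n", (unsigned long long)pte);	/* 0x0000012345678061 */
	return 0;
}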
gmc_v6_0_hw_fini(adev); + + return 0; +} + +static int gmc_v6_0_resume(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = gmc_v6_0_hw_init(adev); + if (r) + return r; + + if (!adev->vm_manager.enabled) { + r = gmc_v6_0_vm_init(adev); + if (r) { + dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); + return r; + } + adev->vm_manager.enabled = true; + } + + return r; +} + +static bool gmc_v6_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 tmp = RREG32(SRBM_STATUS); + + if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK)) + return false; + + return true; +} + +static int gmc_v6_0_wait_for_idle(void *handle) +{ + unsigned i; + u32 tmp; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK | + SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | + SRBM_STATUS__MCD_BUSY_MASK | + SRBM_STATUS__VMC_BUSY_MASK); + if (!tmp) + return 0; + udelay(1); + } + return -ETIMEDOUT; + +} + +static int gmc_v6_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_mode_mc_save save; + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(SRBM_STATUS); + + if (tmp & SRBM_STATUS__VMC_BUSY_MASK) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1); + + if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | + SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { + if (!(adev->flags & AMD_IS_APU)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1); + } + + if (srbm_soft_reset) { + gmc_v6_0_mc_stop(adev, &save); + if (gmc_v6_0_wait_for_idle(adev)) { + dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); + } + + + tmp = RREG32(SRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(SRBM_SOFT_RESET, tmp); + tmp = RREG32(SRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(SRBM_SOFT_RESET, tmp); + tmp = RREG32(SRBM_SOFT_RESET); + + udelay(50); + + gmc_v6_0_mc_resume(adev, &save); + udelay(50); + } + + return 0; +} + +static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp; + u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK); + + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + tmp = RREG32(VM_CONTEXT0_CNTL); + tmp &= ~bits; + WREG32(VM_CONTEXT0_CNTL, tmp); + tmp = RREG32(VM_CONTEXT1_CNTL); + tmp &= ~bits; + WREG32(VM_CONTEXT1_CNTL, tmp); + break; + case AMDGPU_IRQ_STATE_ENABLE: + tmp = RREG32(VM_CONTEXT0_CNTL); + tmp |= bits; + WREG32(VM_CONTEXT0_CNTL, tmp); + tmp = RREG32(VM_CONTEXT1_CNTL); + tmp |= bits; + WREG32(VM_CONTEXT1_CNTL, tmp); + break; + default: + break; + } + + return 0; +} + +static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev, + struct 
amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + u32 addr, status; + + addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); + status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); + WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST) + gmc_v6_0_set_fault_enable_default(adev, false); + + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", + entry->src_id, entry->src_data); + dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", + addr); + dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", + status); + gmc_v6_0_vm_decode_fault(adev, status, addr, 0); + + return 0; +} + +static int gmc_v6_0_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int gmc_v6_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs gmc_v6_0_ip_funcs = { + .name = "gmc_v6_0", + .early_init = gmc_v6_0_early_init, + .late_init = gmc_v6_0_late_init, + .sw_init = gmc_v6_0_sw_init, + .sw_fini = gmc_v6_0_sw_fini, + .hw_init = gmc_v6_0_hw_init, + .hw_fini = gmc_v6_0_hw_fini, + .suspend = gmc_v6_0_suspend, + .resume = gmc_v6_0_resume, + .is_idle = gmc_v6_0_is_idle, + .wait_for_idle = gmc_v6_0_wait_for_idle, + .soft_reset = gmc_v6_0_soft_reset, + .set_clockgating_state = gmc_v6_0_set_clockgating_state, + .set_powergating_state = gmc_v6_0_set_powergating_state, +}; + +static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = { + .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb, + .set_pte_pde = gmc_v6_0_gart_set_pte_pde, +}; + +static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = { + .set = gmc_v6_0_vm_fault_interrupt_state, + .process = gmc_v6_0_process_interrupt, +}; + +static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev) +{ + if (adev->gart.gart_funcs == NULL) + adev->gart.gart_funcs = &gmc_v6_0_gart_funcs; +} + +static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->mc.vm_fault.num_types = 1; + adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h new file mode 100644 index 000000000000..42c4fc676cd4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h @@ -0,0 +1,29 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
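gmc_v6_0_vm_decode_fault() and gmc_v6_0_process_interrupt() above turn two raw registers into a readable message: the client name arrives as four ASCII bytes packed into mc_client, and the VMID and protection bits are fields of the status word. A small model of the decode; the field positions are placeholders, not the real VM_CONTEXT1_PROTECTION_FAULT_STATUS layout:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mc_client = ('C' << 24) | ('B' << 16) | ('0' << 8) | '0';	/* e.g. "CB00" */
	char block[5] = {
		mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0
	};
	uint32_t status = 0x05000031;			/* made-up fault status word */
	uint32_t vmid = (status >> 24) & 0xf;		/* placeholder field position */
	uint32_t protections = status & 0xff;		/* placeholder field position */

	printf("VM fault (0x%02x, vmid %u) from '%s'\n", protections, vmid, block);
	return 0;
}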
+ * + */ + +#ifndef __GMC_V6_0_H__ +#define __GMC_V6_0_H__ + +extern const struct amd_ip_funcs gmc_v6_0_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0b0f08641eed..aa0c4b964621 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -183,7 +183,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) const struct mc_firmware_header_v1_0 *hdr; const __le32 *fw_data = NULL; const __le32 *io_mc_regs = NULL; - u32 running, blackout = 0; + u32 running; int i, ucode_size, regs_size; if (!adev->mc.fw) @@ -203,11 +203,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); if (running == 0) { - if (running) { - blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); @@ -239,9 +234,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) break; udelay(1); } - - if (running) - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); } return 0; @@ -393,7 +385,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) * size equal to the 1024 or vram, whichever is larger. */ if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); + adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev); else adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; @@ -953,6 +945,11 @@ static int gmc_v7_0_sw_init(void *handle) return r; } + r = amdgpu_ttm_global_init(adev); + if (r) { + return r; + } + r = gmc_v7_0_mc_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 2aee2c6f3cd5..1b319f5bc696 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -261,7 +261,7 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) const struct mc_firmware_header_v1_0 *hdr; const __le32 *fw_data = NULL; const __le32 *io_mc_regs = NULL; - u32 running, blackout = 0; + u32 running; int i, ucode_size, regs_size; if (!adev->mc.fw) @@ -269,8 +269,10 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) /* Skip MC ucode loading on SR-IOV capable boards. * vbios does this for us in asic_init in that case. + * Skip MC ucode loading on VF, because hypervisor will do that + * for this adaptor. */ - if (adev->virtualization.supports_sr_iov) + if (amdgpu_sriov_bios(adev)) return 0; hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; @@ -287,11 +289,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); if (running == 0) { - if (running) { - blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); @@ -323,9 +320,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) break; udelay(1); } - - if (running) - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); } return 0; @@ -477,7 +471,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) * size equal to the 1024 or vram, whichever is larger. 
*/ if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); + adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev); else adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; @@ -957,6 +951,11 @@ static int gmc_v8_0_sw_init(void *handle) return r; } + r = amdgpu_ttm_global_init(adev); + if (r) { + return r; + } + r = gmc_v8_0_mc_init(adev); if (r) return r; @@ -1100,9 +1099,8 @@ static int gmc_v8_0_wait_for_idle(void *handle) } -static int gmc_v8_0_soft_reset(void *handle) +static int gmc_v8_0_check_soft_reset(void *handle) { - struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1117,13 +1115,42 @@ static int gmc_v8_0_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } - if (srbm_soft_reset) { - gmc_v8_0_mc_stop(adev, &save); - if (gmc_v8_0_wait_for_idle((void *)adev)) { - dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); - } + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true; + adev->mc.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false; + adev->mc.srbm_soft_reset = 0; + } + return 0; +} +static int gmc_v8_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + + gmc_v8_0_mc_stop(adev, &adev->mc.save); + if (gmc_v8_0_wait_for_idle(adev)) { + dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); + } + + return 0; +} + +static int gmc_v8_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + srbm_soft_reset = adev->mc.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -1139,14 +1166,22 @@ static int gmc_v8_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - - gmc_v8_0_mc_resume(adev, &save); - udelay(50); } return 0; } +static int gmc_v8_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + + gmc_v8_0_mc_resume(adev, &adev->mc.save); + return 0; +} + static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -1414,7 +1449,10 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .resume = gmc_v8_0_resume, .is_idle = gmc_v8_0_is_idle, .wait_for_idle = gmc_v8_0_wait_for_idle, + .check_soft_reset = gmc_v8_0_check_soft_reset, + .pre_soft_reset = gmc_v8_0_pre_soft_reset, .soft_reset = gmc_v8_0_soft_reset, + .post_soft_reset = gmc_v8_0_post_soft_reset, .set_clockgating_state = gmc_v8_0_set_clockgating_state, .set_powergating_state = gmc_v8_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c deleted file mode 100644 index 2f078ad6095c..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. 
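The gmc_v8_0 hunk just above splits the old monolithic soft_reset into check / pre / reset / post stages: check_soft_reset() records whether the block hung, and the MC stop/resume moves into the pre and post hooks. A simplified sketch of how such a staged flow fits together; the structure and loop are stand-ins, not the real amd_ip_funcs handling:

#include <stdbool.h>
#include <stddef.h>

struct ip_block {
	bool hang;				/* decided in the check stage */
	bool (*check_soft_reset)(void *handle);	/* true if the block is hung */
	void (*pre_soft_reset)(void *handle);	/* e.g. stop the memory controller */
	void (*soft_reset)(void *handle);	/* toggle the SRBM_SOFT_RESET bits */
	void (*post_soft_reset)(void *handle);	/* e.g. resume the memory controller */
};

static void reset_hung_blocks(struct ip_block *blocks, size_t n, void *handle)
{
	size_t i;

	/* First pass: every block decides for itself whether it is hung. */
	for (i = 0; i < n; i++)
		blocks[i].hang = blocks[i].check_soft_reset ?
				 blocks[i].check_soft_reset(handle) : false;

	/* Second pass: only hung blocks go through pre/reset/post. */
	for (i = 0; i < n; i++) {
		if (!blocks[i].hang)
			continue;
		if (blocks[i].pre_soft_reset)
			blocks[i].pre_soft_reset(handle);
		if (blocks[i].soft_reset)
			blocks[i].soft_reset(handle);
		if (blocks[i].post_soft_reset)
			blocks[i].post_soft_reset(handle);
	}
}

In the driver itself the hang flag lives in adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang and the hooks return int status codes, but the control flow is the same idea.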
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include <linux/firmware.h> -#include "drmP.h" -#include "amdgpu.h" -#include "iceland_smum.h" - -MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); - -static void iceland_dpm_set_funcs(struct amdgpu_device *adev); - -static int iceland_dpm_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - iceland_dpm_set_funcs(adev); - - return 0; -} - -static int iceland_dpm_init_microcode(struct amdgpu_device *adev) -{ - char fw_name[30] = "amdgpu/topaz_smc.bin"; - int err; - - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); - -out: - if (err) { - DRM_ERROR("Failed to load firmware \"%s\"", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } - return err; -} - -static int iceland_dpm_sw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ret = iceland_dpm_init_microcode(adev); - if (ret) - return ret; - - return 0; -} - -static int iceland_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - - return 0; -} - -static int iceland_dpm_hw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - /* smu init only needs to be called at startup, not resume. - * It should be in sw_init, but requires the fw info gathered - * in sw_init from other IP modules. - */ - ret = iceland_smu_init(adev); - if (ret) { - DRM_ERROR("SMU initialization failed\n"); - goto fail; - } - - ret = iceland_smu_start(adev); - if (ret) { - DRM_ERROR("SMU start failed\n"); - goto fail; - } - - mutex_unlock(&adev->pm.mutex); - return 0; - -fail: - adev->firmware.smu_load = false; - mutex_unlock(&adev->pm.mutex); - return -EINVAL; -} - -static int iceland_dpm_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - /* smu fini only needs to be called at teardown, not suspend. - * It should be in sw_fini, but we put it here for symmetry - * with smu init. 
- */ - iceland_smu_fini(adev); - mutex_unlock(&adev->pm.mutex); - return 0; -} - -static int iceland_dpm_suspend(void *handle) -{ - return 0; -} - -static int iceland_dpm_resume(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - ret = iceland_smu_start(adev); - if (ret) { - DRM_ERROR("SMU start failed\n"); - goto fail; - } - -fail: - mutex_unlock(&adev->pm.mutex); - return ret; -} - -static int iceland_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int iceland_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -const struct amd_ip_funcs iceland_dpm_ip_funcs = { - .name = "iceland_dpm", - .early_init = iceland_dpm_early_init, - .late_init = NULL, - .sw_init = iceland_dpm_sw_init, - .sw_fini = iceland_dpm_sw_fini, - .hw_init = iceland_dpm_hw_init, - .hw_fini = iceland_dpm_hw_fini, - .suspend = iceland_dpm_suspend, - .resume = iceland_dpm_resume, - .is_idle = NULL, - .wait_for_idle = NULL, - .soft_reset = NULL, - .set_clockgating_state = iceland_dpm_set_clockgating_state, - .set_powergating_state = iceland_dpm_set_powergating_state, -}; - -static const struct amdgpu_dpm_funcs iceland_dpm_funcs = { - .get_temperature = NULL, - .pre_set_power_state = NULL, - .set_power_state = NULL, - .post_set_power_state = NULL, - .display_configuration_changed = NULL, - .get_sclk = NULL, - .get_mclk = NULL, - .print_power_state = NULL, - .debugfs_print_current_performance_level = NULL, - .force_performance_level = NULL, - .vblank_too_short = NULL, - .powergate_uvd = NULL, -}; - -static void iceland_dpm_set_funcs(struct amdgpu_device *adev) -{ - if (NULL == adev->pm.funcs) - adev->pm.funcs = &iceland_dpm_funcs; -} diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c deleted file mode 100644 index 211839913728..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ /dev/null @@ -1,677 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include <linux/firmware.h> -#include "drmP.h" -#include "amdgpu.h" -#include "ppsmc.h" -#include "iceland_smum.h" -#include "smu_ucode_xfer_vi.h" -#include "amdgpu_ucode.h" - -#include "smu/smu_7_1_1_d.h" -#include "smu/smu_7_1_1_sh_mask.h" - -#define ICELAND_SMC_SIZE 0x20000 - -static int iceland_set_smc_sram_address(struct amdgpu_device *adev, - uint32_t smc_address, uint32_t limit) -{ - uint32_t val; - - if (smc_address & 3) - return -EINVAL; - - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(mmSMC_IND_INDEX_0, smc_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - return 0; -} - -static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev, - uint32_t smc_start_address, - const uint8_t *src, - uint32_t byte_count, uint32_t limit) -{ - uint32_t addr; - uint32_t data, orig_data; - int result = 0; - uint32_t extra_shift; - unsigned long flags; - - if (smc_start_address & 3) - return -EINVAL; - - if ((smc_start_address + byte_count) > limit) - return -EINVAL; - - addr = smc_start_address; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - while (byte_count >= 4) { - /* Bytes are written into the SMC addres space with the MSB first */ - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - - result = iceland_set_smc_sram_address(adev, addr, limit); - - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - if (0 != byte_count) { - /* Now write odd bytes left, do a read modify write cycle */ - data = 0; - - result = iceland_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - orig_data = RREG32(mmSMC_IND_DATA_0); - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - data = (data << 8) + *src++; - byte_count--; - } - - data <<= extra_shift; - data |= (orig_data & ~((~0UL) << extra_shift)); - - result = iceland_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - } - -out: - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -void iceland_start_smc(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); -} - -void iceland_reset_smc(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); -} - -static int iceland_program_jump_on_start(struct amdgpu_device *adev) -{ - static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40}; - iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); - - return 0; -} - -void iceland_stop_smc_clock(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); -} - -void iceland_start_smc_clock(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); -} - -static bool iceland_is_smc_ram_running(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable); - - return ((0 == val) && (0x20100 <= 
RREG32_SMC(ixSMC_PC_C))); -} - -static int wait_smu_response(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32(mmSMC_RESP_0); - if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} - -static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) -{ - if (!iceland_is_smc_ram_running(adev)) - return -EINVAL; - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MESSAGE_0, msg); - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send message\n"); - return -EINVAL; - } - - return 0; -} - -static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, - PPSMC_Msg msg) -{ - if (!iceland_is_smc_ram_running(adev)) - return -EINVAL; - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MESSAGE_0, msg); - - return 0; -} - -static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, - uint32_t parameter) -{ - WREG32(mmSMC_MSG_ARG_0, parameter); - - return iceland_send_msg_to_smc(adev, msg); -} - -static int iceland_send_msg_to_smc_with_parameter_without_waiting( - struct amdgpu_device *adev, - PPSMC_Msg msg, uint32_t parameter) -{ - WREG32(mmSMC_MSG_ARG_0, parameter); - - return iceland_send_msg_to_smc_without_waiting(adev, msg); -} - -#if 0 /* not used yet */ -static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - if (!iceland_is_smc_ram_running(adev)) - return -EINVAL; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} -#endif - -static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev) -{ - const struct smc_firmware_header_v1_0 *hdr; - uint32_t ucode_size; - uint32_t ucode_start_address; - const uint8_t *src; - uint32_t val; - uint32_t byte_count; - uint32_t data; - unsigned long flags; - int i; - - if (!adev->pm.fw) - return -EINVAL; - - /* Skip SMC ucode loading on SR-IOV capable boards. - * vbios does this for us in asic_init in that case. 
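iceland_copy_bytes_to_smc() in the file being deleted above streams data through the indexed SMC_IND_INDEX_0 / SMC_IND_DATA_0 pair, packing source bytes MSB first into each 32-bit word and merging a trailing partial word into the existing register contents. A stand-alone model of that packing (count must be 1..4):

#include <stdio.h>
#include <stdint.h>

/* Pack up to four bytes MSB first; for a short tail, keep the low bytes of
 * 'orig' the way the driver's read-modify-write of SMC_IND_DATA_0 did. */
static uint32_t pack_be(const uint8_t *src, unsigned int count, uint32_t orig)
{
	unsigned int extra_shift = 8 * (4 - count);
	uint32_t data = 0;
	unsigned int i;

	for (i = 0; i < count; i++)
		data = (data << 8) + src[i];

	data <<= extra_shift;
	return data | (orig & ~(~0u << extra_shift));
}

int main(void)
{
	const uint8_t full[4] = { 0xE0, 0x00, 0x80, 0x40 };	/* the jump-on-start bytes */
	const uint8_t tail[2] = { 0xAA, 0xBB };

	printf("full word: 0x%08x\n", pack_be(full, 4, 0));		/* 0xe0008040 */
	printf("tail word: 0x%08x\n", pack_be(tail, 2, 0x11223344));	/* 0xaabb3344 */
	return 0;
}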
- */ - if (adev->virtualization.supports_sr_iov) - return 0; - - hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; - amdgpu_ucode_print_smc_hdr(&hdr->header); - - adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); - ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); - src = (const uint8_t *) - (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - - if (ucode_size & 3) { - DRM_ERROR("SMC ucode is not 4 bytes aligned\n"); - return -EINVAL; - } - - if (ucode_size > ICELAND_SMC_SIZE) { - DRM_ERROR("SMC address is beyond the SMC RAM area\n"); - return -EINVAL; - } - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixRCU_UC_EVENTS); - if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0) - break; - udelay(1); - } - val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL); - WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1); - - iceland_stop_smc_clock(adev); - iceland_reset_smc(adev); - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(mmSMC_IND_INDEX_0, ucode_start_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - byte_count = ucode_size; - while (byte_count >= 4) { - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - WREG32(mmSMC_IND_DATA_0, data); - src += 4; - byte_count -= 4; - } - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return 0; -} - -#if 0 /* not used yet */ -static int iceland_read_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t *value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = iceland_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - *value = RREG32(mmSMC_IND_DATA_0); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int iceland_write_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = iceland_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - WREG32(mmSMC_IND_DATA_0, value); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int iceland_smu_stop_smc(struct amdgpu_device *adev) -{ - iceland_reset_smc(adev); - iceland_stop_smc_clock(adev); - - return 0; -} -#endif - -static int iceland_smu_start_smc(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - iceland_program_jump_on_start(adev); - iceland_start_smc_clock(adev); - iceland_start_smc(adev); - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixFIRMWARE_FLAGS); - if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1) - break; - udelay(1); - } - return 0; -} - -static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case UCODE_ID_SDMA0: - return AMDGPU_UCODE_ID_SDMA0; - case UCODE_ID_SDMA1: - return AMDGPU_UCODE_ID_SDMA1; - case UCODE_ID_CP_CE: - return AMDGPU_UCODE_ID_CP_CE; - case UCODE_ID_CP_PFP: - return AMDGPU_UCODE_ID_CP_PFP; - case UCODE_ID_CP_ME: - return AMDGPU_UCODE_ID_CP_ME; - case UCODE_ID_CP_MEC: - case UCODE_ID_CP_MEC_JT1: - return AMDGPU_UCODE_ID_CP_MEC1; - case 
UCODE_ID_CP_MEC_JT2: - return AMDGPU_UCODE_ID_CP_MEC2; - case UCODE_ID_RLC_G: - return AMDGPU_UCODE_ID_RLC_G; - default: - DRM_ERROR("ucode type is out of range!\n"); - return AMDGPU_UCODE_ID_MAXIMUM; - } -} - -static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case AMDGPU_UCODE_ID_SDMA0: - return UCODE_ID_SDMA0_MASK; - case AMDGPU_UCODE_ID_SDMA1: - return UCODE_ID_SDMA1_MASK; - case AMDGPU_UCODE_ID_CP_CE: - return UCODE_ID_CP_CE_MASK; - case AMDGPU_UCODE_ID_CP_PFP: - return UCODE_ID_CP_PFP_MASK; - case AMDGPU_UCODE_ID_CP_ME: - return UCODE_ID_CP_ME_MASK; - case AMDGPU_UCODE_ID_CP_MEC1: - return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK; - case AMDGPU_UCODE_ID_CP_MEC2: - return UCODE_ID_CP_MEC_MASK; - case AMDGPU_UCODE_ID_RLC_G: - return UCODE_ID_RLC_G_MASK; - default: - DRM_ERROR("ucode type is out of range!\n"); - return 0; - } -} - -static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev, - uint32_t fw_type, - struct SMU_Entry *entry) -{ - enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type); - struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; - const struct gfx_firmware_header_v1_0 *header = NULL; - uint64_t gpu_addr; - uint32_t data_size; - - if (ucode->fw == NULL) - return -EINVAL; - - gpu_addr = ucode->mc_addr; - header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; - data_size = le32_to_cpu(header->header.ucode_size_bytes); - - entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); - entry->id = (uint16_t)fw_type; - entry->image_addr_high = upper_32_bits(gpu_addr); - entry->image_addr_low = lower_32_bits(gpu_addr); - entry->meta_data_addr_high = 0; - entry->meta_data_addr_low = 0; - entry->data_size_byte = data_size; - entry->num_register_entries = 0; - entry->flags = 0; - - return 0; -} - -static int iceland_smu_request_load_fw(struct amdgpu_device *adev) -{ - struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv; - struct SMU_DRAMData_TOC *toc; - uint32_t fw_to_load; - - toc = (struct SMU_DRAMData_TOC *)private->header; - toc->num_entries = 0; - toc->structure_version = 1; - - if (!adev->firmware.smu_load) - return 0; - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for RLC\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for CE\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for PFP\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for ME\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); - return -EINVAL; - } - - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA0\n"); - return -EINVAL; - } - - 
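Every block in iceland_smu_request_load_fw repeats the same populate-one-entry pattern; the SDMA1 entry continues below. Read as a whole it is equivalent to a loop over a ucode list, sketched here with a hypothetical table that does not exist in the driver:

/* Hypothetical sketch of the same TOC population as a loop; the real code
 * open-codes one if-block per ucode. */
static const uint32_t iceland_toc_ucodes[] = {
        UCODE_ID_RLC_G, UCODE_ID_CP_CE, UCODE_ID_CP_PFP, UCODE_ID_CP_ME,
        UCODE_ID_CP_MEC, UCODE_ID_CP_MEC_JT1, UCODE_ID_SDMA0, UCODE_ID_SDMA1,
};

static int iceland_smu_populate_toc(struct amdgpu_device *adev,
                                    struct SMU_DRAMData_TOC *toc)
{
        uint32_t i;

        for (i = 0; i < ARRAY_SIZE(iceland_toc_ucodes); i++) {
                if (iceland_smu_populate_single_firmware_entry(adev,
                                iceland_toc_ucodes[i],
                                &toc->entry[toc->num_entries++])) {
                        DRM_ERROR("Failed to get firmware entry for 0x%x\n",
                                  iceland_toc_ucodes[i]);
                        return -EINVAL;
                }
        }
        return 0;
}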
if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA1\n"); - return -EINVAL; - } - - iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); - iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); - - fw_to_load = UCODE_ID_RLC_G_MASK | - UCODE_ID_SDMA0_MASK | - UCODE_ID_SDMA1_MASK | - UCODE_ID_CP_CE_MASK | - UCODE_ID_CP_ME_MASK | - UCODE_ID_CP_PFP_MASK | - UCODE_ID_CP_MEC_MASK | - UCODE_ID_CP_MEC_JT1_MASK; - - - if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { - DRM_ERROR("Fail to request SMU load ucode\n"); - return -EINVAL; - } - - return 0; -} - -static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev, - uint32_t fw_type) -{ - uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type); - int i; - - for (i = 0; i < adev->usec_timeout; i++) { - if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("check firmware loading failed\n"); - return -EINVAL; - } - - return 0; -} - -int iceland_smu_start(struct amdgpu_device *adev) -{ - int result; - - result = iceland_smu_upload_firmware_image(adev); - if (result) - return result; - result = iceland_smu_start_smc(adev); - if (result) - return result; - - return iceland_smu_request_load_fw(adev); -} - -static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = { - .check_fw_load_finish = iceland_smu_check_fw_load_finish, - .request_smu_load_fw = NULL, - .request_smu_specific_fw = NULL, -}; - -int iceland_smu_init(struct amdgpu_device *adev) -{ - struct iceland_smu_private_data *private; - uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; - uint64_t mc_addr; - void *toc_buf_ptr; - int ret; - - private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL); - if (NULL == private) - return -ENOMEM; - - /* allocate firmware buffers */ - if (adev->firmware.smu_load) - amdgpu_ucode_init_bo(adev); - - adev->smu.priv = private; - adev->smu.fw_flags = 0; - - /* Allocate FW image data structure and header buffer */ - ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, toc_buf); - if (ret) { - DRM_ERROR("Failed to allocate memory for TOC buffer\n"); - return -ENOMEM; - } - - /* Retrieve GPU address for header buffer and internal buffer */ - ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to reserve the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to pin the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to map the TOC buffer\n"); - return -EINVAL; - } - - amdgpu_bo_unreserve(adev->smu.toc_buf); - private->header_addr_low = lower_32_bits(mc_addr); - private->header_addr_high = upper_32_bits(mc_addr); - private->header = toc_buf_ptr; - - adev->smu.smumgr_funcs = &iceland_smumgr_funcs; - - return 0; -} - -int iceland_smu_fini(struct 
amdgpu_device *adev) -{ - amdgpu_bo_unref(&adev->smu.toc_buf); - kfree(adev->smu.priv); - adev->smu.priv = NULL; - if (adev->firmware.fw_buf) - amdgpu_ucode_fini_bo(adev); - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index a845e883f5fa..f8618a3881a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2845,7 +2845,11 @@ static int kv_dpm_init(struct amdgpu_device *adev) pi->caps_tcp_ramping = true; } - pi->caps_sclk_ds = true; + if (amdgpu_sclk_deep_sleep_en) + pi->caps_sclk_ds = true; + else + pi->caps_sclk_ds = false; + pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; if (amdgpu_bapm == 0) diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h new file mode 100644 index 000000000000..055321f61ca7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h @@ -0,0 +1,127 @@ +/* + * Copyright 2011 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __R600_DPM_H__ +#define __R600_DPM_H__ + +#define R600_ASI_DFLT 10000 +#define R600_BSP_DFLT 0x41EB +#define R600_BSU_DFLT 0x2 +#define R600_AH_DFLT 5 +#define R600_RLP_DFLT 25 +#define R600_RMP_DFLT 65 +#define R600_LHP_DFLT 40 +#define R600_LMP_DFLT 15 +#define R600_TD_DFLT 0 +#define R600_UTC_DFLT_00 0x24 +#define R600_UTC_DFLT_01 0x22 +#define R600_UTC_DFLT_02 0x22 +#define R600_UTC_DFLT_03 0x22 +#define R600_UTC_DFLT_04 0x22 +#define R600_UTC_DFLT_05 0x22 +#define R600_UTC_DFLT_06 0x22 +#define R600_UTC_DFLT_07 0x22 +#define R600_UTC_DFLT_08 0x22 +#define R600_UTC_DFLT_09 0x22 +#define R600_UTC_DFLT_10 0x22 +#define R600_UTC_DFLT_11 0x22 +#define R600_UTC_DFLT_12 0x22 +#define R600_UTC_DFLT_13 0x22 +#define R600_UTC_DFLT_14 0x22 +#define R600_DTC_DFLT_00 0x24 +#define R600_DTC_DFLT_01 0x22 +#define R600_DTC_DFLT_02 0x22 +#define R600_DTC_DFLT_03 0x22 +#define R600_DTC_DFLT_04 0x22 +#define R600_DTC_DFLT_05 0x22 +#define R600_DTC_DFLT_06 0x22 +#define R600_DTC_DFLT_07 0x22 +#define R600_DTC_DFLT_08 0x22 +#define R600_DTC_DFLT_09 0x22 +#define R600_DTC_DFLT_10 0x22 +#define R600_DTC_DFLT_11 0x22 +#define R600_DTC_DFLT_12 0x22 +#define R600_DTC_DFLT_13 0x22 +#define R600_DTC_DFLT_14 0x22 +#define R600_VRC_DFLT 0x0000C003 +#define R600_VOLTAGERESPONSETIME_DFLT 1000 +#define R600_BACKBIASRESPONSETIME_DFLT 1000 +#define R600_VRU_DFLT 0x3 +#define R600_SPLLSTEPTIME_DFLT 0x1000 +#define R600_SPLLSTEPUNIT_DFLT 0x3 +#define R600_TPU_DFLT 0 +#define R600_TPC_DFLT 0x200 +#define R600_SSTU_DFLT 0 +#define R600_SST_DFLT 0x00C8 +#define R600_GICST_DFLT 0x200 +#define R600_FCT_DFLT 0x0400 +#define R600_FCTU_DFLT 0 +#define R600_CTXCGTT3DRPHC_DFLT 0x20 +#define R600_CTXCGTT3DRSDC_DFLT 0x40 +#define R600_VDDC3DOORPHC_DFLT 0x100 +#define R600_VDDC3DOORSDC_DFLT 0x7 +#define R600_VDDC3DOORSU_DFLT 0 +#define R600_MPLLLOCKTIME_DFLT 100 +#define R600_MPLLRESETTIME_DFLT 150 +#define R600_VCOSTEPPCT_DFLT 20 +#define R600_ENDINGVCOSTEPPCT_DFLT 5 +#define R600_REFERENCEDIVIDER_DFLT 4 + +#define R600_PM_NUMBER_OF_TC 15 +#define R600_PM_NUMBER_OF_SCLKS 20 +#define R600_PM_NUMBER_OF_MCLKS 4 +#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4 +#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3 + +/* XXX are these ok? 
*/ +#define R600_TEMP_RANGE_MIN (90 * 1000) +#define R600_TEMP_RANGE_MAX (120 * 1000) + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + +enum r600_power_level { + R600_POWER_LEVEL_LOW = 0, + R600_POWER_LEVEL_MEDIUM = 1, + R600_POWER_LEVEL_HIGH = 2, + R600_POWER_LEVEL_CTXSW = 3, +}; + +enum r600_td { + R600_TD_AUTO, + R600_TD_UP, + R600_TD_DOWN, +}; + +enum r600_display_watermark { + R600_DISPLAY_WATERMARK_LOW = 0, + R600_DISPLAY_WATERMARK_HIGH = 1, +}; + +enum r600_display_gap +{ + R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, + R600_PM_DISPLAY_GAP_VBLANK = 1, + R600_PM_DISPLAY_GAP_WATERMARK = 2, + R600_PM_DISPLAY_GAP_IGNORE = 3, +}; +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index a64715d90503..565dab3c7218 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -190,12 +190,8 @@ out: */ static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring) { - u32 rptr; - /* XXX check if swapping is necessary on BE */ - rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2; - - return rptr; + return ring->adev->wb.wb[ring->rptr_offs] >> 2; } /** @@ -749,24 +745,16 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; - - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + unsigned bytes = count * 8; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -774,39 +762,27 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). 
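The rewritten sdma_v2_4_vm_copy_pte above emits exactly one COPY_LINEAR packet and drops the old 0x1FFFF8-byte chunking loop, so splitting a large update is presumably left to the common VM code that builds the IB. A minimal caller-side sketch of that split under that assumption; the helper below is illustrative, not driver code:

/* Sketch: splitting a large PTE copy now that vm_copy_pte writes a single
 * packet. The 8-byte PTE size and the 0x1FFFF8 copy limit come from the
 * removed loop. */
static void sdma_v2_4_copy_ptes_split(struct amdgpu_ib *ib, uint64_t pe,
                                      uint64_t src, unsigned count)
{
        const unsigned max_ptes = 0x1FFFF8 / 8;

        while (count) {
                unsigned n = count > max_ptes ? max_ptes : count;

                sdma_v2_4_vm_copy_pte(ib, pe, src, n);
                pe += (uint64_t)n * 8;
                src += (uint64_t)n * 8;
                count -= n;
        }
}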
*/ -static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } + unsigned ndw = count * 2; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } @@ -822,40 +798,21 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, * * Update the page tables using sDMA (CIK). */ -static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** @@ -945,6 +902,22 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ } +static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring) +{ + return + 7 + 6; /* sdma_v2_4_ring_emit_ib */ +} + +static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring) +{ + return + 6 + /* sdma_v2_4_ring_emit_hdp_flush */ + 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */ + 6 + /* sdma_v2_4_ring_emit_pipeline_sync */ + 12 + 
/* sdma_v2_4_ring_emit_vm_flush */ + 10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */ +} + static int sdma_v2_4_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1263,6 +1236,8 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { .test_ib = sdma_v2_4_ring_test_ib, .insert_nop = sdma_v2_4_ring_insert_nop, .pad_ib = sdma_v2_4_ring_pad_ib, + .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size, + .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size, }; static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 653ce5ed55ae..f325fd86430b 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -335,12 +335,8 @@ out: */ static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring) { - u32 rptr; - /* XXX check if swapping is necessary on BE */ - rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2; - - return rptr; + return ring->adev->wb.wb[ring->rptr_offs] >> 2; } /** @@ -499,31 +495,6 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); } -unsigned init_cond_exec(struct amdgpu_ring *ring) -{ - unsigned ret; - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE)); - amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); - amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); - amdgpu_ring_write(ring, 1); - ret = ring->wptr;/* this is the offset we need patch later */ - amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */ - return ret; -} - -void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) -{ - unsigned cur; - BUG_ON(ring->ring[offset] != 0x55aa55aa); - - cur = ring->wptr - 1; - if (likely(cur > offset)) - ring->ring[offset] = cur - offset; - else - ring->ring[offset] = (ring->ring_size>>2) - offset + cur; -} - - /** * sdma_v3_0_gfx_stop - stop the gfx async dma engines * @@ -976,24 +947,16 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; - - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + unsigned bytes = count * 8; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -1001,39 +964,27 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * 
Update PTEs by writing them manually using sDMA (CIK). */ -static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) -{ - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } +static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) +{ + unsigned ndw = count * 2; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } @@ -1049,40 +1000,21 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, * * Update the page tables using sDMA (CIK). */ -static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** @@ -1172,6 +1104,22 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ } +static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) +{ + return + 7 + 6; /* sdma_v3_0_ring_emit_ib */ +} + +static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) +{ + return + 6 + /* sdma_v3_0_ring_emit_hdp_flush */ + 3 + /* 
sdma_v3_0_ring_emit_hdp_invalidate */ + 6 + /* sdma_v3_0_ring_emit_pipeline_sync */ + 12 + /* sdma_v3_0_ring_emit_vm_flush */ + 10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */ +} + static int sdma_v3_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1320,28 +1268,79 @@ static int sdma_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v3_0_soft_reset(void *handle) +static int sdma_v3_0_check_soft_reset(void *handle) { - u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS2); - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) || + (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) { srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; - } - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; } if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true; + adev->sdma.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false; + adev->sdma.srbm_soft_reset = 0; + } + + return 0; +} + +static int sdma_v3_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + + if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) || + REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) { + sdma_v3_0_ctx_switch_enable(adev, false); + sdma_v3_0_enable(adev, false); + } + + return 0; +} + +static int sdma_v3_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + + if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) || + REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) { + sdma_v3_0_gfx_resume(adev); + sdma_v3_0_rlc_resume(adev); + } + + return 0; +} + +static int sdma_v3_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + u32 tmp; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + + if (srbm_soft_reset) { tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -1559,6 +1558,9 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .resume = sdma_v3_0_resume, .is_idle = sdma_v3_0_is_idle, .wait_for_idle = sdma_v3_0_wait_for_idle, + .check_soft_reset = sdma_v3_0_check_soft_reset, + .pre_soft_reset = sdma_v3_0_pre_soft_reset, + .post_soft_reset = sdma_v3_0_post_soft_reset, .soft_reset = sdma_v3_0_soft_reset, .set_clockgating_state = sdma_v3_0_set_clockgating_state, .set_powergating_state = sdma_v3_0_set_powergating_state, @@ -1579,6 +1581,8 @@ static const struct amdgpu_ring_funcs 
sdma_v3_0_ring_funcs = { .test_ib = sdma_v3_0_ring_test_ib, .insert_nop = sdma_v3_0_ring_insert_nop, .pad_ib = sdma_v3_0_ring_pad_ib, + .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size, + .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size, }; static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c new file mode 100644 index 000000000000..dc9511c5ecb8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -0,0 +1,1965 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/firmware.h> +#include <linux/slab.h> +#include <linux/module.h> +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_atombios.h" +#include "amdgpu_ih.h" +#include "amdgpu_uvd.h" +#include "amdgpu_vce.h" +#include "atom.h" +#include "amdgpu_powerplay.h" +#include "si/sid.h" +#include "si_ih.h" +#include "gfx_v6_0.h" +#include "gmc_v6_0.h" +#include "si_dma.h" +#include "dce_v6_0.h" +#include "si.h" + +static const u32 tahiti_golden_registers[] = +{ + 0x2684, 0x00010000, 0x00018208, + 0x260c, 0xffffffff, 0x00000000, + 0x260d, 0xf00fffff, 0x00000400, + 0x260e, 0x0002021c, 0x00020200, + 0x031e, 0x00000080, 0x00000000, + 0x340c, 0x000300c0, 0x00800040, + 0x360c, 0x000300c0, 0x00800040, + 0x16ec, 0x000000f0, 0x00000070, + 0x16f0, 0x00200000, 0x50100000, + 0x1c0c, 0x31000311, 0x00000011, + 0x09df, 0x00000003, 0x000007ff, + 0x0903, 0x000007ff, 0x00000000, + 0x2285, 0xf000001f, 0x00000007, + 0x22c9, 0xffffffff, 0x00ffffff, + 0x22c4, 0x0000ff0f, 0x00000000, + 0xa293, 0x07ffffff, 0x4e000000, + 0xa0d4, 0x3f3f3fff, 0x2a00126a, + 0x000c, 0x000000ff, 0x0040, + 0x000d, 0x00000040, 0x00004040, + 0x2440, 0x07ffffff, 0x03000000, + 0x23a2, 0x01ff1f3f, 0x00000000, + 0x23a1, 0x01ff1f3f, 0x00000000, + 0x2418, 0x0000007f, 0x00000020, + 0x2542, 0x00010000, 0x00010000, + 0x2b05, 0x00000200, 0x000002fb, + 0x2b04, 0xffffffff, 0x0000543b, + 0x2b03, 0xffffffff, 0xa9210876, + 0x2234, 0xffffffff, 0x000fff40, + 0x2235, 0x0000001f, 0x00000010, + 0x0504, 0x20000000, 0x20fffed8, + 0x0570, 0x000c0fc0, 0x000c0400 +}; + +static const u32 tahiti_golden_registers2[] = +{ + 0x0319, 0x00000001, 0x00000001 +}; + +static const u32 tahiti_golden_rlc_registers[] = +{ + 0x3109, 0xffffffff, 0x00601005, + 0x311f, 0xffffffff, 0x10104040, + 0x3122, 0xffffffff, 0x0100000a, + 0x30c5, 0xffffffff, 0x00000800, + 0x30c3, 0xffffffff, 0x800000f4, + 0x3d2a, 0xffffffff, 0x00000000 
+}; + +static const u32 pitcairn_golden_registers[] = +{ + 0x2684, 0x00010000, 0x00018208, + 0x260c, 0xffffffff, 0x00000000, + 0x260d, 0xf00fffff, 0x00000400, + 0x260e, 0x0002021c, 0x00020200, + 0x031e, 0x00000080, 0x00000000, + 0x340c, 0x000300c0, 0x00800040, + 0x360c, 0x000300c0, 0x00800040, + 0x16ec, 0x000000f0, 0x00000070, + 0x16f0, 0x00200000, 0x50100000, + 0x1c0c, 0x31000311, 0x00000011, + 0x0ab9, 0x00073ffe, 0x000022a2, + 0x0903, 0x000007ff, 0x00000000, + 0x2285, 0xf000001f, 0x00000007, + 0x22c9, 0xffffffff, 0x00ffffff, + 0x22c4, 0x0000ff0f, 0x00000000, + 0xa293, 0x07ffffff, 0x4e000000, + 0xa0d4, 0x3f3f3fff, 0x2a00126a, + 0x000c, 0x000000ff, 0x0040, + 0x000d, 0x00000040, 0x00004040, + 0x2440, 0x07ffffff, 0x03000000, + 0x2418, 0x0000007f, 0x00000020, + 0x2542, 0x00010000, 0x00010000, + 0x2b05, 0x000003ff, 0x000000f7, + 0x2b04, 0xffffffff, 0x00000000, + 0x2b03, 0xffffffff, 0x32761054, + 0x2235, 0x0000001f, 0x00000010, + 0x0570, 0x000c0fc0, 0x000c0400 +}; + +static const u32 pitcairn_golden_rlc_registers[] = +{ + 0x3109, 0xffffffff, 0x00601004, + 0x311f, 0xffffffff, 0x10102020, + 0x3122, 0xffffffff, 0x01000020, + 0x30c5, 0xffffffff, 0x00000800, + 0x30c3, 0xffffffff, 0x800000a4 +}; + +static const u32 verde_pg_init[] = +{ + 0xd4f, 0xffffffff, 0x40000, + 0xd4e, 0xffffffff, 0x200010ff, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x7007, + 0xd4e, 0xffffffff, 0x300010ff, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x400000, + 0xd4e, 0xffffffff, 0x100010ff, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x120200, + 0xd4e, 0xffffffff, 0x500010ff, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x1e1e16, + 0xd4e, 0xffffffff, 0x600010ff, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x171f1e, + 0xd4e, 0xffffffff, 0x700010ff, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4f, 0xffffffff, 0x0, + 0xd4e, 0xffffffff, 0x9ff, + 0xd40, 0xffffffff, 0x0, + 0xd41, 0xffffffff, 0x10000800, + 0xd41, 0xffffffff, 0xf, + 0xd41, 0xffffffff, 0xf, + 0xd40, 0xffffffff, 0x4, + 0xd41, 0xffffffff, 0x1000051e, + 0xd41, 0xffffffff, 0xffff, + 0xd41, 0xffffffff, 0xffff, + 0xd40, 0xffffffff, 0x8, + 0xd41, 0xffffffff, 0x80500, + 0xd40, 0xffffffff, 0x12, + 0xd41, 0xffffffff, 0x9050c, + 0xd40, 0xffffffff, 0x1d, + 0xd41, 0xffffffff, 0xb052c, + 0xd40, 0xffffffff, 0x2a, + 0xd41, 0xffffffff, 0x1053e, + 0xd40, 0xffffffff, 0x2d, + 0xd41, 0xffffffff, 0x10546, + 0xd40, 0xffffffff, 0x30, + 0xd41, 0xffffffff, 0xa054e, + 0xd40, 0xffffffff, 0x3c, + 0xd41, 0xffffffff, 0x1055f, + 0xd40, 0xffffffff, 0x3f, + 0xd41, 0xffffffff, 0x10567, + 0xd40, 0xffffffff, 0x42, + 0xd41, 0xffffffff, 0x1056f, + 0xd40, 0xffffffff, 0x45, + 0xd41, 0xffffffff, 0x10572, + 0xd40, 0xffffffff, 0x48, + 0xd41, 0xffffffff, 0x20575, + 0xd40, 0xffffffff, 0x4c, + 0xd41, 0xffffffff, 0x190801, + 0xd40, 0xffffffff, 0x67, + 0xd41, 0xffffffff, 0x1082a, + 0xd40, 0xffffffff, 0x6a, + 0xd41, 0xffffffff, 0x1b082d, + 0xd40, 0xffffffff, 0x87, + 0xd41, 0xffffffff, 0x310851, + 0xd40, 
0xffffffff, 0xba, + 0xd41, 0xffffffff, 0x891, + 0xd40, 0xffffffff, 0xbc, + 0xd41, 0xffffffff, 0x893, + 0xd40, 0xffffffff, 0xbe, + 0xd41, 0xffffffff, 0x20895, + 0xd40, 0xffffffff, 0xc2, + 0xd41, 0xffffffff, 0x20899, + 0xd40, 0xffffffff, 0xc6, + 0xd41, 0xffffffff, 0x2089d, + 0xd40, 0xffffffff, 0xca, + 0xd41, 0xffffffff, 0x8a1, + 0xd40, 0xffffffff, 0xcc, + 0xd41, 0xffffffff, 0x8a3, + 0xd40, 0xffffffff, 0xce, + 0xd41, 0xffffffff, 0x308a5, + 0xd40, 0xffffffff, 0xd3, + 0xd41, 0xffffffff, 0x6d08cd, + 0xd40, 0xffffffff, 0x142, + 0xd41, 0xffffffff, 0x2000095a, + 0xd41, 0xffffffff, 0x1, + 0xd40, 0xffffffff, 0x144, + 0xd41, 0xffffffff, 0x301f095b, + 0xd40, 0xffffffff, 0x165, + 0xd41, 0xffffffff, 0xc094d, + 0xd40, 0xffffffff, 0x173, + 0xd41, 0xffffffff, 0xf096d, + 0xd40, 0xffffffff, 0x184, + 0xd41, 0xffffffff, 0x15097f, + 0xd40, 0xffffffff, 0x19b, + 0xd41, 0xffffffff, 0xc0998, + 0xd40, 0xffffffff, 0x1a9, + 0xd41, 0xffffffff, 0x409a7, + 0xd40, 0xffffffff, 0x1af, + 0xd41, 0xffffffff, 0xcdc, + 0xd40, 0xffffffff, 0x1b1, + 0xd41, 0xffffffff, 0x800, + 0xd42, 0xffffffff, 0x6c9b2000, + 0xd44, 0xfc00, 0x2000, + 0xd51, 0xffffffff, 0xfc0, + 0xa35, 0x00000100, 0x100 +}; + +static const u32 verde_golden_rlc_registers[] = +{ + 0x3109, 0xffffffff, 0x033f1005, + 0x311f, 0xffffffff, 0x10808020, + 0x3122, 0xffffffff, 0x00800008, + 0x30c5, 0xffffffff, 0x00001000, + 0x30c3, 0xffffffff, 0x80010014 +}; + +static const u32 verde_golden_registers[] = +{ + 0x2684, 0x00010000, 0x00018208, + 0x260c, 0xffffffff, 0x00000000, + 0x260d, 0xf00fffff, 0x00000400, + 0x260e, 0x0002021c, 0x00020200, + 0x031e, 0x00000080, 0x00000000, + 0x340c, 0x000300c0, 0x00800040, + 0x340c, 0x000300c0, 0x00800040, + 0x360c, 0x000300c0, 0x00800040, + 0x360c, 0x000300c0, 0x00800040, + 0x16ec, 0x000000f0, 0x00000070, + 0x16f0, 0x00200000, 0x50100000, + + 0x1c0c, 0x31000311, 0x00000011, + 0x0ab9, 0x00073ffe, 0x000022a2, + 0x0ab9, 0x00073ffe, 0x000022a2, + 0x0ab9, 0x00073ffe, 0x000022a2, + 0x0903, 0x000007ff, 0x00000000, + 0x0903, 0x000007ff, 0x00000000, + 0x0903, 0x000007ff, 0x00000000, + 0x2285, 0xf000001f, 0x00000007, + 0x2285, 0xf000001f, 0x00000007, + 0x2285, 0xf000001f, 0x00000007, + 0x2285, 0xffffffff, 0x00ffffff, + 0x22c4, 0x0000ff0f, 0x00000000, + + 0xa293, 0x07ffffff, 0x4e000000, + 0xa0d4, 0x3f3f3fff, 0x0000124a, + 0xa0d4, 0x3f3f3fff, 0x0000124a, + 0xa0d4, 0x3f3f3fff, 0x0000124a, + 0x000c, 0x000000ff, 0x0040, + 0x000d, 0x00000040, 0x00004040, + 0x2440, 0x07ffffff, 0x03000000, + 0x2440, 0x07ffffff, 0x03000000, + 0x23a2, 0x01ff1f3f, 0x00000000, + 0x23a3, 0x01ff1f3f, 0x00000000, + 0x23a2, 0x01ff1f3f, 0x00000000, + 0x23a1, 0x01ff1f3f, 0x00000000, + 0x23a1, 0x01ff1f3f, 0x00000000, + + 0x23a1, 0x01ff1f3f, 0x00000000, + 0x2418, 0x0000007f, 0x00000020, + 0x2542, 0x00010000, 0x00010000, + 0x2b01, 0x000003ff, 0x00000003, + 0x2b05, 0x000003ff, 0x00000003, + 0x2b05, 0x000003ff, 0x00000003, + 0x2b04, 0xffffffff, 0x00000000, + 0x2b04, 0xffffffff, 0x00000000, + 0x2b04, 0xffffffff, 0x00000000, + 0x2b03, 0xffffffff, 0x00001032, + 0x2b03, 0xffffffff, 0x00001032, + 0x2b03, 0xffffffff, 0x00001032, + 0x2235, 0x0000001f, 0x00000010, + 0x2235, 0x0000001f, 0x00000010, + 0x2235, 0x0000001f, 0x00000010, + 0x0570, 0x000c0fc0, 0x000c0400 +}; + +static const u32 oland_golden_registers[] = +{ + 0x2684, 0x00010000, 0x00018208, + 0x260c, 0xffffffff, 0x00000000, + 0x260d, 0xf00fffff, 0x00000400, + 0x260e, 0x0002021c, 0x00020200, + 0x031e, 0x00000080, 0x00000000, + 0x340c, 0x000300c0, 0x00800040, + 0x360c, 0x000300c0, 0x00800040, + 0x16ec, 0x000000f0, 0x00000070, + 0x16f9, 
0x00200000, 0x50100000, + 0x1c0c, 0x31000311, 0x00000011, + 0x0ab9, 0x00073ffe, 0x000022a2, + 0x0903, 0x000007ff, 0x00000000, + 0x2285, 0xf000001f, 0x00000007, + 0x22c9, 0xffffffff, 0x00ffffff, + 0x22c4, 0x0000ff0f, 0x00000000, + 0xa293, 0x07ffffff, 0x4e000000, + 0xa0d4, 0x3f3f3fff, 0x00000082, + 0x000c, 0x000000ff, 0x0040, + 0x000d, 0x00000040, 0x00004040, + 0x2440, 0x07ffffff, 0x03000000, + 0x2418, 0x0000007f, 0x00000020, + 0x2542, 0x00010000, 0x00010000, + 0x2b05, 0x000003ff, 0x000000f3, + 0x2b04, 0xffffffff, 0x00000000, + 0x2b03, 0xffffffff, 0x00003210, + 0x2235, 0x0000001f, 0x00000010, + 0x0570, 0x000c0fc0, 0x000c0400 +}; + +static const u32 oland_golden_rlc_registers[] = +{ + 0x3109, 0xffffffff, 0x00601005, + 0x311f, 0xffffffff, 0x10104040, + 0x3122, 0xffffffff, 0x0100000a, + 0x30c5, 0xffffffff, 0x00000800, + 0x30c3, 0xffffffff, 0x800000f4 +}; + +static const u32 hainan_golden_registers[] = +{ + 0x2684, 0x00010000, 0x00018208, + 0x260c, 0xffffffff, 0x00000000, + 0x260d, 0xf00fffff, 0x00000400, + 0x260e, 0x0002021c, 0x00020200, + 0x4595, 0xff000fff, 0x00000100, + 0x340c, 0x000300c0, 0x00800040, + 0x3630, 0xff000fff, 0x00000100, + 0x360c, 0x000300c0, 0x00800040, + 0x0ab9, 0x00073ffe, 0x000022a2, + 0x0903, 0x000007ff, 0x00000000, + 0x2285, 0xf000001f, 0x00000007, + 0x22c9, 0xffffffff, 0x00ffffff, + 0x22c4, 0x0000ff0f, 0x00000000, + 0xa393, 0x07ffffff, 0x4e000000, + 0xa0d4, 0x3f3f3fff, 0x00000000, + 0x000c, 0x000000ff, 0x0040, + 0x000d, 0x00000040, 0x00004040, + 0x2440, 0x03e00000, 0x03600000, + 0x2418, 0x0000007f, 0x00000020, + 0x2542, 0x00010000, 0x00010000, + 0x2b05, 0x000003ff, 0x000000f1, + 0x2b04, 0xffffffff, 0x00000000, + 0x2b03, 0xffffffff, 0x00003210, + 0x2235, 0x0000001f, 0x00000010, + 0x0570, 0x000c0fc0, 0x000c0400 +}; + +static const u32 hainan_golden_registers2[] = +{ + 0x263e, 0xffffffff, 0x02010001 +}; + +static const u32 tahiti_mgcg_cgcg_init[] = +{ + 0x3100, 0xffffffff, 0xfffffffc, + 0x200b, 0xffffffff, 0xe0000000, + 0x2698, 0xffffffff, 0x00000100, + 0x24a9, 0xffffffff, 0x00000100, + 0x3059, 0xffffffff, 0x00000100, + 0x25dd, 0xffffffff, 0x00000100, + 0x2261, 0xffffffff, 0x06000100, + 0x2286, 0xffffffff, 0x00000100, + 0x24a8, 0xffffffff, 0x00000100, + 0x30e0, 0xffffffff, 0x00000100, + 0x22ca, 0xffffffff, 0x00000100, + 0x2451, 0xffffffff, 0x00000100, + 0x2362, 0xffffffff, 0x00000100, + 0x2363, 0xffffffff, 0x00000100, + 0x240c, 0xffffffff, 0x00000100, + 0x240d, 0xffffffff, 0x00000100, + 0x240e, 0xffffffff, 0x00000100, + 0x240f, 0xffffffff, 0x00000100, + 0x2b60, 0xffffffff, 0x00000100, + 0x2b15, 0xffffffff, 0x00000100, + 0x225f, 0xffffffff, 0x06000100, + 0x261a, 0xffffffff, 0x00000100, + 0x2544, 0xffffffff, 0x00000100, + 0x2bc1, 0xffffffff, 0x00000100, + 0x2b81, 0xffffffff, 0x00000100, + 0x2527, 0xffffffff, 0x00000100, + 0x200b, 0xffffffff, 0xe0000000, + 0x2458, 0xffffffff, 0x00010000, + 0x2459, 0xffffffff, 0x00030002, + 0x245a, 0xffffffff, 0x00040007, + 0x245b, 0xffffffff, 0x00060005, + 0x245c, 0xffffffff, 0x00090008, + 0x245d, 0xffffffff, 0x00020001, + 0x245e, 0xffffffff, 0x00040003, + 0x245f, 0xffffffff, 0x00000007, + 0x2460, 0xffffffff, 0x00060005, + 0x2461, 0xffffffff, 0x00090008, + 0x2462, 0xffffffff, 0x00030002, + 0x2463, 0xffffffff, 0x00050004, + 0x2464, 0xffffffff, 0x00000008, + 0x2465, 0xffffffff, 0x00070006, + 0x2466, 0xffffffff, 0x000a0009, + 0x2467, 0xffffffff, 0x00040003, + 0x2468, 0xffffffff, 0x00060005, + 0x2469, 0xffffffff, 0x00000009, + 0x246a, 0xffffffff, 0x00080007, + 0x246b, 0xffffffff, 0x000b000a, + 0x246c, 0xffffffff, 0x00050004, + 0x246d, 0xffffffff, 
0x00070006, + 0x246e, 0xffffffff, 0x0008000b, + 0x246f, 0xffffffff, 0x000a0009, + 0x2470, 0xffffffff, 0x000d000c, + 0x2471, 0xffffffff, 0x00060005, + 0x2472, 0xffffffff, 0x00080007, + 0x2473, 0xffffffff, 0x0000000b, + 0x2474, 0xffffffff, 0x000a0009, + 0x2475, 0xffffffff, 0x000d000c, + 0x2476, 0xffffffff, 0x00070006, + 0x2477, 0xffffffff, 0x00090008, + 0x2478, 0xffffffff, 0x0000000c, + 0x2479, 0xffffffff, 0x000b000a, + 0x247a, 0xffffffff, 0x000e000d, + 0x247b, 0xffffffff, 0x00080007, + 0x247c, 0xffffffff, 0x000a0009, + 0x247d, 0xffffffff, 0x0000000d, + 0x247e, 0xffffffff, 0x000c000b, + 0x247f, 0xffffffff, 0x000f000e, + 0x2480, 0xffffffff, 0x00090008, + 0x2481, 0xffffffff, 0x000b000a, + 0x2482, 0xffffffff, 0x000c000f, + 0x2483, 0xffffffff, 0x000e000d, + 0x2484, 0xffffffff, 0x00110010, + 0x2485, 0xffffffff, 0x000a0009, + 0x2486, 0xffffffff, 0x000c000b, + 0x2487, 0xffffffff, 0x0000000f, + 0x2488, 0xffffffff, 0x000e000d, + 0x2489, 0xffffffff, 0x00110010, + 0x248a, 0xffffffff, 0x000b000a, + 0x248b, 0xffffffff, 0x000d000c, + 0x248c, 0xffffffff, 0x00000010, + 0x248d, 0xffffffff, 0x000f000e, + 0x248e, 0xffffffff, 0x00120011, + 0x248f, 0xffffffff, 0x000c000b, + 0x2490, 0xffffffff, 0x000e000d, + 0x2491, 0xffffffff, 0x00000011, + 0x2492, 0xffffffff, 0x0010000f, + 0x2493, 0xffffffff, 0x00130012, + 0x2494, 0xffffffff, 0x000d000c, + 0x2495, 0xffffffff, 0x000f000e, + 0x2496, 0xffffffff, 0x00100013, + 0x2497, 0xffffffff, 0x00120011, + 0x2498, 0xffffffff, 0x00150014, + 0x2499, 0xffffffff, 0x000e000d, + 0x249a, 0xffffffff, 0x0010000f, + 0x249b, 0xffffffff, 0x00000013, + 0x249c, 0xffffffff, 0x00120011, + 0x249d, 0xffffffff, 0x00150014, + 0x249e, 0xffffffff, 0x000f000e, + 0x249f, 0xffffffff, 0x00110010, + 0x24a0, 0xffffffff, 0x00000014, + 0x24a1, 0xffffffff, 0x00130012, + 0x24a2, 0xffffffff, 0x00160015, + 0x24a3, 0xffffffff, 0x0010000f, + 0x24a4, 0xffffffff, 0x00120011, + 0x24a5, 0xffffffff, 0x00000015, + 0x24a6, 0xffffffff, 0x00140013, + 0x24a7, 0xffffffff, 0x00170016, + 0x2454, 0xffffffff, 0x96940200, + 0x21c2, 0xffffffff, 0x00900100, + 0x311e, 0xffffffff, 0x00000080, + 0x3101, 0xffffffff, 0x0020003f, + 0xc, 0xffffffff, 0x0000001c, + 0xd, 0x000f0000, 0x000f0000, + 0x583, 0xffffffff, 0x00000100, + 0x409, 0xffffffff, 0x00000100, + 0x40b, 0x00000101, 0x00000000, + 0x82a, 0xffffffff, 0x00000104, + 0x993, 0x000c0000, 0x000c0000, + 0x992, 0x000c0000, 0x000c0000, + 0x1579, 0xff000fff, 0x00000100, + 0x157a, 0x00000001, 0x00000001, + 0xbd4, 0x00000001, 0x00000001, + 0xc33, 0xc0000fff, 0x00000104, + 0x3079, 0x00000001, 0x00000001, + 0x3430, 0xfffffff0, 0x00000100, + 0x3630, 0xfffffff0, 0x00000100 +}; +static const u32 pitcairn_mgcg_cgcg_init[] = +{ + 0x3100, 0xffffffff, 0xfffffffc, + 0x200b, 0xffffffff, 0xe0000000, + 0x2698, 0xffffffff, 0x00000100, + 0x24a9, 0xffffffff, 0x00000100, + 0x3059, 0xffffffff, 0x00000100, + 0x25dd, 0xffffffff, 0x00000100, + 0x2261, 0xffffffff, 0x06000100, + 0x2286, 0xffffffff, 0x00000100, + 0x24a8, 0xffffffff, 0x00000100, + 0x30e0, 0xffffffff, 0x00000100, + 0x22ca, 0xffffffff, 0x00000100, + 0x2451, 0xffffffff, 0x00000100, + 0x2362, 0xffffffff, 0x00000100, + 0x2363, 0xffffffff, 0x00000100, + 0x240c, 0xffffffff, 0x00000100, + 0x240d, 0xffffffff, 0x00000100, + 0x240e, 0xffffffff, 0x00000100, + 0x240f, 0xffffffff, 0x00000100, + 0x2b60, 0xffffffff, 0x00000100, + 0x2b15, 0xffffffff, 0x00000100, + 0x225f, 0xffffffff, 0x06000100, + 0x261a, 0xffffffff, 0x00000100, + 0x2544, 0xffffffff, 0x00000100, + 0x2bc1, 0xffffffff, 0x00000100, + 0x2b81, 0xffffffff, 0x00000100, + 0x2527, 0xffffffff, 0x00000100, + 
0x200b, 0xffffffff, 0xe0000000, + 0x2458, 0xffffffff, 0x00010000, + 0x2459, 0xffffffff, 0x00030002, + 0x245a, 0xffffffff, 0x00040007, + 0x245b, 0xffffffff, 0x00060005, + 0x245c, 0xffffffff, 0x00090008, + 0x245d, 0xffffffff, 0x00020001, + 0x245e, 0xffffffff, 0x00040003, + 0x245f, 0xffffffff, 0x00000007, + 0x2460, 0xffffffff, 0x00060005, + 0x2461, 0xffffffff, 0x00090008, + 0x2462, 0xffffffff, 0x00030002, + 0x2463, 0xffffffff, 0x00050004, + 0x2464, 0xffffffff, 0x00000008, + 0x2465, 0xffffffff, 0x00070006, + 0x2466, 0xffffffff, 0x000a0009, + 0x2467, 0xffffffff, 0x00040003, + 0x2468, 0xffffffff, 0x00060005, + 0x2469, 0xffffffff, 0x00000009, + 0x246a, 0xffffffff, 0x00080007, + 0x246b, 0xffffffff, 0x000b000a, + 0x246c, 0xffffffff, 0x00050004, + 0x246d, 0xffffffff, 0x00070006, + 0x246e, 0xffffffff, 0x0008000b, + 0x246f, 0xffffffff, 0x000a0009, + 0x2470, 0xffffffff, 0x000d000c, + 0x2480, 0xffffffff, 0x00090008, + 0x2481, 0xffffffff, 0x000b000a, + 0x2482, 0xffffffff, 0x000c000f, + 0x2483, 0xffffffff, 0x000e000d, + 0x2484, 0xffffffff, 0x00110010, + 0x2485, 0xffffffff, 0x000a0009, + 0x2486, 0xffffffff, 0x000c000b, + 0x2487, 0xffffffff, 0x0000000f, + 0x2488, 0xffffffff, 0x000e000d, + 0x2489, 0xffffffff, 0x00110010, + 0x248a, 0xffffffff, 0x000b000a, + 0x248b, 0xffffffff, 0x000d000c, + 0x248c, 0xffffffff, 0x00000010, + 0x248d, 0xffffffff, 0x000f000e, + 0x248e, 0xffffffff, 0x00120011, + 0x248f, 0xffffffff, 0x000c000b, + 0x2490, 0xffffffff, 0x000e000d, + 0x2491, 0xffffffff, 0x00000011, + 0x2492, 0xffffffff, 0x0010000f, + 0x2493, 0xffffffff, 0x00130012, + 0x2494, 0xffffffff, 0x000d000c, + 0x2495, 0xffffffff, 0x000f000e, + 0x2496, 0xffffffff, 0x00100013, + 0x2497, 0xffffffff, 0x00120011, + 0x2498, 0xffffffff, 0x00150014, + 0x2454, 0xffffffff, 0x96940200, + 0x21c2, 0xffffffff, 0x00900100, + 0x311e, 0xffffffff, 0x00000080, + 0x3101, 0xffffffff, 0x0020003f, + 0xc, 0xffffffff, 0x0000001c, + 0xd, 0x000f0000, 0x000f0000, + 0x583, 0xffffffff, 0x00000100, + 0x409, 0xffffffff, 0x00000100, + 0x40b, 0x00000101, 0x00000000, + 0x82a, 0xffffffff, 0x00000104, + 0x1579, 0xff000fff, 0x00000100, + 0x157a, 0x00000001, 0x00000001, + 0xbd4, 0x00000001, 0x00000001, + 0xc33, 0xc0000fff, 0x00000104, + 0x3079, 0x00000001, 0x00000001, + 0x3430, 0xfffffff0, 0x00000100, + 0x3630, 0xfffffff0, 0x00000100 +}; +static const u32 verde_mgcg_cgcg_init[] = +{ + 0x3100, 0xffffffff, 0xfffffffc, + 0x200b, 0xffffffff, 0xe0000000, + 0x2698, 0xffffffff, 0x00000100, + 0x24a9, 0xffffffff, 0x00000100, + 0x3059, 0xffffffff, 0x00000100, + 0x25dd, 0xffffffff, 0x00000100, + 0x2261, 0xffffffff, 0x06000100, + 0x2286, 0xffffffff, 0x00000100, + 0x24a8, 0xffffffff, 0x00000100, + 0x30e0, 0xffffffff, 0x00000100, + 0x22ca, 0xffffffff, 0x00000100, + 0x2451, 0xffffffff, 0x00000100, + 0x2362, 0xffffffff, 0x00000100, + 0x2363, 0xffffffff, 0x00000100, + 0x240c, 0xffffffff, 0x00000100, + 0x240d, 0xffffffff, 0x00000100, + 0x240e, 0xffffffff, 0x00000100, + 0x240f, 0xffffffff, 0x00000100, + 0x2b60, 0xffffffff, 0x00000100, + 0x2b15, 0xffffffff, 0x00000100, + 0x225f, 0xffffffff, 0x06000100, + 0x261a, 0xffffffff, 0x00000100, + 0x2544, 0xffffffff, 0x00000100, + 0x2bc1, 0xffffffff, 0x00000100, + 0x2b81, 0xffffffff, 0x00000100, + 0x2527, 0xffffffff, 0x00000100, + 0x200b, 0xffffffff, 0xe0000000, + 0x2458, 0xffffffff, 0x00010000, + 0x2459, 0xffffffff, 0x00030002, + 0x245a, 0xffffffff, 0x00040007, + 0x245b, 0xffffffff, 0x00060005, + 0x245c, 0xffffffff, 0x00090008, + 0x245d, 0xffffffff, 0x00020001, + 0x245e, 0xffffffff, 0x00040003, + 0x245f, 0xffffffff, 0x00000007, + 0x2460, 
0xffffffff, 0x00060005, + 0x2461, 0xffffffff, 0x00090008, + 0x2462, 0xffffffff, 0x00030002, + 0x2463, 0xffffffff, 0x00050004, + 0x2464, 0xffffffff, 0x00000008, + 0x2465, 0xffffffff, 0x00070006, + 0x2466, 0xffffffff, 0x000a0009, + 0x2467, 0xffffffff, 0x00040003, + 0x2468, 0xffffffff, 0x00060005, + 0x2469, 0xffffffff, 0x00000009, + 0x246a, 0xffffffff, 0x00080007, + 0x246b, 0xffffffff, 0x000b000a, + 0x246c, 0xffffffff, 0x00050004, + 0x246d, 0xffffffff, 0x00070006, + 0x246e, 0xffffffff, 0x0008000b, + 0x246f, 0xffffffff, 0x000a0009, + 0x2470, 0xffffffff, 0x000d000c, + 0x2480, 0xffffffff, 0x00090008, + 0x2481, 0xffffffff, 0x000b000a, + 0x2482, 0xffffffff, 0x000c000f, + 0x2483, 0xffffffff, 0x000e000d, + 0x2484, 0xffffffff, 0x00110010, + 0x2485, 0xffffffff, 0x000a0009, + 0x2486, 0xffffffff, 0x000c000b, + 0x2487, 0xffffffff, 0x0000000f, + 0x2488, 0xffffffff, 0x000e000d, + 0x2489, 0xffffffff, 0x00110010, + 0x248a, 0xffffffff, 0x000b000a, + 0x248b, 0xffffffff, 0x000d000c, + 0x248c, 0xffffffff, 0x00000010, + 0x248d, 0xffffffff, 0x000f000e, + 0x248e, 0xffffffff, 0x00120011, + 0x248f, 0xffffffff, 0x000c000b, + 0x2490, 0xffffffff, 0x000e000d, + 0x2491, 0xffffffff, 0x00000011, + 0x2492, 0xffffffff, 0x0010000f, + 0x2493, 0xffffffff, 0x00130012, + 0x2494, 0xffffffff, 0x000d000c, + 0x2495, 0xffffffff, 0x000f000e, + 0x2496, 0xffffffff, 0x00100013, + 0x2497, 0xffffffff, 0x00120011, + 0x2498, 0xffffffff, 0x00150014, + 0x2454, 0xffffffff, 0x96940200, + 0x21c2, 0xffffffff, 0x00900100, + 0x311e, 0xffffffff, 0x00000080, + 0x3101, 0xffffffff, 0x0020003f, + 0xc, 0xffffffff, 0x0000001c, + 0xd, 0x000f0000, 0x000f0000, + 0x583, 0xffffffff, 0x00000100, + 0x409, 0xffffffff, 0x00000100, + 0x40b, 0x00000101, 0x00000000, + 0x82a, 0xffffffff, 0x00000104, + 0x993, 0x000c0000, 0x000c0000, + 0x992, 0x000c0000, 0x000c0000, + 0x1579, 0xff000fff, 0x00000100, + 0x157a, 0x00000001, 0x00000001, + 0xbd4, 0x00000001, 0x00000001, + 0xc33, 0xc0000fff, 0x00000104, + 0x3079, 0x00000001, 0x00000001, + 0x3430, 0xfffffff0, 0x00000100, + 0x3630, 0xfffffff0, 0x00000100 +}; +static const u32 oland_mgcg_cgcg_init[] = +{ + 0x3100, 0xffffffff, 0xfffffffc, + 0x200b, 0xffffffff, 0xe0000000, + 0x2698, 0xffffffff, 0x00000100, + 0x24a9, 0xffffffff, 0x00000100, + 0x3059, 0xffffffff, 0x00000100, + 0x25dd, 0xffffffff, 0x00000100, + 0x2261, 0xffffffff, 0x06000100, + 0x2286, 0xffffffff, 0x00000100, + 0x24a8, 0xffffffff, 0x00000100, + 0x30e0, 0xffffffff, 0x00000100, + 0x22ca, 0xffffffff, 0x00000100, + 0x2451, 0xffffffff, 0x00000100, + 0x2362, 0xffffffff, 0x00000100, + 0x2363, 0xffffffff, 0x00000100, + 0x240c, 0xffffffff, 0x00000100, + 0x240d, 0xffffffff, 0x00000100, + 0x240e, 0xffffffff, 0x00000100, + 0x240f, 0xffffffff, 0x00000100, + 0x2b60, 0xffffffff, 0x00000100, + 0x2b15, 0xffffffff, 0x00000100, + 0x225f, 0xffffffff, 0x06000100, + 0x261a, 0xffffffff, 0x00000100, + 0x2544, 0xffffffff, 0x00000100, + 0x2bc1, 0xffffffff, 0x00000100, + 0x2b81, 0xffffffff, 0x00000100, + 0x2527, 0xffffffff, 0x00000100, + 0x200b, 0xffffffff, 0xe0000000, + 0x2458, 0xffffffff, 0x00010000, + 0x2459, 0xffffffff, 0x00030002, + 0x245a, 0xffffffff, 0x00040007, + 0x245b, 0xffffffff, 0x00060005, + 0x245c, 0xffffffff, 0x00090008, + 0x245d, 0xffffffff, 0x00020001, + 0x245e, 0xffffffff, 0x00040003, + 0x245f, 0xffffffff, 0x00000007, + 0x2460, 0xffffffff, 0x00060005, + 0x2461, 0xffffffff, 0x00090008, + 0x2462, 0xffffffff, 0x00030002, + 0x2463, 0xffffffff, 0x00050004, + 0x2464, 0xffffffff, 0x00000008, + 0x2465, 0xffffffff, 0x00070006, + 0x2466, 0xffffffff, 0x000a0009, + 0x2467, 0xffffffff, 
0x00040003, + 0x2468, 0xffffffff, 0x00060005, + 0x2469, 0xffffffff, 0x00000009, + 0x246a, 0xffffffff, 0x00080007, + 0x246b, 0xffffffff, 0x000b000a, + 0x246c, 0xffffffff, 0x00050004, + 0x246d, 0xffffffff, 0x00070006, + 0x246e, 0xffffffff, 0x0008000b, + 0x246f, 0xffffffff, 0x000a0009, + 0x2470, 0xffffffff, 0x000d000c, + 0x2471, 0xffffffff, 0x00060005, + 0x2472, 0xffffffff, 0x00080007, + 0x2473, 0xffffffff, 0x0000000b, + 0x2474, 0xffffffff, 0x000a0009, + 0x2475, 0xffffffff, 0x000d000c, + 0x2454, 0xffffffff, 0x96940200, + 0x21c2, 0xffffffff, 0x00900100, + 0x311e, 0xffffffff, 0x00000080, + 0x3101, 0xffffffff, 0x0020003f, + 0xc, 0xffffffff, 0x0000001c, + 0xd, 0x000f0000, 0x000f0000, + 0x583, 0xffffffff, 0x00000100, + 0x409, 0xffffffff, 0x00000100, + 0x40b, 0x00000101, 0x00000000, + 0x82a, 0xffffffff, 0x00000104, + 0x993, 0x000c0000, 0x000c0000, + 0x992, 0x000c0000, 0x000c0000, + 0x1579, 0xff000fff, 0x00000100, + 0x157a, 0x00000001, 0x00000001, + 0xbd4, 0x00000001, 0x00000001, + 0xc33, 0xc0000fff, 0x00000104, + 0x3079, 0x00000001, 0x00000001, + 0x3430, 0xfffffff0, 0x00000100, + 0x3630, 0xfffffff0, 0x00000100 +}; +static const u32 hainan_mgcg_cgcg_init[] = +{ + 0x3100, 0xffffffff, 0xfffffffc, + 0x200b, 0xffffffff, 0xe0000000, + 0x2698, 0xffffffff, 0x00000100, + 0x24a9, 0xffffffff, 0x00000100, + 0x3059, 0xffffffff, 0x00000100, + 0x25dd, 0xffffffff, 0x00000100, + 0x2261, 0xffffffff, 0x06000100, + 0x2286, 0xffffffff, 0x00000100, + 0x24a8, 0xffffffff, 0x00000100, + 0x30e0, 0xffffffff, 0x00000100, + 0x22ca, 0xffffffff, 0x00000100, + 0x2451, 0xffffffff, 0x00000100, + 0x2362, 0xffffffff, 0x00000100, + 0x2363, 0xffffffff, 0x00000100, + 0x240c, 0xffffffff, 0x00000100, + 0x240d, 0xffffffff, 0x00000100, + 0x240e, 0xffffffff, 0x00000100, + 0x240f, 0xffffffff, 0x00000100, + 0x2b60, 0xffffffff, 0x00000100, + 0x2b15, 0xffffffff, 0x00000100, + 0x225f, 0xffffffff, 0x06000100, + 0x261a, 0xffffffff, 0x00000100, + 0x2544, 0xffffffff, 0x00000100, + 0x2bc1, 0xffffffff, 0x00000100, + 0x2b81, 0xffffffff, 0x00000100, + 0x2527, 0xffffffff, 0x00000100, + 0x200b, 0xffffffff, 0xe0000000, + 0x2458, 0xffffffff, 0x00010000, + 0x2459, 0xffffffff, 0x00030002, + 0x245a, 0xffffffff, 0x00040007, + 0x245b, 0xffffffff, 0x00060005, + 0x245c, 0xffffffff, 0x00090008, + 0x245d, 0xffffffff, 0x00020001, + 0x245e, 0xffffffff, 0x00040003, + 0x245f, 0xffffffff, 0x00000007, + 0x2460, 0xffffffff, 0x00060005, + 0x2461, 0xffffffff, 0x00090008, + 0x2462, 0xffffffff, 0x00030002, + 0x2463, 0xffffffff, 0x00050004, + 0x2464, 0xffffffff, 0x00000008, + 0x2465, 0xffffffff, 0x00070006, + 0x2466, 0xffffffff, 0x000a0009, + 0x2467, 0xffffffff, 0x00040003, + 0x2468, 0xffffffff, 0x00060005, + 0x2469, 0xffffffff, 0x00000009, + 0x246a, 0xffffffff, 0x00080007, + 0x246b, 0xffffffff, 0x000b000a, + 0x246c, 0xffffffff, 0x00050004, + 0x246d, 0xffffffff, 0x00070006, + 0x246e, 0xffffffff, 0x0008000b, + 0x246f, 0xffffffff, 0x000a0009, + 0x2470, 0xffffffff, 0x000d000c, + 0x2471, 0xffffffff, 0x00060005, + 0x2472, 0xffffffff, 0x00080007, + 0x2473, 0xffffffff, 0x0000000b, + 0x2474, 0xffffffff, 0x000a0009, + 0x2475, 0xffffffff, 0x000d000c, + 0x2454, 0xffffffff, 0x96940200, + 0x21c2, 0xffffffff, 0x00900100, + 0x311e, 0xffffffff, 0x00000080, + 0x3101, 0xffffffff, 0x0020003f, + 0xc, 0xffffffff, 0x0000001c, + 0xd, 0x000f0000, 0x000f0000, + 0x583, 0xffffffff, 0x00000100, + 0x409, 0xffffffff, 0x00000100, + 0x82a, 0xffffffff, 0x00000104, + 0x993, 0x000c0000, 0x000c0000, + 0x992, 0x000c0000, 0x000c0000, + 0xbd4, 0x00000001, 0x00000001, + 0xc33, 0xc0000fff, 0x00000104, + 0x3079, 
0x00000001, 0x00000001, + 0x3430, 0xfffffff0, 0x00000100, + 0x3630, 0xfffffff0, 0x00000100 +}; + +static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(AMDGPU_PCIE_INDEX, reg); + (void)RREG32(AMDGPU_PCIE_INDEX); + r = RREG32(AMDGPU_PCIE_DATA); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + return r; +} + +static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(AMDGPU_PCIE_INDEX, reg); + (void)RREG32(AMDGPU_PCIE_INDEX); + WREG32(AMDGPU_PCIE_DATA, v); + (void)RREG32(AMDGPU_PCIE_DATA); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + +u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); + (void)RREG32(PCIE_PORT_INDEX); + r = RREG32(PCIE_PORT_DATA); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + return r; +} + +void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); + (void)RREG32(PCIE_PORT_INDEX); + WREG32(PCIE_PORT_DATA, (v)); + (void)RREG32(PCIE_PORT_DATA); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + +static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(SMC_IND_INDEX_0, (reg)); + r = RREG32(SMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return r; +} + +static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(SMC_IND_INDEX_0, (reg)); + WREG32(SMC_IND_DATA_0, (v)); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); +} + +static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { + {GRBM_STATUS, false}, + {GB_ADDR_CONFIG, false}, + {MC_ARB_RAMCFG, false}, + {GB_TILE_MODE0, false}, + {GB_TILE_MODE1, false}, + {GB_TILE_MODE2, false}, + {GB_TILE_MODE3, false}, + {GB_TILE_MODE4, false}, + {GB_TILE_MODE5, false}, + {GB_TILE_MODE6, false}, + {GB_TILE_MODE7, false}, + {GB_TILE_MODE8, false}, + {GB_TILE_MODE9, false}, + {GB_TILE_MODE10, false}, + {GB_TILE_MODE11, false}, + {GB_TILE_MODE12, false}, + {GB_TILE_MODE13, false}, + {GB_TILE_MODE14, false}, + {GB_TILE_MODE15, false}, + {GB_TILE_MODE16, false}, + {GB_TILE_MODE17, false}, + {GB_TILE_MODE18, false}, + {GB_TILE_MODE19, false}, + {GB_TILE_MODE20, false}, + {GB_TILE_MODE21, false}, + {GB_TILE_MODE22, false}, + {GB_TILE_MODE23, false}, + {GB_TILE_MODE24, false}, + {GB_TILE_MODE25, false}, + {GB_TILE_MODE26, false}, + {GB_TILE_MODE27, false}, + {GB_TILE_MODE28, false}, + {GB_TILE_MODE29, false}, + {GB_TILE_MODE30, false}, + {GB_TILE_MODE31, false}, + {CC_RB_BACKEND_DISABLE, false, true}, + {GC_USER_RB_BACKEND_DISABLE, false, true}, + {PA_SC_RASTER_CONFIG, false, true}, +}; + +static uint32_t si_read_indexed_register(struct amdgpu_device *adev, + u32 se_num, u32 sh_num, + u32 reg_offset) +{ + uint32_t val; + + mutex_lock(&adev->grbm_idx_mutex); + if (se_num != 0xffffffff || sh_num != 0xffffffff) + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + + val = RREG32(reg_offset); + + if (se_num != 0xffffffff || sh_num != 0xffffffff) + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + 
mutex_unlock(&adev->grbm_idx_mutex); + return val; +} + +static int si_read_register(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 reg_offset, u32 *value) +{ + uint32_t i; + + *value = 0; + for (i = 0; i < ARRAY_SIZE(si_allowed_read_registers); i++) { + if (reg_offset != si_allowed_read_registers[i].reg_offset) + continue; + + if (!si_allowed_read_registers[i].untouched) + *value = si_allowed_read_registers[i].grbm_indexed ? + si_read_indexed_register(adev, se_num, + sh_num, reg_offset) : + RREG32(reg_offset); + return 0; + } + return -EINVAL; +} + +static bool si_read_disabled_bios(struct amdgpu_device *adev) +{ + u32 bus_cntl; + u32 d1vga_control = 0; + u32 d2vga_control = 0; + u32 vga_render_control = 0; + u32 rom_cntl; + bool r; + + bus_cntl = RREG32(R600_BUS_CNTL); + if (adev->mode_info.num_crtc) { + d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); + d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); + vga_render_control = RREG32(VGA_RENDER_CONTROL); + } + rom_cntl = RREG32(R600_ROM_CNTL); + + /* enable the rom */ + WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); + if (adev->mode_info.num_crtc) { + /* Disable VGA mode */ + WREG32(AVIVO_D1VGA_CONTROL, + (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | + AVIVO_DVGA_CONTROL_TIMING_SELECT))); + WREG32(AVIVO_D2VGA_CONTROL, + (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | + AVIVO_DVGA_CONTROL_TIMING_SELECT))); + WREG32(VGA_RENDER_CONTROL, + (vga_render_control & C_000300_VGA_VSTATUS_CNTL)); + } + WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE); + + r = amdgpu_read_bios(adev); + + /* restore regs */ + WREG32(R600_BUS_CNTL, bus_cntl); + if (adev->mode_info.num_crtc) { + WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); + WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); + WREG32(VGA_RENDER_CONTROL, vga_render_control); + } + WREG32(R600_ROM_CNTL, rom_cntl); + return r; +} + +//xxx: not implemented +static int si_asic_reset(struct amdgpu_device *adev) +{ + return 0; +} + +static void si_vga_set_state(struct amdgpu_device *adev, bool state) +{ + uint32_t temp; + + temp = RREG32(CONFIG_CNTL); + if (state == false) { + temp &= ~(1<<0); + temp |= (1<<1); + } else { + temp &= ~(1<<1); + } + WREG32(CONFIG_CNTL, temp); +} + +static u32 si_get_xclk(struct amdgpu_device *adev) +{ + u32 reference_clock = adev->clock.spll.reference_freq; + u32 tmp; + + tmp = RREG32(CG_CLKPIN_CNTL_2); + if (tmp & MUX_TCLK_TO_XCLK) + return TCLK; + + tmp = RREG32(CG_CLKPIN_CNTL); + if (tmp & XTALIN_DIVIDE) + return reference_clock / 4; + + return reference_clock; +} + +//xxx:not implemented +static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) +{ + return 0; +} + +static void si_detect_hw_virtualization(struct amdgpu_device *adev) +{ + if (is_virtual_machine()) /* passthrough mode */ + adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; +} + +static const struct amdgpu_asic_funcs si_asic_funcs = +{ + .read_disabled_bios = &si_read_disabled_bios, + .detect_hw_virtualization = si_detect_hw_virtualization, + .read_register = &si_read_register, + .reset = &si_asic_reset, + .set_vga_state = &si_vga_set_state, + .get_xclk = &si_get_xclk, + .set_uvd_clocks = &si_set_uvd_clocks, + .set_vce_clocks = NULL, +}; + +static uint32_t si_get_rev_id(struct amdgpu_device *adev) +{ + return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK) + >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; +} + +static int si_common_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->smc_rreg = &si_smc_rreg; + 
adev->smc_wreg = &si_smc_wreg; + adev->pcie_rreg = &si_pcie_rreg; + adev->pcie_wreg = &si_pcie_wreg; + adev->pciep_rreg = &si_pciep_rreg; + adev->pciep_wreg = &si_pciep_wreg; + adev->uvd_ctx_rreg = NULL; + adev->uvd_ctx_wreg = NULL; + adev->didt_rreg = NULL; + adev->didt_wreg = NULL; + + adev->asic_funcs = &si_asic_funcs; + + adev->rev_id = si_get_rev_id(adev); + adev->external_rev_id = 0xFF; + switch (adev->asic_type) { + case CHIP_TAHITI: + adev->cg_flags = + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; + adev->pg_flags = 0; + break; + case CHIP_PITCAIRN: + adev->cg_flags = + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; + adev->pg_flags = 0; + break; + + case CHIP_VERDE: + adev->cg_flags = + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CGTS_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; + adev->pg_flags = 0; + //??? 
+ adev->external_rev_id = adev->rev_id + 0x14; + break; + case CHIP_OLAND: + adev->cg_flags = + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; + adev->pg_flags = 0; + break; + case CHIP_HAINAN: + adev->cg_flags = + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; + adev->pg_flags = 0; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int si_common_sw_init(void *handle) +{ + return 0; +} + +static int si_common_sw_fini(void *handle) +{ + return 0; +} + + +static void si_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_TAHITI: + amdgpu_program_register_sequence(adev, + tahiti_golden_registers, + (const u32)ARRAY_SIZE(tahiti_golden_registers)); + amdgpu_program_register_sequence(adev, + tahiti_golden_rlc_registers, + (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers)); + amdgpu_program_register_sequence(adev, + tahiti_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + tahiti_golden_registers2, + (const u32)ARRAY_SIZE(tahiti_golden_registers2)); + break; + case CHIP_PITCAIRN: + amdgpu_program_register_sequence(adev, + pitcairn_golden_registers, + (const u32)ARRAY_SIZE(pitcairn_golden_registers)); + amdgpu_program_register_sequence(adev, + pitcairn_golden_rlc_registers, + (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers)); + amdgpu_program_register_sequence(adev, + pitcairn_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init)); + case CHIP_VERDE: + amdgpu_program_register_sequence(adev, + verde_golden_registers, + (const u32)ARRAY_SIZE(verde_golden_registers)); + amdgpu_program_register_sequence(adev, + verde_golden_rlc_registers, + (const u32)ARRAY_SIZE(verde_golden_rlc_registers)); + amdgpu_program_register_sequence(adev, + verde_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + verde_pg_init, + (const u32)ARRAY_SIZE(verde_pg_init)); + break; + case CHIP_OLAND: + amdgpu_program_register_sequence(adev, + oland_golden_registers, + (const u32)ARRAY_SIZE(oland_golden_registers)); + amdgpu_program_register_sequence(adev, + oland_golden_rlc_registers, + (const u32)ARRAY_SIZE(oland_golden_rlc_registers)); + amdgpu_program_register_sequence(adev, + oland_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init)); + case CHIP_HAINAN: + amdgpu_program_register_sequence(adev, + hainan_golden_registers, + (const u32)ARRAY_SIZE(hainan_golden_registers)); + amdgpu_program_register_sequence(adev, + hainan_golden_registers2, + (const u32)ARRAY_SIZE(hainan_golden_registers2)); + amdgpu_program_register_sequence(adev, + hainan_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init)); + break; + + + default: + BUG(); + } +} + +static void si_pcie_gen3_enable(struct amdgpu_device *adev) +{ + struct pci_dev *root = adev->pdev->bus->self; + int bridge_pos, gpu_pos; + u32 
speed_cntl, mask, current_data_rate; + int ret, i; + u16 tmp16; + + if (pci_is_root_bus(adev->pdev->bus)) + return; + + if (amdgpu_pcie_gen2 == 0) + return; + + if (adev->flags & AMD_IS_APU) + return; + + ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); + if (ret != 0) + return; + + if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) + return; + + speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); + current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >> + LC_CURRENT_DATA_RATE_SHIFT; + if (mask & DRM_PCIE_SPEED_80) { + if (current_data_rate == 2) { + DRM_INFO("PCIE gen 3 link speeds already enabled\n"); + return; + } + DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); + } else if (mask & DRM_PCIE_SPEED_50) { + if (current_data_rate == 1) { + DRM_INFO("PCIE gen 2 link speeds already enabled\n"); + return; + } + DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n"); + } + + bridge_pos = pci_pcie_cap(root); + if (!bridge_pos) + return; + + gpu_pos = pci_pcie_cap(adev->pdev); + if (!gpu_pos) + return; + + if (mask & DRM_PCIE_SPEED_80) { + if (current_data_rate != 2) { + u16 bridge_cfg, gpu_cfg; + u16 bridge_cfg2, gpu_cfg2; + u32 max_lw, current_lw, tmp; + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + + tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD; + pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + + tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD; + pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + + tmp = RREG32_PCIE(PCIE_LC_STATUS1); + max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; + current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT; + + if (current_lw < max_lw) { + tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); + if (tmp & LC_RENEGOTIATION_SUPPORT) { + tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS); + tmp |= (max_lw << LC_LINK_WIDTH_SHIFT); + tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW; + WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp); + } + } + + for (i = 0; i < 10; i++) { + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16); + if (tmp16 & PCI_EXP_DEVSTA_TRPND) + break; + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg); + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg); + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2); + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2); + + tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); + tmp |= LC_SET_QUIESCE; + WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp); + + tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); + tmp |= LC_REDO_EQ; + WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp); + + mdelay(100); + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL_HAWD; + tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); + pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16); + + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16); + tmp16 &= ~PCI_EXP_LNKCTL_HAWD; + tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD); + pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16); + + pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~((1 << 4) | (7 << 9)); + tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9))); + pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16); + + pci_read_config_word(adev->pdev, gpu_pos + 
PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~((1 << 4) | (7 << 9)); + tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9))); + pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + + tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4); + tmp &= ~LC_SET_QUIESCE; + WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp); + } + } + } + + speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE; + speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE; + WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); + + pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); + tmp16 &= ~0xf; + if (mask & DRM_PCIE_SPEED_80) + tmp16 |= 3; + else if (mask & DRM_PCIE_SPEED_50) + tmp16 |= 2; + else + tmp16 |= 1; + pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16); + + speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); + speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE; + WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); + + for (i = 0; i < adev->usec_timeout; i++) { + speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); + if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0) + break; + udelay(1); + } +} + +static inline u32 si_pif_phy0_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); + r = RREG32(EVERGREEN_PIF_PHY0_DATA); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + return r; +} + +static inline void si_pif_phy0_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); + WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + +static inline u32 si_pif_phy1_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); + r = RREG32(EVERGREEN_PIF_PHY1_DATA); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + return r; +} + +static inline void si_pif_phy1_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); + WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} +static void si_program_aspm(struct amdgpu_device *adev) +{ + u32 data, orig; + bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false; + bool disable_clkreq = false; + + if (amdgpu_aspm == 0) + return; + + if (adev->flags & AMD_IS_APU) + return; + orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL); + data &= ~LC_XMIT_N_FTS_MASK; + data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN; + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data); + + orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3); + data |= LC_GO_TO_RECOVERY; + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_CNTL3, data); + + orig = data = RREG32_PCIE(PCIE_P_CNTL); + data |= P_IGNORE_EDB_ERR; + if (orig != data) + WREG32_PCIE(PCIE_P_CNTL, data); + + orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL); + data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK); + data |= LC_PMI_TO_L1_DIS; + if (!disable_l0s) + data |= LC_L0S_INACTIVITY(7); + + if (!disable_l1) { + data |= LC_L1_INACTIVITY(7); + data &= ~LC_PMI_TO_L1_DIS; + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_CNTL, data); + + if (!disable_plloff_in_l1) { + bool clk_req_support; + + orig = data = 
si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0); + data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK); + data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7); + if (orig != data) + si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data); + + orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1); + data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK); + data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7); + if (orig != data) + si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data); + + orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0); + data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK); + data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7); + if (orig != data) + si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data); + + orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1); + data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK); + data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7); + if (orig != data) + si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data); + + if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) { + orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0); + data &= ~PLL_RAMP_UP_TIME_0_MASK; + if (orig != data) + si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_0, data); + + orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_1); + data &= ~PLL_RAMP_UP_TIME_1_MASK; + if (orig != data) + si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_1, data); + + orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_2); + data &= ~PLL_RAMP_UP_TIME_2_MASK; + if (orig != data) + si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_2, data); + + orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_3); + data &= ~PLL_RAMP_UP_TIME_3_MASK; + if (orig != data) + si_pif_phy0_wreg(adev,PB0_PIF_PWRDOWN_3, data); + + orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_0); + data &= ~PLL_RAMP_UP_TIME_0_MASK; + if (orig != data) + si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_0, data); + + orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_1); + data &= ~PLL_RAMP_UP_TIME_1_MASK; + if (orig != data) + si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data); + + orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_2); + data &= ~PLL_RAMP_UP_TIME_2_MASK; + if (orig != data) + si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_2, data); + + orig = data = si_pif_phy1_rreg(adev,PB1_PIF_PWRDOWN_3); + data &= ~PLL_RAMP_UP_TIME_3_MASK; + if (orig != data) + si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_3, data); + } + orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); + data &= ~LC_DYN_LANES_PWR_STATE_MASK; + data |= LC_DYN_LANES_PWR_STATE(3); + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data); + + orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL); + data &= ~LS2_EXIT_TIME_MASK; + if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN)) + data |= LS2_EXIT_TIME(5); + if (orig != data) + si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data); + + orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL); + data &= ~LS2_EXIT_TIME_MASK; + if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN)) + data |= LS2_EXIT_TIME(5); + if (orig != data) + si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data); + + if (!disable_clkreq && + !pci_is_root_bus(adev->pdev->bus)) { + struct pci_dev *root = adev->pdev->bus->self; + u32 lnkcap; + + clk_req_support = false; + pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap & PCI_EXP_LNKCAP_CLKPM) + clk_req_support = true; + } else { + clk_req_support = false; + } + + if (clk_req_support) { + orig = 
data = RREG32_PCIE_PORT(PCIE_LC_CNTL2); + data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23; + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_CNTL2, data); + + orig = data = RREG32(THM_CLK_CNTL); + data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK); + data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1); + if (orig != data) + WREG32(THM_CLK_CNTL, data); + + orig = data = RREG32(MISC_CLK_CNTL); + data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK); + data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1); + if (orig != data) + WREG32(MISC_CLK_CNTL, data); + + orig = data = RREG32(CG_CLKPIN_CNTL); + data &= ~BCLK_AS_XCLK; + if (orig != data) + WREG32(CG_CLKPIN_CNTL, data); + + orig = data = RREG32(CG_CLKPIN_CNTL_2); + data &= ~FORCE_BIF_REFCLK_EN; + if (orig != data) + WREG32(CG_CLKPIN_CNTL_2, data); + + orig = data = RREG32(MPLL_BYPASSCLK_SEL); + data &= ~MPLL_CLKOUT_SEL_MASK; + data |= MPLL_CLKOUT_SEL(4); + if (orig != data) + WREG32(MPLL_BYPASSCLK_SEL, data); + + orig = data = RREG32(SPLL_CNTL_MODE); + data &= ~SPLL_REFCLK_SEL_MASK; + if (orig != data) + WREG32(SPLL_CNTL_MODE, data); + } + } + } else { + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_CNTL, data); + } + + orig = data = RREG32_PCIE(PCIE_CNTL2); + data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN; + if (orig != data) + WREG32_PCIE(PCIE_CNTL2, data); + + if (!disable_l0s) { + data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL); + if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) { + data = RREG32_PCIE(PCIE_LC_STATUS1); + if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) { + orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL); + data &= ~LC_L0S_INACTIVITY_MASK; + if (orig != data) + WREG32_PCIE_PORT(PCIE_LC_CNTL, data); + } + } + } +} + +static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev) +{ + int readrq; + u16 v; + + readrq = pcie_get_readrq(adev->pdev); + v = ffs(readrq) - 8; + if ((v == 0) || (v == 6) || (v == 7)) + pcie_set_readrq(adev->pdev, 512); +} + +static int si_common_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + si_fix_pci_max_read_req_size(adev); + si_init_golden_registers(adev); + si_pcie_gen3_enable(adev); + si_program_aspm(adev); + + return 0; +} + +static int si_common_hw_fini(void *handle) +{ + return 0; +} + +static int si_common_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return si_common_hw_fini(adev); +} + +static int si_common_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return si_common_hw_init(adev); +} + +static bool si_common_is_idle(void *handle) +{ + return true; +} + +static int si_common_wait_for_idle(void *handle) +{ + return 0; +} + +static int si_common_soft_reset(void *handle) +{ + return 0; +} + +static int si_common_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int si_common_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs si_common_ip_funcs = { + .name = "si_common", + .early_init = si_common_early_init, + .late_init = NULL, + .sw_init = si_common_sw_init, + .sw_fini = si_common_sw_fini, + .hw_init = si_common_hw_init, + .hw_fini = si_common_hw_fini, + .suspend = si_common_suspend, + .resume = si_common_resume, + .is_idle = si_common_is_idle, + .wait_for_idle = si_common_wait_for_idle, + .soft_reset = si_common_soft_reset, + .set_clockgating_state = si_common_set_clockgating_state, + .set_powergating_state = si_common_set_powergating_state, +}; + 
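For reference, the three-dword entries in tables such as oland_mgcg_cgcg_init and hainan_mgcg_cgcg_init above, and the golden-register tables passed to amdgpu_program_register_sequence() in si_init_golden_registers(), are {register offset, AND mask, OR value} triplets. A sketch of how such a table is typically consumed, modelled on the amdgpu_device.c helper of the same era and not part of this patch:

	/* Sketch: apply a {reg, and_mask, or_value} triplet table.
	 * Assumes the helper behaves like amdgpu_program_register_sequence()
	 * in amdgpu_device.c; shown here only to document the table format. */
	static void apply_register_sequence(struct amdgpu_device *adev,
					    const u32 *registers, u32 array_size)
	{
		u32 tmp, reg, and_mask, or_mask;
		int i;

		if (array_size % 3)
			return;		/* table must be whole triplets */

		for (i = 0; i < array_size; i += 3) {
			reg      = registers[i + 0];
			and_mask = registers[i + 1];
			or_mask  = registers[i + 2];

			if (and_mask == 0xffffffff) {
				/* full mask: write the value unconditionally */
				tmp = or_mask;
			} else {
				/* partial mask: read-modify-write */
				tmp = RREG32(reg);
				tmp &= ~and_mask;
				tmp |= or_mask;
			}
			WREG32(reg, tmp);
		}
	}

Most of the clock-gating entries above use an AND mask of 0xffffffff, so they are plain writes of the given value; the few entries with partial masks (for example 0xd, 0x000f0000, 0x000f0000) only touch the masked bits.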
+static const struct amdgpu_ip_block_version verde_ip_blocks[] = +{ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &si_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &gmc_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &si_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &dce_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &gfx_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &si_dma_ip_funcs, + }, +/* { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 3, + .minor = 1, + .rev = 0, + .funcs = &si_null_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &si_null_ip_funcs, + }, + */ +}; + + +static const struct amdgpu_ip_block_version hainan_ip_blocks[] = +{ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &si_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &gmc_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &si_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &gfx_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &si_dma_ip_funcs, + }, +}; + +int si_set_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_VERDE: + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_OLAND: + adev->ip_blocks = verde_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks); + break; + case CHIP_HAINAN: + adev->ip_blocks = hainan_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks); + break; + default: + BUG(); + } + return 0; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h b/drivers/gpu/drm/amd/amdgpu/si.h index 1cef03deeac3..959d7b63e0e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h +++ b/drivers/gpu/drm/amd/amdgpu/si.h @@ -1,5 +1,5 @@ /* - * Copyright 2014 Advanced Micro Devices, Inc. + * Copyright 2015 Advanced Micro Devices, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -21,22 +21,13 @@ * */ -#ifndef FIJI_SMUMGR_H -#define FIJI_SMUMGR_H +#ifndef __SI_H__ +#define __SI_H__ -#include "fiji_ppsmc.h" +extern const struct amd_ip_funcs si_common_ip_funcs; -int fiji_smu_init(struct amdgpu_device *adev); -int fiji_smu_fini(struct amdgpu_device *adev); -int fiji_smu_start(struct amdgpu_device *adev); - -struct fiji_smu_private_data -{ - uint8_t *header; - uint32_t smu_buffer_addr_high; - uint32_t smu_buffer_addr_low; - uint32_t header_addr_high; - uint32_t header_addr_low; -}; +void si_srbm_select(struct amdgpu_device *adev, + u32 me, u32 pipe, u32 queue, u32 vmid); +int si_set_ip_blocks(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c new file mode 100644 index 000000000000..de358193a8f9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -0,0 +1,915 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ +#include <drm/drmP.h> +#include "amdgpu.h" +#include "amdgpu_trace.h" +#include "si/sid.h" + +const u32 sdma_offsets[SDMA_MAX_INSTANCE] = +{ + DMA0_REGISTER_OFFSET, + DMA1_REGISTER_OFFSET +}; + +static void si_dma_set_ring_funcs(struct amdgpu_device *adev); +static void si_dma_set_buffer_funcs(struct amdgpu_device *adev); +static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev); +static void si_dma_set_irq_funcs(struct amdgpu_device *adev); + +static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring) +{ + return ring->adev->wb.wb[ring->rptr_offs>>2]; +} + +static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; + + return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2; +} + +static void si_dma_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; + + WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc); +} + +static void si_dma_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) +{ + /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. + * Pad as necessary with NOPs. 
+ */ + while ((ring->wptr & 7) != 5) + amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); + amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0)); + amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); + amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); + +} + +static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); + amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL)); + amdgpu_ring_write(ring, 1); +} + +static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); + amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0)); + amdgpu_ring_write(ring, 1); +} + +/** + * si_dma_ring_emit_fence - emit a fence on the DMA ring + * + * @ring: amdgpu ring pointer + * @fence: amdgpu fence object + * + * Add a DMA fence packet to the ring to write + * the fence seq number and DMA trap packet to generate + * an interrupt if needed (VI). + */ +static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags) +{ + + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; + /* write the fence */ + amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0)); + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff)); + amdgpu_ring_write(ring, seq); + /* optionally write high bits as well */ + if (write64bit) { + addr += 4; + amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0)); + amdgpu_ring_write(ring, addr & 0xfffffffc); + amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff)); + amdgpu_ring_write(ring, upper_32_bits(seq)); + } + /* generate an interrupt */ + amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0)); +} + +static void si_dma_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + u32 rb_cntl; + unsigned i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; + /* dma0 */ + rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]); + rb_cntl &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl); + + if (adev->mman.buffer_funcs_ring == ring) + amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); + ring->ready = false; + } +} + +static int si_dma_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz; + int i, r; + uint64_t rptr_addr; + + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; + + WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0); + WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); + + /* Set ring buffer size in dwords */ + rb_bufsz = order_base_2(ring->ring_size / 4); + rb_cntl = rb_bufsz << 1; +#ifdef __BIG_ENDIAN + rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; +#endif + WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32(DMA_RB_RPTR + sdma_offsets[i], 0); + WREG32(DMA_RB_WPTR + sdma_offsets[i], 0); + + rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); + + WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr)); + WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF); + + rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; + + WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8); + + /* enable DMA IBs */ + ib_cntl = DMA_IB_ENABLE | 
CMD_VMID_FORCE; +#ifdef __BIG_ENDIAN + ib_cntl |= DMA_IB_SWAP_ENABLE; +#endif + WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl); + + dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]); + dma_cntl &= ~CTXEMPTY_INT_ENABLE; + WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl); + + ring->wptr = 0; + WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2); + WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE); + + ring->ready = true; + + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + return r; + } + + if (adev->mman.buffer_funcs_ring == ring) + amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); + } + + return 0; +} + +/** + * si_dma_ring_test_ring - simple async dma engine test + * + * @ring: amdgpu_ring structure holding ring information + * + * Test the DMA engine by writing using it to write an + * value to memory. (VI). + * Returns 0 for success, error for failure. + */ +static int si_dma_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + unsigned i; + unsigned index; + int r; + u32 tmp; + u64 gpu_addr; + + r = amdgpu_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + return r; + } + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + + r = amdgpu_ring_alloc(ring, 4); + if (r) { + DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); + amdgpu_wb_free(adev, index); + return r; + } + + amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1)); + amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i < adev->usec_timeout) { + DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + } else { + DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", + ring->idx, tmp); + r = -EINVAL; + } + amdgpu_wb_free(adev, index); + + return r; +} + +/** + * si_dma_ring_test_ib - test an IB on the DMA engine + * + * @ring: amdgpu_ring structure holding ring information + * + * Test a simple IB in the DMA ring (VI). + * Returns 0 on success, error on failure. 
+ */ +static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ib ib; + struct fence *f = NULL; + unsigned index; + u32 tmp = 0; + u64 gpu_addr; + long r; + + r = amdgpu_wb_get(adev, &index); + if (r) { + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); + return r; + } + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + memset(&ib, 0, sizeof(ib)); + r = amdgpu_ib_get(adev, NULL, 256, &ib); + if (r) { + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + goto err0; + } + + ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1); + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff; + ib.ptr[3] = 0xDEADBEEF; + ib.length_dw = 4; + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); + if (r) + goto err1; + + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; + goto err1; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; + } else { + DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); + r = -EINVAL; + } + +err1: + amdgpu_ib_free(adev, &ib, NULL); + fence_put(f); +err0: + amdgpu_wb_free(adev, index); + return r; +} + +/** + * cik_dma_vm_copy_pte - update PTEs by copying them from the GART + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @src: src addr to copy from + * @count: number of page entries to update + * + * Update PTEs by copying them from the GART using DMA (SI). + */ +static void si_dma_vm_copy_pte(struct amdgpu_ib *ib, + uint64_t pe, uint64_t src, + unsigned count) +{ + unsigned bytes = count * 8; + + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, + 1, 0, 0, bytes); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff; +} + +/** + * si_dma_vm_write_pte - update PTEs by writing them manually + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @value: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * + * Update PTEs by writing them manually using DMA (SI). + */ +static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) +{ + unsigned ndw = count * 2; + + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + for (; ndw > 0; ndw -= 2) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; + } +} + +/** + * si_dma_vm_set_pte_pde - update the page tables using sDMA + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update the page tables using sDMA (CIK). 
+ */ +static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint64_t value; + unsigned ndw; + + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + if (flags & AMDGPU_PTE_VALID) + value = addr; + else + value = 0; + + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); + ib->ptr[ib->length_dw++] = pe; /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = value; /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(value); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + pe += ndw * 4; + addr += (ndw / 2) * incr; + count -= ndw / 2; + } +} + +/** + * si_dma_pad_ib - pad the IB to the required number of dw + * + * @ib: indirect buffer to fill with padding + * + */ +static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) +{ + while (ib->length_dw & 0x7) + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); +} + +/** + * cik_sdma_ring_emit_pipeline_sync - sync the pipeline + * + * @ring: amdgpu_ring pointer + * + * Make sure all previous operations are completed (CIK). + */ +static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + /* wait for idle */ + amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) | + (1 << 27)); /* Poll memory */ + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */ + amdgpu_ring_write(ring, 0xffffffff); /* mask */ + amdgpu_ring_write(ring, seq); /* value */ + amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */ +} + +/** + * si_dma_ring_emit_vm_flush - cik vm flush using sDMA + * + * @ring: amdgpu_ring pointer + * @vm: amdgpu_vm pointer + * + * Update the page table base and flush the VM TLB + * using sDMA (VI). 
+ */ +static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) +{ + amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); + if (vm_id < 8) + amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); + else + amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8))); + amdgpu_ring_write(ring, pd_addr >> 12); + + /* bits 0-7 are the VM contexts0-7 */ + amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); + amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST)); + amdgpu_ring_write(ring, 1 << vm_id); + + /* wait for invalidate to complete */ + amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0)); + amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST); + amdgpu_ring_write(ring, 0xff << 16); /* retry */ + amdgpu_ring_write(ring, 1 << vm_id); /* mask */ + amdgpu_ring_write(ring, 0); /* value */ + amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ +} + +static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring) +{ + return + 7 + 3; /* si_dma_ring_emit_ib */ +} + +static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring) +{ + return + 3 + /* si_dma_ring_emit_hdp_flush */ + 3 + /* si_dma_ring_emit_hdp_invalidate */ + 6 + /* si_dma_ring_emit_pipeline_sync */ + 12 + /* si_dma_ring_emit_vm_flush */ + 9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */ +} + +static int si_dma_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->sdma.num_instances = 2; + + si_dma_set_ring_funcs(adev); + si_dma_set_buffer_funcs(adev); + si_dma_set_vm_pte_funcs(adev); + si_dma_set_irq_funcs(adev); + + return 0; +} + +static int si_dma_sw_init(void *handle) +{ + struct amdgpu_ring *ring; + int r, i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* DMA0 trap event */ + r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq); + if (r) + return r; + + /* DMA1 trap event */ + r = amdgpu_irq_add_id(adev, 244, &adev->sdma.trap_irq_1); + if (r) + return r; + + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; + ring->ring_obj = NULL; + ring->use_doorbell = false; + sprintf(ring->name, "sdma%d", i); + r = amdgpu_ring_init(adev, ring, 1024, + DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf, + &adev->sdma.trap_irq, + (i == 0) ? 
+ AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1, + AMDGPU_RING_TYPE_SDMA); + if (r) + return r; + } + + return r; +} + +static int si_dma_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) + amdgpu_ring_fini(&adev->sdma.instance[i].ring); + + return 0; +} + +static int si_dma_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return si_dma_start(adev); +} + +static int si_dma_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + si_dma_stop(adev); + + return 0; +} + +static int si_dma_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return si_dma_hw_fini(adev); +} + +static int si_dma_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return si_dma_hw_init(adev); +} + +static bool si_dma_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 tmp = RREG32(SRBM_STATUS2); + + if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK)) + return false; + + return true; +} + +static int si_dma_wait_for_idle(void *handle) +{ + unsigned i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->usec_timeout; i++) { + if (si_dma_is_idle(handle)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static int si_dma_soft_reset(void *handle) +{ + DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n"); + return 0; +} + +static int si_dma_set_trap_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 sdma_cntl; + + switch (type) { + case AMDGPU_SDMA_IRQ_TRAP0: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET); + sdma_cntl &= ~TRAP_ENABLE; + WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET); + sdma_cntl |= TRAP_ENABLE; + WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl); + break; + default: + break; + } + break; + case AMDGPU_SDMA_IRQ_TRAP1: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET); + sdma_cntl &= ~TRAP_ENABLE; + WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl); + break; + case AMDGPU_IRQ_STATE_ENABLE: + sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET); + sdma_cntl |= TRAP_ENABLE; + WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl); + break; + default: + break; + } + break; + default: + break; + } + return 0; +} + +static int si_dma_process_trap_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + amdgpu_fence_process(&adev->sdma.instance[0].ring); + + return 0; +} + +static int si_dma_process_trap_irq_1(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + amdgpu_fence_process(&adev->sdma.instance[1].ring); + + return 0; +} + +static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_ERROR("Illegal instruction in SDMA command stream\n"); + schedule_work(&adev->reset_work); + return 0; +} + +static int si_dma_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + u32 orig, data, offset; + int i; + bool enable; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + enable = 
(state == AMD_CG_STATE_GATE) ? true : false; + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + if (i == 0) + offset = DMA0_REGISTER_OFFSET; + else + offset = DMA1_REGISTER_OFFSET; + orig = data = RREG32(DMA_POWER_CNTL + offset); + data &= ~MEM_POWER_OVERRIDE; + if (data != orig) + WREG32(DMA_POWER_CNTL + offset, data); + WREG32(DMA_CLK_CTRL + offset, 0x00000100); + } + } else { + for (i = 0; i < adev->sdma.num_instances; i++) { + if (i == 0) + offset = DMA0_REGISTER_OFFSET; + else + offset = DMA1_REGISTER_OFFSET; + orig = data = RREG32(DMA_POWER_CNTL + offset); + data |= MEM_POWER_OVERRIDE; + if (data != orig) + WREG32(DMA_POWER_CNTL + offset, data); + + orig = data = RREG32(DMA_CLK_CTRL + offset); + data = 0xff000000; + if (data != orig) + WREG32(DMA_CLK_CTRL + offset, data); + } + } + + return 0; +} + +static int si_dma_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + u32 tmp; + + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + WREG32(DMA_PGFSM_WRITE, 0x00002000); + WREG32(DMA_PGFSM_CONFIG, 0x100010ff); + + for (tmp = 0; tmp < 5; tmp++) + WREG32(DMA_PGFSM_WRITE, 0); + + return 0; +} + +const struct amd_ip_funcs si_dma_ip_funcs = { + .name = "si_dma", + .early_init = si_dma_early_init, + .late_init = NULL, + .sw_init = si_dma_sw_init, + .sw_fini = si_dma_sw_fini, + .hw_init = si_dma_hw_init, + .hw_fini = si_dma_hw_fini, + .suspend = si_dma_suspend, + .resume = si_dma_resume, + .is_idle = si_dma_is_idle, + .wait_for_idle = si_dma_wait_for_idle, + .soft_reset = si_dma_soft_reset, + .set_clockgating_state = si_dma_set_clockgating_state, + .set_powergating_state = si_dma_set_powergating_state, +}; + +static const struct amdgpu_ring_funcs si_dma_ring_funcs = { + .get_rptr = si_dma_ring_get_rptr, + .get_wptr = si_dma_ring_get_wptr, + .set_wptr = si_dma_ring_set_wptr, + .parse_cs = NULL, + .emit_ib = si_dma_ring_emit_ib, + .emit_fence = si_dma_ring_emit_fence, + .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync, + .emit_vm_flush = si_dma_ring_emit_vm_flush, + .emit_hdp_flush = si_dma_ring_emit_hdp_flush, + .emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate, + .test_ring = si_dma_ring_test_ring, + .test_ib = si_dma_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = si_dma_ring_pad_ib, + .get_emit_ib_size = si_dma_ring_get_emit_ib_size, + .get_dma_frame_size = si_dma_ring_get_dma_frame_size, +}; + +static void si_dma_set_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) + adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs; +} + +static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = { + .set = si_dma_set_trap_irq_state, + .process = si_dma_process_trap_irq, +}; + +static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = { + .set = si_dma_set_trap_irq_state, + .process = si_dma_process_trap_irq_1, +}; + +static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = { + .process = si_dma_process_illegal_inst_irq, +}; + +static void si_dma_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; + adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs; + adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1; + adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs; +} + +/** + * si_dma_emit_copy_buffer - copy buffer using the sDMA engine + * + * @ring: amdgpu_ring structure holding ring information + * @src_offset: src GPU address 
+ * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * + * Copy GPU buffers using the DMA engine (VI). + * Used by the amdgpu ttm implementation to move pages if + * registered as the asic copy callback. + */ +static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib, + uint64_t src_offset, + uint64_t dst_offset, + uint32_t byte_count) +{ + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, + 1, 0, 0, byte_count); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff; + ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff; +} + +/** + * si_dma_emit_fill_buffer - fill buffer using the sDMA engine + * + * @ring: amdgpu_ring structure holding ring information + * @src_data: value to write to buffer + * @dst_offset: dst GPU address + * @byte_count: number of bytes to xfer + * + * Fill GPU buffers using the DMA engine (VI). + */ +static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib, + uint32_t src_data, + uint64_t dst_offset, + uint32_t byte_count) +{ + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL, + 0, 0, 0, byte_count / 4); + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); + ib->ptr[ib->length_dw++] = src_data; + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16; +} + + +static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = { + .copy_max_bytes = 0xffff8, + .copy_num_dw = 5, + .emit_copy_buffer = si_dma_emit_copy_buffer, + + .fill_max_bytes = 0xffff8, + .fill_num_dw = 4, + .emit_fill_buffer = si_dma_emit_fill_buffer, +}; + +static void si_dma_set_buffer_funcs(struct amdgpu_device *adev) +{ + if (adev->mman.buffer_funcs == NULL) { + adev->mman.buffer_funcs = &si_dma_buffer_funcs; + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; + } +} + +static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = { + .copy_pte = si_dma_vm_copy_pte, + .write_pte = si_dma_vm_write_pte, + .set_pte_pde = si_dma_vm_set_pte_pde, +}; + +static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev) +{ + unsigned i; + + if (adev->vm_manager.vm_pte_funcs == NULL) { + adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs; + for (i = 0; i < adev->sdma.num_instances; i++) + adev->vm_manager.vm_pte_rings[i] = + &adev->sdma.instance[i].ring; + + adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h new file mode 100644 index 000000000000..3a3e0c78a54b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h @@ -0,0 +1,29 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __SI_DMA_H__ +#define __SI_DMA_H__ + +extern const struct amd_ip_funcs si_dma_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c new file mode 100644 index 000000000000..8bd08925b370 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -0,0 +1,8006 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_dpm.h" +#include "amdgpu_atombios.h" +#include "si/sid.h" +#include "r600_dpm.h" +#include "si_dpm.h" +#include "atom.h" +#include "../include/pptable.h" +#include <linux/math64.h> +#include <linux/seq_file.h> +#include <linux/firmware.h> + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define SMC_RAM_END 0x20000 + +#define SCLK_MIN_DEEPSLEEP_FREQ 1350 + + +/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 + +#define BIOS_SCRATCH_4 0x5cd + +MODULE_FIRMWARE("radeon/tahiti_smc.bin"); +MODULE_FIRMWARE("radeon/tahiti_k_smc.bin"); +MODULE_FIRMWARE("radeon/pitcairn_smc.bin"); +MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin"); +MODULE_FIRMWARE("radeon/verde_smc.bin"); +MODULE_FIRMWARE("radeon/verde_k_smc.bin"); +MODULE_FIRMWARE("radeon/oland_smc.bin"); +MODULE_FIRMWARE("radeon/oland_k_smc.bin"); +MODULE_FIRMWARE("radeon/hainan_smc.bin"); +MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); + +union power_info { + struct _ATOM_POWERPLAY_INFO info; + struct _ATOM_POWERPLAY_INFO_V2 info_2; + struct _ATOM_POWERPLAY_INFO_V3 info_3; + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; + struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; + struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; +}; + +union fan_info { + struct _ATOM_PPLIB_FANTABLE fan; + struct 
_ATOM_PPLIB_FANTABLE2 fan2; + struct _ATOM_PPLIB_FANTABLE3 fan3; +}; + +union pplib_clock_info { + struct _ATOM_PPLIB_R600_CLOCK_INFO r600; + struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; + struct _ATOM_PPLIB_SI_CLOCK_INFO si; +}; + +static const u32 r600_utc[R600_PM_NUMBER_OF_TC] = +{ + R600_UTC_DFLT_00, + R600_UTC_DFLT_01, + R600_UTC_DFLT_02, + R600_UTC_DFLT_03, + R600_UTC_DFLT_04, + R600_UTC_DFLT_05, + R600_UTC_DFLT_06, + R600_UTC_DFLT_07, + R600_UTC_DFLT_08, + R600_UTC_DFLT_09, + R600_UTC_DFLT_10, + R600_UTC_DFLT_11, + R600_UTC_DFLT_12, + R600_UTC_DFLT_13, + R600_UTC_DFLT_14, +}; + +static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = +{ + R600_DTC_DFLT_00, + R600_DTC_DFLT_01, + R600_DTC_DFLT_02, + R600_DTC_DFLT_03, + R600_DTC_DFLT_04, + R600_DTC_DFLT_05, + R600_DTC_DFLT_06, + R600_DTC_DFLT_07, + R600_DTC_DFLT_08, + R600_DTC_DFLT_09, + R600_DTC_DFLT_10, + R600_DTC_DFLT_11, + R600_DTC_DFLT_12, + R600_DTC_DFLT_13, + R600_DTC_DFLT_14, +}; + +static const struct si_cac_config_reg cac_weights_tahiti[] = +{ + { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND 
}, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg lcac_tahiti[] = +{ + { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 
0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } + +}; + +static const struct si_cac_config_reg cac_override_tahiti[] = +{ + { 0xFFFFFFFF } +}; + +static const struct si_powertune_data powertune_data_tahiti = +{ + ((1 << 16) | 27027), + 6, + 0, + 4, + 95, + { + 0UL, + 0UL, + 4521550UL, + 309631529UL, + -1270850L, + 4513710L, + 40 + }, + 595000000UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static const struct si_dte_data dte_data_tahiti = +{ + { 1159409, 0, 0, 0, 0 }, + { 777, 0, 0, 0, 0 }, + 2, + 54000, + 127000, + 25, + 2, + 10, + 13, + { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 }, + { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 }, + { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 }, + 85, + false +}; + +#if 0 +static const struct 
si_dte_data dte_data_tahiti_le = +{ + { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 }, + { 0x7D, 0x7D, 0x4E4, 0xB00, 0 }, + 0x5, + 0xAFC8, + 0x64, + 0x32, + 1, + 0, + 0x10, + { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 }, + { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 }, + { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 }, + 85, + true +}; +#endif + +static const struct si_dte_data dte_data_tahiti_pro = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_new_zealand = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 }, + { 0x29B, 0x3E9, 0x537, 0x7D2, 0 }, + 0x5, + 0xAFC8, + 0x69, + 0x32, + 1, + 0, + 0x10, + { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 }, + 85, + true +}; + +static const struct si_dte_data dte_data_aruba_pro = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_malta = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_cac_config_reg cac_weights_pitcairn[] = +{ + { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND }, + { 
0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg lcac_pitcairn[] = +{ + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 
0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x00000001, 0, 0x1, 
SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_override_pitcairn[] = +{ + { 0xFFFFFFFF } +}; + +static const struct si_powertune_data powertune_data_pitcairn = +{ + ((1 << 16) | 27027), + 5, + 0, + 6, + 100, + { + 51600000UL, + 1800000UL, + 7194395UL, + 309631529UL, + -1270850L, + 4513710L, + 100 + }, + 117830498UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static const struct si_dte_data dte_data_pitcairn = +{ + { 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0 }, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + 0, + false +}; + +static const struct si_dte_data dte_data_curacao_xt = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_curacao_pro = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_neptune_xt = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_cac_config_reg cac_weights_chelsea_pro[] = +{ + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x4F, 
SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_chelsea_xt[] = +{ + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, + 
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_heathrow[] = +{ + { 0x0, 0x0000ffff, 0, 0x82, 
SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_cape_verde_pro[] = 
+{ + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct 
si_cac_config_reg cac_weights_cape_verde[] = +{ + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF 
} +}; + +static const struct si_cac_config_reg lcac_cape_verde[] = +{ + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_override_cape_verde[] = +{ + { 0xFFFFFFFF } +}; + +static const struct si_powertune_data powertune_data_cape_verde = +{ + ((1 << 16) | 0x6993), + 5, + 0, + 7, + 105, + { + 0UL, + 0UL, + 7194395UL, + 309631529UL, + -1270850L, + 4513710L, + 
100 + }, + 117830498UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static const struct si_dte_data dte_data_cape_verde = +{ + { 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0 }, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + 0, + false +}; + +static const struct si_dte_data dte_data_venus_xtx = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 }, + 5, + 55000, + 0x69, + 0xA, + 1, + 0, + 0x3, + { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_venus_xt = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 }, + 5, + 55000, + 0x69, + 0xA, + 1, + 0, + 0x3, + { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_venus_pro = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 }, + 5, + 55000, + 0x69, + 0xA, + 1, + 0, + 0x3, + { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_cac_config_reg cac_weights_oland[] = +{ + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, 
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_mars_pro[] = +{ + { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, 
SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_mars_xt[] = +{ + { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0xB, 
SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_oland_pro[] = +{ + { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x5, 
SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_oland_xt[] = +{ + { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x5, 
SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg lcac_oland[] = +{ + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x0001fffe, 1, 0x1, 
SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg lcac_mars_pro[] = +{ + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x00000001, 0, 
0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_override_oland[] = +{ + { 0xFFFFFFFF } +}; + +static const struct si_powertune_data powertune_data_oland = +{ + ((1 << 16) | 0x6993), + 5, + 0, + 7, + 105, + { + 0UL, + 0UL, + 7194395UL, + 309631529UL, + -1270850L, + 4513710L, + 100 + }, + 117830498UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static const struct si_powertune_data powertune_data_mars_pro = +{ + ((1 << 16) | 0x6993), + 5, + 0, + 7, + 105, + { + 0UL, + 0UL, + 7194395UL, + 309631529UL, + -1270850L, + 4513710L, + 100 + }, + 117830498UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static const struct si_dte_data dte_data_oland = +{ + { 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0 }, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + 0, + false +}; + +static const struct si_dte_data dte_data_mars_pro = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 55000, + 105, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_sun_xt = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 55000, + 105, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + + +static const struct si_cac_config_reg cac_weights_hainan[] = +{ + { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND }, + { 
0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_powertune_data powertune_data_hainan = +{ + ((1 << 16) | 0x6993), + 5, + 0, + 9, + 105, + { + 0UL, + 0UL, + 7194395UL, + 309631529UL, + -1270850L, + 4513710L, + 100 + }, + 117830498UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev); +static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev); +static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev); +static struct si_ps *si_get_ps(struct amdgpu_ps *rps); + +static int si_populate_voltage_value(struct amdgpu_device *adev, + const struct atom_voltage_table *table, + u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage); +static int si_get_std_voltage_value(struct amdgpu_device *adev, + SISLANDS_SMC_VOLTAGE_VALUE *voltage, + u16 *std_voltage); +static int si_write_smc_soft_register(struct amdgpu_device *adev, + u16 reg_offset, u32 value); +static int si_convert_power_level_to_smc(struct amdgpu_device *adev, + struct rv7xx_pl *pl, + SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level); +static int si_calculate_sclk_params(struct amdgpu_device *adev, + u32 engine_clock, + SISLANDS_SMC_SCLK_VALUE *sclk); + +static void si_thermal_start_smc_fan_control(struct amdgpu_device 
*adev); +static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev); +static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev); +static void si_dpm_set_irq_funcs(struct amdgpu_device *adev); + +static struct si_power_info *si_get_pi(struct amdgpu_device *adev) +{ + struct si_power_info *pi = adev->pm.dpm.priv; + return pi; +} + +static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff, + u16 v, s32 t, u32 ileakage, u32 *leakage) +{ + s64 kt, kv, leakage_w, i_leakage, vddc; + s64 temperature, t_slope, t_intercept, av, bv, t_ref; + s64 tmp; + + i_leakage = div64_s64(drm_int2fixp(ileakage), 100); + vddc = div64_s64(drm_int2fixp(v), 1000); + temperature = div64_s64(drm_int2fixp(t), 1000); + + t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000); + t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000); + av = div64_s64(drm_int2fixp(coeff->av), 100000000); + bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); + t_ref = drm_int2fixp(coeff->t_ref); + + tmp = drm_fixp_mul(t_slope, vddc) + t_intercept; + kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature)); + kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref))); + kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); + + leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); + + *leakage = drm_fixp2int(leakage_w * 1000); +} + +static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev, + const struct ni_leakage_coeffients *coeff, + u16 v, + s32 t, + u32 i_leakage, + u32 *leakage) +{ + si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage); +} + +static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff, + const u32 fixed_kt, u16 v, + u32 ileakage, u32 *leakage) +{ + s64 kt, kv, leakage_w, i_leakage, vddc; + + i_leakage = div64_s64(drm_int2fixp(ileakage), 100); + vddc = div64_s64(drm_int2fixp(v), 1000); + + kt = div64_s64(drm_int2fixp(fixed_kt), 100000000); + kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000), + drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc))); + + leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); + + *leakage = drm_fixp2int(leakage_w * 1000); +} + +static void si_calculate_leakage_for_v(struct amdgpu_device *adev, + const struct ni_leakage_coeffients *coeff, + const u32 fixed_kt, + u16 v, + u32 i_leakage, + u32 *leakage) +{ + si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage); +} + + +static void si_update_dte_from_pl2(struct amdgpu_device *adev, + struct si_dte_data *dte_data) +{ + u32 p_limit1 = adev->pm.dpm.tdp_limit; + u32 p_limit2 = adev->pm.dpm.near_tdp_limit; + u32 k = dte_data->k; + u32 t_max = dte_data->max_t; + u32 t_split[5] = { 10, 15, 20, 25, 30 }; + u32 t_0 = dte_data->t0; + u32 i; + + if (p_limit2 != 0 && p_limit2 <= p_limit1) { + dte_data->tdep_count = 3; + + for (i = 0; i < k; i++) { + dte_data->r[i] = + (t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) / + (p_limit2 * (u32)100); + } + + dte_data->tdep_r[1] = dte_data->r[4] * 2; + + for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) { + dte_data->tdep_r[i] = dte_data->r[4]; + } + } else { + DRM_ERROR("Invalid PL2! 
DTE will not be updated.\n"); + } +} + +static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = adev->pm.dpm.priv; + + return pi; +} + +static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev) +{ + struct ni_power_info *pi = adev->pm.dpm.priv; + + return pi; +} + +static struct si_ps *si_get_ps(struct amdgpu_ps *aps) +{ + struct si_ps *ps = aps->ps_priv; + + return ps; +} + +static void si_initialize_powertune_defaults(struct amdgpu_device *adev) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + bool update_dte_from_pl2 = false; + + if (adev->asic_type == CHIP_TAHITI) { + si_pi->cac_weights = cac_weights_tahiti; + si_pi->lcac_config = lcac_tahiti; + si_pi->cac_override = cac_override_tahiti; + si_pi->powertune_data = &powertune_data_tahiti; + si_pi->dte_data = dte_data_tahiti; + + switch (adev->pdev->device) { + case 0x6798: + si_pi->dte_data.enable_dte_by_default = true; + break; + case 0x6799: + si_pi->dte_data = dte_data_new_zealand; + break; + case 0x6790: + case 0x6791: + case 0x6792: + case 0x679E: + si_pi->dte_data = dte_data_aruba_pro; + update_dte_from_pl2 = true; + break; + case 0x679B: + si_pi->dte_data = dte_data_malta; + update_dte_from_pl2 = true; + break; + case 0x679A: + si_pi->dte_data = dte_data_tahiti_pro; + update_dte_from_pl2 = true; + break; + default: + if (si_pi->dte_data.enable_dte_by_default == true) + DRM_ERROR("DTE is not enabled!\n"); + break; + } + } else if (adev->asic_type == CHIP_PITCAIRN) { + si_pi->cac_weights = cac_weights_pitcairn; + si_pi->lcac_config = lcac_pitcairn; + si_pi->cac_override = cac_override_pitcairn; + si_pi->powertune_data = &powertune_data_pitcairn; + + switch (adev->pdev->device) { + case 0x6810: + case 0x6818: + si_pi->dte_data = dte_data_curacao_xt; + update_dte_from_pl2 = true; + break; + case 0x6819: + case 0x6811: + si_pi->dte_data = dte_data_curacao_pro; + update_dte_from_pl2 = true; + break; + case 0x6800: + case 0x6806: + si_pi->dte_data = dte_data_neptune_xt; + update_dte_from_pl2 = true; + break; + default: + si_pi->dte_data = dte_data_pitcairn; + break; + } + } else if (adev->asic_type == CHIP_VERDE) { + si_pi->lcac_config = lcac_cape_verde; + si_pi->cac_override = cac_override_cape_verde; + si_pi->powertune_data = &powertune_data_cape_verde; + + switch (adev->pdev->device) { + case 0x683B: + case 0x683F: + case 0x6829: + case 0x6835: + si_pi->cac_weights = cac_weights_cape_verde_pro; + si_pi->dte_data = dte_data_cape_verde; + break; + case 0x682C: + si_pi->cac_weights = cac_weights_cape_verde_pro; + si_pi->dte_data = dte_data_sun_xt; + break; + case 0x6825: + case 0x6827: + si_pi->cac_weights = cac_weights_heathrow; + si_pi->dte_data = dte_data_cape_verde; + break; + case 0x6824: + case 0x682D: + si_pi->cac_weights = cac_weights_chelsea_xt; + si_pi->dte_data = dte_data_cape_verde; + break; + case 0x682F: + si_pi->cac_weights = cac_weights_chelsea_pro; + si_pi->dte_data = dte_data_cape_verde; + break; + case 0x6820: + si_pi->cac_weights = cac_weights_heathrow; + si_pi->dte_data = dte_data_venus_xtx; + break; + case 0x6821: + si_pi->cac_weights = cac_weights_heathrow; + si_pi->dte_data = dte_data_venus_xt; + break; + case 0x6823: + case 0x682B: + case 0x6822: + case 0x682A: + si_pi->cac_weights = cac_weights_chelsea_pro; + si_pi->dte_data = dte_data_venus_pro; + break; + default: + si_pi->cac_weights = cac_weights_cape_verde; + si_pi->dte_data = dte_data_cape_verde; + break; + } + } else if (adev->asic_type == 
CHIP_OLAND) { + si_pi->lcac_config = lcac_mars_pro; + si_pi->cac_override = cac_override_oland; + si_pi->powertune_data = &powertune_data_mars_pro; + si_pi->dte_data = dte_data_mars_pro; + + switch (adev->pdev->device) { + case 0x6601: + case 0x6621: + case 0x6603: + case 0x6605: + si_pi->cac_weights = cac_weights_mars_pro; + update_dte_from_pl2 = true; + break; + case 0x6600: + case 0x6606: + case 0x6620: + case 0x6604: + si_pi->cac_weights = cac_weights_mars_xt; + update_dte_from_pl2 = true; + break; + case 0x6611: + case 0x6613: + case 0x6608: + si_pi->cac_weights = cac_weights_oland_pro; + update_dte_from_pl2 = true; + break; + case 0x6610: + si_pi->cac_weights = cac_weights_oland_xt; + update_dte_from_pl2 = true; + break; + default: + si_pi->cac_weights = cac_weights_oland; + si_pi->lcac_config = lcac_oland; + si_pi->cac_override = cac_override_oland; + si_pi->powertune_data = &powertune_data_oland; + si_pi->dte_data = dte_data_oland; + break; + } + } else if (adev->asic_type == CHIP_HAINAN) { + si_pi->cac_weights = cac_weights_hainan; + si_pi->lcac_config = lcac_oland; + si_pi->cac_override = cac_override_oland; + si_pi->powertune_data = &powertune_data_hainan; + si_pi->dte_data = dte_data_sun_xt; + update_dte_from_pl2 = true; + } else { + DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n"); + return; + } + + ni_pi->enable_power_containment = false; + ni_pi->enable_cac = false; + ni_pi->enable_sq_ramping = false; + si_pi->enable_dte = false; + + if (si_pi->powertune_data->enable_powertune_by_default) { + ni_pi->enable_power_containment = true; + ni_pi->enable_cac = true; + if (si_pi->dte_data.enable_dte_by_default) { + si_pi->enable_dte = true; + if (update_dte_from_pl2) + si_update_dte_from_pl2(adev, &si_pi->dte_data); + + } + ni_pi->enable_sq_ramping = true; + } + + ni_pi->driver_calculate_cac_leakage = true; + ni_pi->cac_configuration_required = true; + + if (ni_pi->cac_configuration_required) { + ni_pi->support_cac_long_term_average = true; + si_pi->dyn_powertune_data.l2_lta_window_size = + si_pi->powertune_data->l2_lta_window_size_default; + si_pi->dyn_powertune_data.lts_truncate = + si_pi->powertune_data->lts_truncate_default; + } else { + ni_pi->support_cac_long_term_average = false; + si_pi->dyn_powertune_data.l2_lta_window_size = 0; + si_pi->dyn_powertune_data.lts_truncate = 0; + } + + si_pi->dyn_powertune_data.disable_uvd_powertune = false; +} + +static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev) +{ + return 1; +} + +static u32 si_calculate_cac_wintime(struct amdgpu_device *adev) +{ + u32 xclk; + u32 wintime; + u32 cac_window; + u32 cac_window_size; + + xclk = amdgpu_asic_get_xclk(adev); + + if (xclk == 0) + return 0; + + cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK; + cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF); + + wintime = (cac_window_size * 100) / xclk; + + return wintime; +} + +static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor) +{ + return power_in_watts; +} + +static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev, + bool adjust_polarity, + u32 tdp_adjustment, + u32 *tdp_limit, + u32 *near_tdp_limit) +{ + u32 adjustment_delta, max_tdp_limit; + + if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit) + return -EINVAL; + + max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100; + + if (adjust_polarity) { + *tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100; + *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit 
- adev->pm.dpm.tdp_limit); + } else { + *tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100; + adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit; + if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted) + *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta; + else + *near_tdp_limit = 0; + } + + if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit)) + return -EINVAL; + if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit)) + return -EINVAL; + + return 0; +} + +static int si_populate_smc_tdp_limits(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + + if (ni_pi->enable_power_containment) { + SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable; + PP_SIslands_PAPMParameters *papm_parm; + struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table; + u32 scaling_factor = si_get_smc_power_scaling_factor(adev); + u32 tdp_limit; + u32 near_tdp_limit; + int ret; + + if (scaling_factor == 0) + return -EINVAL; + + memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); + + ret = si_calculate_adjusted_tdp_limits(adev, + false, /* ??? */ + adev->pm.dpm.tdp_adjustment, + &tdp_limit, + &near_tdp_limit); + if (ret) + return ret; + + smc_table->dpm2Params.TDPLimit = + cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000); + smc_table->dpm2Params.NearTDPLimit = + cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000); + smc_table->dpm2Params.SafePowerLimit = + cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000); + + ret = amdgpu_si_copy_bytes_to_smc(adev, + (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) + + offsetof(PP_SIslands_DPM2Parameters, TDPLimit)), + (u8 *)(&(smc_table->dpm2Params.TDPLimit)), + sizeof(u32) * 3, + si_pi->sram_end); + if (ret) + return ret; + + if (si_pi->enable_ppm) { + papm_parm = &si_pi->papm_parm; + memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters)); + papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp); + papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max); + papm_parm->dGPU_T_Warning = cpu_to_be32(95); + papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5); + papm_parm->PlatformPowerLimit = 0xffffffff; + papm_parm->NearTDPLimitPAPM = 0xffffffff; + + ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start, + (u8 *)papm_parm, + sizeof(PP_SIslands_PAPMParameters), + si_pi->sram_end); + if (ret) + return ret; + } + } + return 0; +} + +static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + + if (ni_pi->enable_power_containment) { + SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable; + u32 scaling_factor = si_get_smc_power_scaling_factor(adev); + int ret; + + memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); + + smc_table->dpm2Params.NearTDPLimit = + cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000); + smc_table->dpm2Params.SafePowerLimit = + cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000); + + ret = amdgpu_si_copy_bytes_to_smc(adev, + (si_pi->state_table_start + + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) + + offsetof(PP_SIslands_DPM2Parameters, 
NearTDPLimit)), + (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)), + sizeof(u32) * 2, + si_pi->sram_end); + if (ret) + return ret; + } + + return 0; +} + +static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev, + const u16 prev_std_vddc, + const u16 curr_std_vddc) +{ + u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN; + u64 prev_vddc = (u64)prev_std_vddc; + u64 curr_vddc = (u64)curr_std_vddc; + u64 pwr_efficiency_ratio, n, d; + + if ((prev_vddc == 0) || (curr_vddc == 0)) + return 0; + + n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000); + d = prev_vddc * prev_vddc; + pwr_efficiency_ratio = div64_u64(n, d); + + if (pwr_efficiency_ratio > (u64)0xFFFF) + return 0; + + return (u16)pwr_efficiency_ratio; +} + +static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + if (si_pi->dyn_powertune_data.disable_uvd_powertune && + amdgpu_state->vclk && amdgpu_state->dclk) + return true; + + return false; +} + +struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev) +{ + struct evergreen_power_info *pi = adev->pm.dpm.priv; + + return pi; +} + +static int si_populate_power_containment_values(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + SISLANDS_SMC_SWSTATE *smc_state) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_ps *state = si_get_ps(amdgpu_state); + SISLANDS_SMC_VOLTAGE_VALUE vddc; + u32 prev_sclk; + u32 max_sclk; + u32 min_sclk; + u16 prev_std_vddc; + u16 curr_std_vddc; + int i; + u16 pwr_efficiency_ratio; + u8 max_ps_percent; + bool disable_uvd_power_tune; + int ret; + + if (ni_pi->enable_power_containment == false) + return 0; + + if (state->performance_level_count == 0) + return -EINVAL; + + if (smc_state->levelCount != state->performance_level_count) + return -EINVAL; + + disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state); + + smc_state->levels[0].dpm2.MaxPS = 0; + smc_state->levels[0].dpm2.NearTDPDec = 0; + smc_state->levels[0].dpm2.AboveSafeInc = 0; + smc_state->levels[0].dpm2.BelowSafeInc = 0; + smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0; + + for (i = 1; i < state->performance_level_count; i++) { + prev_sclk = state->performance_levels[i-1].sclk; + max_sclk = state->performance_levels[i].sclk; + if (i == 1) + max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M; + else + max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H; + + if (prev_sclk > max_sclk) + return -EINVAL; + + if ((max_ps_percent == 0) || + (prev_sclk == max_sclk) || + disable_uvd_power_tune) + min_sclk = max_sclk; + else if (i == 1) + min_sclk = prev_sclk; + else + min_sclk = (prev_sclk * (u32)max_ps_percent) / 100; + + if (min_sclk < state->performance_levels[0].sclk) + min_sclk = state->performance_levels[0].sclk; + + if (min_sclk == 0) + return -EINVAL; + + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, + state->performance_levels[i-1].vddc, &vddc); + if (ret) + return ret; + + ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc); + if (ret) + return ret; + + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, + state->performance_levels[i].vddc, &vddc); + if (ret) + return ret; + + ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc); + if (ret) + return ret; + + pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev, + prev_std_vddc, curr_std_vddc); + + 
smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk); + smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC; + smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC; + smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC; + smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio); + } + + return 0; +} + +static int si_populate_sq_ramping_values(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + SISLANDS_SMC_SWSTATE *smc_state) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_ps *state = si_get_ps(amdgpu_state); + u32 sq_power_throttle, sq_power_throttle2; + bool enable_sq_ramping = ni_pi->enable_sq_ramping; + int i; + + if (state->performance_level_count == 0) + return -EINVAL; + + if (smc_state->levelCount != state->performance_level_count) + return -EINVAL; + + if (adev->pm.dpm.sq_ramping_threshold == 0) + return -EINVAL; + + if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT)) + enable_sq_ramping = false; + + if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT)) + enable_sq_ramping = false; + + if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT)) + enable_sq_ramping = false; + + if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) + enable_sq_ramping = false; + + if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + enable_sq_ramping = false; + + for (i = 0; i < state->performance_level_count; i++) { + sq_power_throttle = 0; + sq_power_throttle2 = 0; + + if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) && + enable_sq_ramping) { + sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER); + sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER); + sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA); + sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE); + sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO); + } else { + sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK; + sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + } + + smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle); + smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2); + } + + return 0; +} + +static int si_enable_power_containment(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + bool enable) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + PPSMC_Result smc_result; + int ret = 0; + + if (ni_pi->enable_power_containment) { + if (enable) { + if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive); + if (smc_result != PPSMC_Result_OK) { + ret = -EINVAL; + ni_pi->pc_enabled = false; + } else { + ni_pi->pc_enabled = true; + } + } + } else { + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive); + if (smc_result != PPSMC_Result_OK) + ret = -EINVAL; + ni_pi->pc_enabled = false; + } + } + + return ret; +} + +static int si_initialize_smc_dte_tables(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + int ret = 0; + struct si_dte_data *dte_data = &si_pi->dte_data; + Smc_SIslands_DTE_Configuration *dte_tables = NULL; + u32 table_size; + u8 tdep_count; + u32 i; + + if (dte_data == NULL) + si_pi->enable_dte = false; + + if (si_pi->enable_dte == false) + 
return 0; + + if (dte_data->k <= 0) + return -EINVAL; + + dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL); + if (dte_tables == NULL) { + si_pi->enable_dte = false; + return -ENOMEM; + } + + table_size = dte_data->k; + + if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES) + table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES; + + tdep_count = dte_data->tdep_count; + if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE) + tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; + + dte_tables->K = cpu_to_be32(table_size); + dte_tables->T0 = cpu_to_be32(dte_data->t0); + dte_tables->MaxT = cpu_to_be32(dte_data->max_t); + dte_tables->WindowSize = dte_data->window_size; + dte_tables->temp_select = dte_data->temp_select; + dte_tables->DTE_mode = dte_data->dte_mode; + dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold); + + if (tdep_count > 0) + table_size--; + + for (i = 0; i < table_size; i++) { + dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]); + dte_tables->R[i] = cpu_to_be32(dte_data->r[i]); + } + + dte_tables->Tdep_count = tdep_count; + + for (i = 0; i < (u32)tdep_count; i++) { + dte_tables->T_limits[i] = dte_data->t_limits[i]; + dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]); + dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]); + } + + ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start, + (u8 *)dte_tables, + sizeof(Smc_SIslands_DTE_Configuration), + si_pi->sram_end); + kfree(dte_tables); + + return ret; +} + +static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev, + u16 *max, u16 *min) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct amdgpu_cac_leakage_table *table = + &adev->pm.dpm.dyn_state.cac_leakage_table; + u32 i; + u32 v0_loadline; + + if (table == NULL) + return -EINVAL; + + *max = 0; + *min = 0xFFFF; + + for (i = 0; i < table->count; i++) { + if (table->entries[i].vddc > *max) + *max = table->entries[i].vddc; + if (table->entries[i].vddc < *min) + *min = table->entries[i].vddc; + } + + if (si_pi->powertune_data->lkge_lut_v0_percent > 100) + return -EINVAL; + + v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100; + + if (v0_loadline > 0xFFFFUL) + return -EINVAL; + + *min = (u16)v0_loadline; + + if ((*min > *max) || (*max == 0) || (*min == 0)) + return -EINVAL; + + return 0; +} + +static u16 si_get_cac_std_voltage_step(u16 max, u16 min) +{ + return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) / + SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; +} + +static int si_init_dte_leakage_table(struct amdgpu_device *adev, + PP_SIslands_CacConfig *cac_tables, + u16 vddc_max, u16 vddc_min, u16 vddc_step, + u16 t0, u16 t_step) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 leakage; + unsigned int i, j; + s32 t; + u32 smc_leakage; + u32 scaling_factor; + u16 voltage; + + scaling_factor = si_get_smc_power_scaling_factor(adev); + + for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) { + t = (1000 * (i * t_step + t0)); + + for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) { + voltage = vddc_max - (vddc_step * j); + + si_calculate_leakage_for_v_and_t(adev, + &si_pi->powertune_data->leakage_coefficients, + voltage, + t, + si_pi->dyn_powertune_data.cac_leakage, + &leakage); + + smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4; + + if (smc_leakage > 0xFFFF) + smc_leakage = 0xFFFF; + + cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] = + 
cpu_to_be16((u16)smc_leakage); + } + } + return 0; +} + +static int si_init_simplified_leakage_table(struct amdgpu_device *adev, + PP_SIslands_CacConfig *cac_tables, + u16 vddc_max, u16 vddc_min, u16 vddc_step) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 leakage; + unsigned int i, j; + u32 smc_leakage; + u32 scaling_factor; + u16 voltage; + + scaling_factor = si_get_smc_power_scaling_factor(adev); + + for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) { + voltage = vddc_max - (vddc_step * j); + + si_calculate_leakage_for_v(adev, + &si_pi->powertune_data->leakage_coefficients, + si_pi->powertune_data->fixed_kt, + voltage, + si_pi->dyn_powertune_data.cac_leakage, + &leakage); + + smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4; + + if (smc_leakage > 0xFFFF) + smc_leakage = 0xFFFF; + + for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) + cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] = + cpu_to_be16((u16)smc_leakage); + } + return 0; +} + +static int si_initialize_smc_cac_tables(struct amdgpu_device *adev) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + PP_SIslands_CacConfig *cac_tables = NULL; + u16 vddc_max, vddc_min, vddc_step; + u16 t0, t_step; + u32 load_line_slope, reg; + int ret = 0; + u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100; + + if (ni_pi->enable_cac == false) + return 0; + + cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL); + if (!cac_tables) + return -ENOMEM; + + reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK; + reg |= CAC_WINDOW(si_pi->powertune_data->cac_window); + WREG32(CG_CAC_CTRL, reg); + + si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage; + si_pi->dyn_powertune_data.dc_pwr_value = + si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0]; + si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev); + si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default; + + si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000; + + ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min); + if (ret) + goto done_free; + + vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min); + vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)); + t_step = 4; + t0 = 60; + + if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage) + ret = si_init_dte_leakage_table(adev, cac_tables, + vddc_max, vddc_min, vddc_step, + t0, t_step); + else + ret = si_init_simplified_leakage_table(adev, cac_tables, + vddc_max, vddc_min, vddc_step); + if (ret) + goto done_free; + + load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100; + + cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size); + cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate; + cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n; + cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min); + cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step); + cac_tables->R_LL = cpu_to_be32(load_line_slope); + cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime); + cac_tables->calculation_repeats = cpu_to_be32(2); + cac_tables->dc_cac = cpu_to_be32(0); + cac_tables->log2_PG_LKG_SCALE = 12; + cac_tables->cac_temp = si_pi->powertune_data->operating_temp; + cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0); + cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step); + + ret = amdgpu_si_copy_bytes_to_smc(adev, 
si_pi->cac_table_start, + (u8 *)cac_tables, + sizeof(PP_SIslands_CacConfig), + si_pi->sram_end); + + if (ret) + goto done_free; + + ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us); + +done_free: + if (ret) { + ni_pi->enable_cac = false; + ni_pi->enable_power_containment = false; + } + + kfree(cac_tables); + + return ret; +} + +static int si_program_cac_config_registers(struct amdgpu_device *adev, + const struct si_cac_config_reg *cac_config_regs) +{ + const struct si_cac_config_reg *config_regs = cac_config_regs; + u32 data = 0, offset; + + if (!config_regs) + return -EINVAL; + + while (config_regs->offset != 0xFFFFFFFF) { + switch (config_regs->type) { + case SISLANDS_CACCONFIG_CGIND: + offset = SMC_CG_IND_START + config_regs->offset; + if (offset < SMC_CG_IND_END) + data = RREG32_SMC(offset); + break; + default: + data = RREG32(config_regs->offset); + break; + } + + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + + switch (config_regs->type) { + case SISLANDS_CACCONFIG_CGIND: + offset = SMC_CG_IND_START + config_regs->offset; + if (offset < SMC_CG_IND_END) + WREG32_SMC(offset, data); + break; + default: + WREG32(config_regs->offset, data); + break; + } + config_regs++; + } + return 0; +} + +static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + int ret; + + if ((ni_pi->enable_cac == false) || + (ni_pi->cac_configuration_required == false)) + return 0; + + ret = si_program_cac_config_registers(adev, si_pi->lcac_config); + if (ret) + return ret; + ret = si_program_cac_config_registers(adev, si_pi->cac_override); + if (ret) + return ret; + ret = si_program_cac_config_registers(adev, si_pi->cac_weights); + if (ret) + return ret; + + return 0; +} + +static int si_enable_smc_cac(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + bool enable) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + PPSMC_Result smc_result; + int ret = 0; + + if (ni_pi->enable_cac) { + if (enable) { + if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { + if (ni_pi->support_cac_long_term_average) { + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable); + if (smc_result != PPSMC_Result_OK) + ni_pi->support_cac_long_term_average = false; + } + + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac); + if (smc_result != PPSMC_Result_OK) { + ret = -EINVAL; + ni_pi->cac_enabled = false; + } else { + ni_pi->cac_enabled = true; + } + + if (si_pi->enable_dte) { + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE); + if (smc_result != PPSMC_Result_OK) + ret = -EINVAL; + } + } + } else if (ni_pi->cac_enabled) { + if (si_pi->enable_dte) + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE); + + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac); + + ni_pi->cac_enabled = false; + + if (ni_pi->support_cac_long_term_average) + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable); + } + } + return ret; +} + +static int si_init_smc_spll_table(struct amdgpu_device *adev) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + SMC_SISLANDS_SPLL_DIV_TABLE *spll_table; + SISLANDS_SMC_SCLK_VALUE sclk_params; + u32 fb_div, p_div; + u32 clk_s, clk_v; + u32 sclk = 0; + int ret = 0; + u32 tmp; + int i; + 
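+	/* Descriptive note on the loop below: it builds a 256-entry SPLL divider
+	 * table, stepping sclk by 512 each iteration, packing the PDIV/FBDIV
+	 * fields into freq[] and the spread-spectrum CLKS/CLKV fields into ss[],
+	 * and then uploads the whole table to SMC SRAM at spll_table_start.
+	 */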
+	if (si_pi->spll_table_start == 0)
+		return -EINVAL;
+
+	spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
+	if (spll_table == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < 256; i++) {
+		ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
+		if (ret)
+			break;
+		p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
+		fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
+		clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
+		clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+
+		fb_div &= ~0x00001FFF;
+		fb_div >>= 1;
+		clk_v >>= 6;
+
+		if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
+			ret = -EINVAL;
+		if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
+			ret = -EINVAL;
+		if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+			ret = -EINVAL;
+		if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
+			ret = -EINVAL;
+
+		if (ret)
+			break;
+
+		tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
+			((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
+		spll_table->freq[i] = cpu_to_be32(tmp);
+
+		tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
+			((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
+		spll_table->ss[i] = cpu_to_be32(tmp);
+
+		sclk += 512;
+	}
+
+
+	if (!ret)
+		ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
+						  (u8 *)spll_table,
+						  sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
+						  si_pi->sram_end);
+
+	if (ret)
+		ni_pi->enable_power_containment = false;
+
+	kfree(spll_table);
+
+	return ret;
+}
+
+struct si_dpm_quirk {
+	u32 chip_vendor;
+	u32 chip_device;
+	u32 subsys_vendor;
+	u32 subsys_device;
+	u32 max_sclk;
+	u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
+	{ 0, 0, 0, 0 },
+};
+
+static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
+						   u16 vce_voltage)
+{
+	u16 highest_leakage = 0;
+	struct si_power_info *si_pi = si_get_pi(adev);
+	int i;
+
+	for (i = 0; i < si_pi->leakage_voltage.count; i++){
+		if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
+			highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
+	}
+
+	if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
+		return highest_leakage;
+
+	return vce_voltage;
+}
+
+static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
+				    u32 evclk, u32 ecclk, u16 *voltage)
+{
+	u32 i;
+	int ret = -EINVAL;
+	struct amdgpu_vce_clock_voltage_dependency_table *table =
+		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+	if (((evclk == 0) && (ecclk == 0)) ||
+	    (table &&
(table->count == 0))) { + *voltage = 0; + return 0; + } + + for (i = 0; i < table->count; i++) { + if ((evclk <= table->entries[i].evclk) && + (ecclk <= table->entries[i].ecclk)) { + *voltage = table->entries[i].v; + ret = 0; + break; + } + } + + /* if no match return the highest voltage */ + if (ret) + *voltage = table->entries[table->count - 1].v; + + *voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage); + + return ret; +} + +static bool si_dpm_vblank_too_short(struct amdgpu_device *adev) +{ + + u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); + /* we never hit the non-gddr5 limit so disable it */ + u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0; + + if (vblank_time < switch_limit) + return true; + else + return false; + +} + +static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev, + u32 arb_freq_src, u32 arb_freq_dest) +{ + u32 mc_arb_dram_timing; + u32 mc_arb_dram_timing2; + u32 burst_time; + u32 mc_cg_config; + + switch (arb_freq_src) { + case MC_CG_ARB_FREQ_F0: + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING); + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT; + break; + case MC_CG_ARB_FREQ_F1: + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1); + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1); + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT; + break; + case MC_CG_ARB_FREQ_F2: + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2); + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2); + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT; + break; + case MC_CG_ARB_FREQ_F3: + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3); + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3); + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT; + break; + default: + return -EINVAL; + } + + switch (arb_freq_dest) { + case MC_CG_ARB_FREQ_F0: + WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing); + WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); + WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK); + break; + case MC_CG_ARB_FREQ_F1: + WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); + WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); + WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK); + break; + case MC_CG_ARB_FREQ_F2: + WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing); + WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2); + WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK); + break; + case MC_CG_ARB_FREQ_F3: + WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing); + WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2); + WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK); + break; + default: + return -EINVAL; + } + + mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F; + WREG32(MC_CG_CONFIG, mc_cg_config); + WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK); + + return 0; +} + +static void ni_update_current_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct si_ps *new_ps = si_get_ps(rps); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct ni_power_info *ni_pi = ni_get_pi(adev); + + eg_pi->current_rps = *rps; + ni_pi->current_ps = *new_ps; + eg_pi->current_rps.ps_priv = &ni_pi->current_ps; +} + +static void ni_update_requested_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct si_ps *new_ps = si_get_ps(rps); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct ni_power_info 
*ni_pi = ni_get_pi(adev); + + eg_pi->requested_rps = *rps; + ni_pi->requested_ps = *new_ps; + eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps; +} + +static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev, + struct amdgpu_ps *new_ps, + struct amdgpu_ps *old_ps) +{ + struct si_ps *new_state = si_get_ps(new_ps); + struct si_ps *current_state = si_get_ps(old_ps); + + if ((new_ps->vclk == old_ps->vclk) && + (new_ps->dclk == old_ps->dclk)) + return; + + if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >= + current_state->performance_levels[current_state->performance_level_count - 1].sclk) + return; + + amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk); +} + +static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev, + struct amdgpu_ps *new_ps, + struct amdgpu_ps *old_ps) +{ + struct si_ps *new_state = si_get_ps(new_ps); + struct si_ps *current_state = si_get_ps(old_ps); + + if ((new_ps->vclk == old_ps->vclk) && + (new_ps->dclk == old_ps->dclk)) + return; + + if (new_state->performance_levels[new_state->performance_level_count - 1].sclk < + current_state->performance_levels[current_state->performance_level_count - 1].sclk) + return; + + amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk); +} + +static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage) +{ + unsigned int i; + + for (i = 0; i < table->count; i++) + if (voltage <= table->entries[i].value) + return table->entries[i].value; + + return table->entries[table->count - 1].value; +} + +static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks, + u32 max_clock, u32 requested_clock) +{ + unsigned int i; + + if ((clocks == NULL) || (clocks->count == 0)) + return (requested_clock < max_clock) ? requested_clock : max_clock; + + for (i = 0; i < clocks->count; i++) { + if (clocks->values[i] >= requested_clock) + return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock; + } + + return (clocks->values[clocks->count - 1] < max_clock) ? + clocks->values[clocks->count - 1] : max_clock; +} + +static u32 btc_get_valid_mclk(struct amdgpu_device *adev, + u32 max_mclk, u32 requested_mclk) +{ + return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values, + max_mclk, requested_mclk); +} + +static u32 btc_get_valid_sclk(struct amdgpu_device *adev, + u32 max_sclk, u32 requested_sclk) +{ + return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values, + max_sclk, requested_sclk); +} + +static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table, + u32 *max_clock) +{ + u32 i, clock = 0; + + if ((table == NULL) || (table->count == 0)) { + *max_clock = clock; + return; + } + + for (i = 0; i < table->count; i++) { + if (clock < table->entries[i].clk) + clock = table->entries[i].clk; + } + *max_clock = clock; +} + +static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table, + u32 clock, u16 max_voltage, u16 *voltage) +{ + u32 i; + + if ((table == NULL) || (table->count == 0)) + return; + + for (i= 0; i < table->count; i++) { + if (clock <= table->entries[i].clk) { + if (*voltage < table->entries[i].v) + *voltage = (u16)((table->entries[i].v < max_voltage) ? + table->entries[i].v : max_voltage); + return; + } + } + + *voltage = (*voltage > max_voltage) ? 
*voltage : max_voltage; +} + +static void btc_adjust_clock_combinations(struct amdgpu_device *adev, + const struct amdgpu_clock_and_voltage_limits *max_limits, + struct rv7xx_pl *pl) +{ + + if ((pl->mclk == 0) || (pl->sclk == 0)) + return; + + if (pl->mclk == pl->sclk) + return; + + if (pl->mclk > pl->sclk) { + if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio) + pl->sclk = btc_get_valid_sclk(adev, + max_limits->sclk, + (pl->mclk + + (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) / + adev->pm.dpm.dyn_state.mclk_sclk_ratio); + } else { + if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta) + pl->mclk = btc_get_valid_mclk(adev, + max_limits->mclk, + pl->sclk - + adev->pm.dpm.dyn_state.sclk_mclk_delta); + } +} + +static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev, + u16 max_vddc, u16 max_vddci, + u16 *vddc, u16 *vddci) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + u16 new_voltage; + + if ((0 == *vddc) || (0 == *vddci)) + return; + + if (*vddc > *vddci) { + if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { + new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table, + (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta)); + *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci; + } + } else { + if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { + new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table, + (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta)); + *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc; + } + } +} + +static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev, + u32 sys_mask, + enum amdgpu_pcie_gen asic_gen, + enum amdgpu_pcie_gen default_gen) +{ + switch (asic_gen) { + case AMDGPU_PCIE_GEN1: + return AMDGPU_PCIE_GEN1; + case AMDGPU_PCIE_GEN2: + return AMDGPU_PCIE_GEN2; + case AMDGPU_PCIE_GEN3: + return AMDGPU_PCIE_GEN3; + default: + if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3)) + return AMDGPU_PCIE_GEN3; + else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2)) + return AMDGPU_PCIE_GEN2; + else + return AMDGPU_PCIE_GEN1; + } + return AMDGPU_PCIE_GEN1; +} + +static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, + u32 *p, u32 *u) +{ + u32 b_c = 0; + u32 i_c; + u32 tmp; + + i_c = (i * r_c) / 100; + tmp = i_c >> p_b; + + while (tmp) { + b_c++; + tmp >>= 1; + } + + *u = (b_c + 1) / 2; + *p = i_c / (1 << (2 * (*u))); +} + +static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th) +{ + u32 k, a, ah, al; + u32 t1; + + if ((fl == 0) || (fh == 0) || (fl > fh)) + return -EINVAL; + + k = (100 * fh) / fl; + t1 = (t * (k - 100)); + a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100)); + a = (a + 5) / 10; + ah = ((a * t) + 5000) / 10000; + al = a - ah; + + *th = t - ah; + *tl = t + al; + + return 0; +} + +static bool r600_is_uvd_state(u32 class, u32 class2) +{ + if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + return true; + if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) + return true; + if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) + return true; + if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) + return true; + if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) + return true; + return false; +} + +static u8 rv770_get_memory_module_index(struct amdgpu_device *adev) +{ + return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff); +} + +static void rv770_get_max_vddc(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + u16 vddc; + + if 
(amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc)) + pi->max_vddc = 0; + else + pi->max_vddc = vddc; +} + +static void rv770_get_engine_memory_ss(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct amdgpu_atom_ss ss; + + pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss, + ASIC_INTERNAL_ENGINE_SS, 0); + pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss, + ASIC_INTERNAL_MEMORY_SS, 0); + + if (pi->sclk_ss || pi->mclk_ss) + pi->dynamic_ss = true; + else + pi->dynamic_ss = false; +} + + +static void si_apply_state_adjust_rules(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct si_ps *ps = si_get_ps(rps); + struct amdgpu_clock_and_voltage_limits *max_limits; + bool disable_mclk_switching = false; + bool disable_sclk_switching = false; + u32 mclk, sclk; + u16 vddc, vddci, min_vce_voltage = 0; + u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; + u32 max_sclk = 0, max_mclk = 0; + int i; + struct si_dpm_quirk *p = si_dpm_quirk_list; + + /* Apply dpm quirks */ + while (p && p->chip_device != 0) { + if (adev->pdev->vendor == p->chip_vendor && + adev->pdev->device == p->chip_device && + adev->pdev->subsystem_vendor == p->subsys_vendor && + adev->pdev->subsystem_device == p->subsys_device) { + max_sclk = p->max_sclk; + max_mclk = p->max_mclk; + break; + } + ++p; + } + /* limit mclk on all R7 370 parts for stability */ + if (adev->pdev->device == 0x6811 && + adev->pdev->revision == 0x81) + max_mclk = 120000; + /* limit sclk/mclk on Jet parts for stability */ + if (adev->pdev->device == 0x6665 && + adev->pdev->revision == 0xc3) { + max_sclk = 75000; + max_mclk = 80000; + } + + if (rps->vce_active) { + rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; + rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; + si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk, + &min_vce_voltage); + } else { + rps->evclk = 0; + rps->ecclk = 0; + } + + if ((adev->pm.dpm.new_active_crtc_count > 1) || + si_dpm_vblank_too_short(adev)) + disable_mclk_switching = true; + + if (rps->vclk || rps->dclk) { + disable_mclk_switching = true; + disable_sclk_switching = true; + } + + if (adev->pm.dpm.ac_power) + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + for (i = ps->performance_level_count - 2; i >= 0; i--) { + if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc) + ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc; + } + if (adev->pm.dpm.ac_power == false) { + for (i = 0; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].mclk > max_limits->mclk) + ps->performance_levels[i].mclk = max_limits->mclk; + if (ps->performance_levels[i].sclk > max_limits->sclk) + ps->performance_levels[i].sclk = max_limits->sclk; + if (ps->performance_levels[i].vddc > max_limits->vddc) + ps->performance_levels[i].vddc = max_limits->vddc; + if (ps->performance_levels[i].vddci > max_limits->vddci) + ps->performance_levels[i].vddci = max_limits->vddci; + } + } + + /* limit clocks to max supported clocks based on voltage dependency tables */ + btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + &max_sclk_vddc); + btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &max_mclk_vddci); + btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &max_mclk_vddc); + + for (i = 0; i < 
ps->performance_level_count; i++) { + if (max_sclk_vddc) { + if (ps->performance_levels[i].sclk > max_sclk_vddc) + ps->performance_levels[i].sclk = max_sclk_vddc; + } + if (max_mclk_vddci) { + if (ps->performance_levels[i].mclk > max_mclk_vddci) + ps->performance_levels[i].mclk = max_mclk_vddci; + } + if (max_mclk_vddc) { + if (ps->performance_levels[i].mclk > max_mclk_vddc) + ps->performance_levels[i].mclk = max_mclk_vddc; + } + if (max_mclk) { + if (ps->performance_levels[i].mclk > max_mclk) + ps->performance_levels[i].mclk = max_mclk; + } + if (max_sclk) { + if (ps->performance_levels[i].sclk > max_sclk) + ps->performance_levels[i].sclk = max_sclk; + } + } + + /* XXX validate the min clocks required for display */ + + if (disable_mclk_switching) { + mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; + vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; + } else { + mclk = ps->performance_levels[0].mclk; + vddci = ps->performance_levels[0].vddci; + } + + if (disable_sclk_switching) { + sclk = ps->performance_levels[ps->performance_level_count - 1].sclk; + vddc = ps->performance_levels[ps->performance_level_count - 1].vddc; + } else { + sclk = ps->performance_levels[0].sclk; + vddc = ps->performance_levels[0].vddc; + } + + if (rps->vce_active) { + if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) + sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; + if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk) + mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk; + } + + /* adjusted low state */ + ps->performance_levels[0].sclk = sclk; + ps->performance_levels[0].mclk = mclk; + ps->performance_levels[0].vddc = vddc; + ps->performance_levels[0].vddci = vddci; + + if (disable_sclk_switching) { + sclk = ps->performance_levels[0].sclk; + for (i = 1; i < ps->performance_level_count; i++) { + if (sclk < ps->performance_levels[i].sclk) + sclk = ps->performance_levels[i].sclk; + } + for (i = 0; i < ps->performance_level_count; i++) { + ps->performance_levels[i].sclk = sclk; + ps->performance_levels[i].vddc = vddc; + } + } else { + for (i = 1; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) + ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; + if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) + ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; + } + } + + if (disable_mclk_switching) { + mclk = ps->performance_levels[0].mclk; + for (i = 1; i < ps->performance_level_count; i++) { + if (mclk < ps->performance_levels[i].mclk) + mclk = ps->performance_levels[i].mclk; + } + for (i = 0; i < ps->performance_level_count; i++) { + ps->performance_levels[i].mclk = mclk; + ps->performance_levels[i].vddci = vddci; + } + } else { + for (i = 1; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk) + ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk; + if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci) + ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci; + } + } + + for (i = 0; i < ps->performance_level_count; i++) + btc_adjust_clock_combinations(adev, max_limits, + &ps->performance_levels[i]); + + for (i = 0; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].vddc < min_vce_voltage) + ps->performance_levels[i].vddc = min_vce_voltage; + 
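(Aside: the per-level clamping used throughout si_apply_state_adjust_rules() follows one pattern: apply a cap only when it is non-zero. A minimal standalone sketch of that idea, with a hypothetical "struct level" standing in for the driver's rv7xx_pl and made-up clock values; this is illustrative only, not part of the patch.)

#include <stdio.h>

/* Hypothetical stand-in for one performance level (clocks in 10 kHz units). */
struct level { unsigned int sclk, mclk; };

/* Apply a cap only when it is non-zero, mirroring the "if (max) clamp" pattern. */
static void clamp_level(struct level *l, unsigned int max_sclk, unsigned int max_mclk)
{
	if (max_sclk && l->sclk > max_sclk)
		l->sclk = max_sclk;
	if (max_mclk && l->mclk > max_mclk)
		l->mclk = max_mclk;
}

int main(void)
{
	struct level levels[] = { { 30000, 15000 }, { 85000, 125000 } };
	unsigned int i;

	for (i = 0; i < 2; i++) {
		clamp_level(&levels[i], 80000, 120000); /* e.g. caps taken from a quirk entry */
		printf("level %u: sclk=%u mclk=%u\n", i, levels[i].sclk, levels[i].mclk);
	}
	return 0;
}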
btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + ps->performance_levels[i].sclk, + max_limits->vddc, &ps->performance_levels[i].vddc); + btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + ps->performance_levels[i].mclk, + max_limits->vddci, &ps->performance_levels[i].vddci); + btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + ps->performance_levels[i].mclk, + max_limits->vddc, &ps->performance_levels[i].vddc); + btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk, + adev->clock.current_dispclk, + max_limits->vddc, &ps->performance_levels[i].vddc); + } + + for (i = 0; i < ps->performance_level_count; i++) { + btc_apply_voltage_delta_rules(adev, + max_limits->vddc, max_limits->vddci, + &ps->performance_levels[i].vddc, + &ps->performance_levels[i].vddci); + } + + ps->dc_compatible = true; + for (i = 0; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) + ps->dc_compatible = false; + } +} + +#if 0 +static int si_read_smc_soft_register(struct amdgpu_device *adev, + u16 reg_offset, u32 *value) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + return amdgpu_si_read_smc_sram_dword(adev, + si_pi->soft_regs_start + reg_offset, value, + si_pi->sram_end); +} +#endif + +static int si_write_smc_soft_register(struct amdgpu_device *adev, + u16 reg_offset, u32 value) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + return amdgpu_si_write_smc_sram_dword(adev, + si_pi->soft_regs_start + reg_offset, + value, si_pi->sram_end); +} + +static bool si_is_special_1gb_platform(struct amdgpu_device *adev) +{ + bool ret = false; + u32 tmp, width, row, column, bank, density; + bool is_memory_gddr5, is_special; + + tmp = RREG32(MC_SEQ_MISC0); + is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT)); + is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT)) + & (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT)); + + WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb); + width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 
16 : 32; + + tmp = RREG32(MC_ARB_RAMCFG); + row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10; + column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8; + bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2; + + density = (1 << (row + column - 20 + bank)) * width; + + if ((adev->pdev->device == 0x6819) && + is_memory_gddr5 && is_special && (density == 0x400)) + ret = true; + + return ret; +} + +static void si_get_leakage_vddc(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u16 vddc, count = 0; + int i, ret; + + for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) { + ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i); + + if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) { + si_pi->leakage_voltage.entries[count].voltage = vddc; + si_pi->leakage_voltage.entries[count].leakage_index = + SISLANDS_LEAKAGE_INDEX0 + i; + count++; + } + } + si_pi->leakage_voltage.count = count; +} + +static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev, + u32 index, u16 *leakage_voltage) +{ + struct si_power_info *si_pi = si_get_pi(adev); + int i; + + if (leakage_voltage == NULL) + return -EINVAL; + + if ((index & 0xff00) != 0xff00) + return -EINVAL; + + if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1) + return -EINVAL; + + if (index < SISLANDS_LEAKAGE_INDEX0) + return -EINVAL; + + for (i = 0; i < si_pi->leakage_voltage.count; i++) { + if (si_pi->leakage_voltage.entries[i].leakage_index == index) { + *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage; + return 0; + } + } + return -EAGAIN; +} + +static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + bool want_thermal_protection; + enum amdgpu_dpm_event_src dpm_event_src; + + switch (sources) { + case 0: + default: + want_thermal_protection = false; + break; + case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL): + want_thermal_protection = true; + dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL; + break; + case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL): + want_thermal_protection = true; + dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL; + break; + case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | + (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)): + want_thermal_protection = true; + dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; + break; + } + + if (want_thermal_protection) { + WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK); + if (pi->thermal_protection) + WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); + } else { + WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); + } +} + +static void si_enable_auto_throttle_source(struct amdgpu_device *adev, + enum amdgpu_dpm_auto_throttle_src source, + bool enable) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + + if (enable) { + if (!(pi->active_auto_throttle_sources & (1 << source))) { + pi->active_auto_throttle_sources |= 1 << source; + si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); + } + } else { + if (pi->active_auto_throttle_sources & (1 << source)) { + pi->active_auto_throttle_sources &= ~(1 << source); + si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); + } + } +} + +static void si_start_dpm(struct amdgpu_device *adev) +{ + WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); +} + +static void si_stop_dpm(struct amdgpu_device *adev) +{ + WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); 
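(Aside: si_enable_auto_throttle_source() above only reprograms the DPM event sources when the throttle-source bitmask actually changes. A small standalone sketch of that bookkeeping, with the register update replaced by a printf stub; names here are illustrative, not the driver's.)

#include <stdio.h>

#define THROTTLE_SRC_THERMAL  0
#define THROTTLE_SRC_EXTERNAL 1

static unsigned int active_sources; /* stands in for pi->active_auto_throttle_sources */

/* Stub for si_set_dpm_event_sources(): just report the new mask. */
static void set_dpm_event_sources(unsigned int sources)
{
	printf("reprogram event sources: 0x%x\n", sources);
}

static void enable_throttle_source(unsigned int source, int enable)
{
	unsigned int bit = 1u << source;

	if (enable && !(active_sources & bit)) {
		active_sources |= bit;
		set_dpm_event_sources(active_sources);
	} else if (!enable && (active_sources & bit)) {
		active_sources &= ~bit;
		set_dpm_event_sources(active_sources);
	}
	/* No-op when the bit already has the requested state. */
}

int main(void)
{
	enable_throttle_source(THROTTLE_SRC_THERMAL, 1);
	enable_throttle_source(THROTTLE_SRC_THERMAL, 1); /* mask unchanged, no reprogram */
	enable_throttle_source(THROTTLE_SRC_THERMAL, 0);
	return 0;
}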
+} + +static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable) +{ + if (enable) + WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); + else + WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); + +} + +#if 0 +static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev, + u32 thermal_level) +{ + PPSMC_Result ret; + + if (thermal_level == 0) { + ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); + if (ret == PPSMC_Result_OK) + return 0; + else + return -EINVAL; + } + return 0; +} + +static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev) +{ + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true); +} +#endif + +#if 0 +static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power) +{ + if (ac_power) + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ? + 0 : -EINVAL; + + return 0; +} +#endif + +static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, u32 parameter) +{ + WREG32(SMC_SCRATCH0, parameter); + return amdgpu_si_send_msg_to_smc(adev, msg); +} + +static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev) +{ + if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK) + return -EINVAL; + + return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int si_dpm_force_performance_level(struct amdgpu_device *adev, + enum amdgpu_dpm_forced_level level) +{ + struct amdgpu_ps *rps = adev->pm.dpm.current_ps; + struct si_ps *ps = si_get_ps(rps); + u32 levels = ps->performance_level_count; + + if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) { + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) + return -EINVAL; + + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) + return -EINVAL; + } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) { + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) + return -EINVAL; + + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) + return -EINVAL; + } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) { + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) + return -EINVAL; + + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) + return -EINVAL; + } + + adev->pm.dpm.forced_level = level; + + return 0; +} + +#if 0 +static int si_set_boot_state(struct amdgpu_device *adev) +{ + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} +#endif + +static int si_set_sw_state(struct amdgpu_device *adev) +{ + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int si_halt_smc(struct amdgpu_device *adev) +{ + if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK) + return -EINVAL; + + return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int si_resume_smc(struct amdgpu_device *adev) +{ + if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK) + return -EINVAL; + + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ? 
+ 0 : -EINVAL; +} + +static void si_dpm_start_smc(struct amdgpu_device *adev) +{ + amdgpu_si_program_jump_on_start(adev); + amdgpu_si_start_smc(adev); + amdgpu_si_smc_clock(adev, true); +} + +static void si_dpm_stop_smc(struct amdgpu_device *adev) +{ + amdgpu_si_reset_smc(adev); + amdgpu_si_smc_clock(adev, false); +} + +static int si_process_firmware_header(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + int ret; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_stateTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->state_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_softRegisters, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->soft_regs_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->mc_reg_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_fanTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->fan_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->arb_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->cac_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->dte_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_spllTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->spll_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->papm_cfg_table_start = tmp; + + return ret; +} + +static void si_read_clock_registers(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL); + si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2); + si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3); + si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4); + si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM); + si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2); + si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); + si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); + si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); + si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL); + si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL); + si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1); + si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2); + 
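(Aside: si_process_firmware_header() above repeats the same read-and-store step for each table pointer in the SMC firmware header. A purely illustrative, table-driven condensation of that pattern with a stubbed SRAM read; the offsets and base address below are made up and do not correspond to the real SISLANDS_SMC_FIRMWARE_HEADER_* values.)

#include <stdio.h>
#include <stdint.h>

/* Stubbed SRAM read: pretend each header slot points 0x100 bytes past itself. */
static int read_smc_sram_dword(uint32_t addr, uint32_t *val)
{
	*val = addr + 0x100;
	return 0;
}

struct header_field {
	uint32_t offset;  /* offset within the firmware header (illustrative) */
	uint32_t *dest;   /* where the parsed table start address is stored */
};

int main(void)
{
	uint32_t state_table_start, soft_regs_start, spll_table_start;
	const uint32_t header_location = 0x10000; /* hypothetical header base */
	struct header_field fields[] = {
		{ 0x00, &state_table_start },
		{ 0x04, &soft_regs_start },
		{ 0x08, &spll_table_start },
	};
	unsigned int i;

	for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		/* Bail out on the first failed read, like the driver does. */
		if (read_smc_sram_dword(header_location + fields[i].offset, fields[i].dest))
			return 1;
	}
	printf("state table 0x%x, soft regs 0x%x, spll table 0x%x\n",
	       (unsigned)state_table_start, (unsigned)soft_regs_start,
	       (unsigned)spll_table_start);
	return 0;
}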
si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1); + si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2); +} + +static void si_enable_thermal_protection(struct amdgpu_device *adev, + bool enable) +{ + if (enable) + WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); + else + WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); +} + +static void si_enable_acpi_power_management(struct amdgpu_device *adev) +{ + WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN); +} + +#if 0 +static int si_enter_ulp_state(struct amdgpu_device *adev) +{ + WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); + + udelay(25000); + + return 0; +} + +static int si_exit_ulp_state(struct amdgpu_device *adev) +{ + int i; + + WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); + + udelay(7000); + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(SMC_RESP_0) == 1) + break; + udelay(1000); + } + + return 0; +} +#endif + +static int si_notify_smc_display_change(struct amdgpu_device *adev, + bool has_display) +{ + PPSMC_Msg msg = has_display ? + PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; + + return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static void si_program_response_times(struct amdgpu_device *adev) +{ + u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out; + u32 vddc_dly, acpi_dly, vbi_dly; + u32 reference_clock; + + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1); + + voltage_response_time = (u32)adev->pm.dpm.voltage_response_time; + backbias_response_time = (u32)adev->pm.dpm.backbias_response_time; + + if (voltage_response_time == 0) + voltage_response_time = 1000; + + acpi_delay_time = 15000; + vbi_time_out = 100000; + + reference_clock = amdgpu_asic_get_xclk(adev); + + vddc_dly = (voltage_response_time * reference_clock) / 100; + acpi_dly = (acpi_delay_time * reference_clock) / 100; + vbi_dly = (vbi_time_out * reference_clock) / 100; + + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA); +} + +static void si_program_ds_registers(struct amdgpu_device *adev) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + u32 tmp; + + /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */ + if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0) + tmp = 0x10; + else + tmp = 0x1; + + if (eg_pi->sclk_deep_sleep) { + WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK); + WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR, + ~AUTOSCALE_ON_SS_CLEAR); + } +} + +static void si_program_display_gap(struct amdgpu_device *adev) +{ + u32 tmp, pipe; + int i; + + tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK); + if (adev->pm.dpm.new_active_crtc_count > 0) + tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); + else + tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE); + + if (adev->pm.dpm.new_active_crtc_count > 1) + tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); + else + tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE); + + WREG32(CG_DISPLAY_GAP_CNTL, tmp); + + tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG); + pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT; + + if ((adev->pm.dpm.new_active_crtc_count > 0) && + (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) 
{ + /* find the first active crtc */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->pm.dpm.new_active_crtcs & (1 << i)) + break; + } + if (i == adev->mode_info.num_crtc) + pipe = 0; + else + pipe = i; + + tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK; + tmp |= DCCG_DISP1_SLOW_SELECT(pipe); + WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp); + } + + /* Setting this to false forces the performance state to low if the crtcs are disabled. + * This can be a problem on PowerXpress systems or if you want to use the card + * for offscreen rendering or compute if there are no crtcs enabled. + */ + si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0); +} + +static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + + if (enable) { + if (pi->sclk_ss) + WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN); + } else { + WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN); + WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN); + } +} + +static void si_setup_bsp(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + u32 xclk = amdgpu_asic_get_xclk(adev); + + r600_calculate_u_and_p(pi->asi, + xclk, + 16, + &pi->bsp, + &pi->bsu); + + r600_calculate_u_and_p(pi->pasi, + xclk, + 16, + &pi->pbsp, + &pi->pbsu); + + + pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); + pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu); + + WREG32(CG_BSP, pi->dsp); +} + +static void si_program_git(struct amdgpu_device *adev) +{ + WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK); +} + +static void si_program_tp(struct amdgpu_device *adev) +{ + int i; + enum r600_td td = R600_TD_DFLT; + + for (i = 0; i < R600_PM_NUMBER_OF_TC; i++) + WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i]))); + + if (td == R600_TD_AUTO) + WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); + else + WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); + + if (td == R600_TD_UP) + WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); + + if (td == R600_TD_DOWN) + WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); +} + +static void si_program_tpp(struct amdgpu_device *adev) +{ + WREG32(CG_TPC, R600_TPC_DFLT); +} + +static void si_program_sstp(struct amdgpu_device *adev) +{ + WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); +} + +static void si_enable_display_gap(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); + + tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK); + tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) | + DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE)); + + tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); + tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) | + DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); + WREG32(CG_DISPLAY_GAP_CNTL, tmp); +} + +static void si_program_vc(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + + WREG32(CG_FTV, pi->vrc); +} + +static void si_clear_vc(struct amdgpu_device *adev) +{ + WREG32(CG_FTV, 0); +} + +static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) +{ + u8 mc_para_index; + + if (memory_clock < 10000) + mc_para_index = 0; + else if (memory_clock >= 80000) + mc_para_index = 0x0f; + else + mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1); + return mc_para_index; +} + +static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode) +{ + u8 mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) + mc_para_index = 0x00; + else if (memory_clock > 47500) + 
mc_para_index = 0x0f; + else + mc_para_index = (u8)((memory_clock - 10000) / 2500); + } else { + if (memory_clock < 65000) + mc_para_index = 0x00; + else if (memory_clock > 135000) + mc_para_index = 0x0f; + else + mc_para_index = (u8)((memory_clock - 60000) / 5000); + } + return mc_para_index; +} + +static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + bool strobe_mode = false; + u8 result = 0; + + if (mclk <= pi->mclk_strobe_mode_threshold) + strobe_mode = true; + + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) + result = si_get_mclk_frequency_ratio(mclk, strobe_mode); + else + result = si_get_ddr3_mclk_frequency_ratio(mclk); + + if (strobe_mode) + result |= SISLANDS_SMC_STROBE_ENABLE; + + return result; +} + +static int si_upload_firmware(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + amdgpu_si_reset_smc(adev); + amdgpu_si_smc_clock(adev, false); + + return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end); +} + +static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev, + const struct atom_voltage_table *table, + const struct amdgpu_phase_shedding_limits_table *limits) +{ + u32 data, num_bits, num_levels; + + if ((table == NULL) || (limits == NULL)) + return false; + + data = table->mask_low; + + num_bits = hweight32(data); + + if (num_bits == 0) + return false; + + num_levels = (1 << num_bits); + + if (table->count != num_levels) + return false; + + if (limits->count != (num_levels - 1)) + return false; + + return true; +} + +static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev, + u32 max_voltage_steps, + struct atom_voltage_table *voltage_table) +{ + unsigned int i, diff; + + if (voltage_table->count <= max_voltage_steps) + return; + + diff = voltage_table->count - max_voltage_steps; + + for (i= 0; i < max_voltage_steps; i++) + voltage_table->entries[i] = voltage_table->entries[i + diff]; + + voltage_table->count = max_voltage_steps; +} + +static int si_get_svi2_voltage_table(struct amdgpu_device *adev, + struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table, + struct atom_voltage_table *voltage_table) +{ + u32 i; + + if (voltage_dependency_table == NULL) + return -EINVAL; + + voltage_table->mask_low = 0; + voltage_table->phase_delay = 0; + + voltage_table->count = voltage_dependency_table->count; + for (i = 0; i < voltage_table->count; i++) { + voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; + voltage_table->entries[i].smio_low = 0; + } + + return 0; +} + +static int si_construct_voltage_tables(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + int ret; + + if (pi->voltage_control) { + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table); + if (ret) + return ret; + + if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) + si_trim_voltage_table_to_fit_state_table(adev, + SISLANDS_MAX_NO_VREG_STEPS, + &eg_pi->vddc_voltage_table); + } else if (si_pi->voltage_control_svi2) { + ret = si_get_svi2_voltage_table(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &eg_pi->vddc_voltage_table); + if (ret) + return ret; + } else { + return -EINVAL; + } + + if (eg_pi->vddci_control) { + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI, + 
VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table); + if (ret) + return ret; + + if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) + si_trim_voltage_table_to_fit_state_table(adev, + SISLANDS_MAX_NO_VREG_STEPS, + &eg_pi->vddci_voltage_table); + } + if (si_pi->vddci_control_svi2) { + ret = si_get_svi2_voltage_table(adev, + &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &eg_pi->vddci_voltage_table); + if (ret) + return ret; + } + + if (pi->mvdd_control) { + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC, + VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table); + + if (ret) { + pi->mvdd_control = false; + return ret; + } + + if (si_pi->mvdd_voltage_table.count == 0) { + pi->mvdd_control = false; + return -EINVAL; + } + + if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) + si_trim_voltage_table_to_fit_state_table(adev, + SISLANDS_MAX_NO_VREG_STEPS, + &si_pi->mvdd_voltage_table); + } + + if (si_pi->vddc_phase_shed_control) { + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table); + if (ret) + si_pi->vddc_phase_shed_control = false; + + if ((si_pi->vddc_phase_shed_table.count == 0) || + (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS)) + si_pi->vddc_phase_shed_control = false; + } + + return 0; +} + +static void si_populate_smc_voltage_table(struct amdgpu_device *adev, + const struct atom_voltage_table *voltage_table, + SISLANDS_SMC_STATETABLE *table) +{ + unsigned int i; + + for (i = 0; i < voltage_table->count; i++) + table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); +} + +static int si_populate_smc_voltage_tables(struct amdgpu_device *adev, + SISLANDS_SMC_STATETABLE *table) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + u8 i; + + if (si_pi->voltage_control_svi2) { + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc, + si_pi->svc_gpio_id); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd, + si_pi->svd_gpio_id); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type, + 2); + } else { + if (eg_pi->vddc_voltage_table.count) { + si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table); + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = + cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + + for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) { + if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) { + table->maxVDDCIndexInPPTable = i; + break; + } + } + } + + if (eg_pi->vddci_voltage_table.count) { + si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table); + + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] = + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); + } + + + if (si_pi->mvdd_voltage_table.count) { + si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table); + + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] = + cpu_to_be32(si_pi->mvdd_voltage_table.mask_low); + } + + if (si_pi->vddc_phase_shed_control) { + if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) { + si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table); + + table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] = + 
cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low); + + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay, + (u32)si_pi->vddc_phase_shed_table.phase_delay); + } else { + si_pi->vddc_phase_shed_control = false; + } + } + } + + return 0; +} + +static int si_populate_voltage_value(struct amdgpu_device *adev, + const struct atom_voltage_table *table, + u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage) +{ + unsigned int i; + + for (i = 0; i < table->count; i++) { + if (value <= table->entries[i].value) { + voltage->index = (u8)i; + voltage->value = cpu_to_be16(table->entries[i].value); + break; + } + } + + if (i >= table->count) + return -EINVAL; + + return 0; +} + +static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk, + SISLANDS_SMC_VOLTAGE_VALUE *voltage) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + + if (pi->mvdd_control) { + if (mclk <= pi->mvdd_split_frequency) + voltage->index = 0; + else + voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1; + + voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value); + } + return 0; +} + +static int si_get_std_voltage_value(struct amdgpu_device *adev, + SISLANDS_SMC_VOLTAGE_VALUE *voltage, + u16 *std_voltage) +{ + u16 v_index; + bool voltage_found = false; + *std_voltage = be16_to_cpu(voltage->value); + + if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) { + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) { + if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) + return -EINVAL; + + for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { + if (be16_to_cpu(voltage->value) == + (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { + voltage_found = true; + if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) + *std_voltage = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; + else + *std_voltage = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; + break; + } + } + + if (!voltage_found) { + for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { + if (be16_to_cpu(voltage->value) <= + (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { + voltage_found = true; + if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) + *std_voltage = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; + else + *std_voltage = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; + break; + } + } + } + } else { + if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count) + *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc; + } + } + + return 0; +} + +static int si_populate_std_voltage_value(struct amdgpu_device *adev, + u16 value, u8 index, + SISLANDS_SMC_VOLTAGE_VALUE *voltage) +{ + voltage->index = index; + voltage->value = cpu_to_be16(value); + + return 0; +} + +static int si_populate_phase_shedding_value(struct amdgpu_device *adev, + const struct amdgpu_phase_shedding_limits_table *limits, + u16 voltage, u32 sclk, u32 mclk, + SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage) +{ + unsigned int i; + + for (i = 0; i < limits->count; i++) { + if ((voltage <= limits->entries[i].voltage) && + (sclk <= limits->entries[i].sclk) && + (mclk <= limits->entries[i].mclk)) + 
break; + } + + smc_voltage->phase_settings = (u8)i; + + return 0; +} + +static int si_init_arb_table_index(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + int ret; + + ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + tmp &= 0x00FFFFFF; + tmp |= MC_CG_ARB_FREQ_F1 << 24; + + return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start, + tmp, si_pi->sram_end); +} + +static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev) +{ + return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); +} + +static int si_reset_to_default(struct amdgpu_device *adev) +{ + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int si_force_switch_to_arb_f0(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + int ret; + + ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + tmp = (tmp >> 24) & 0xff; + + if (tmp == MC_CG_ARB_FREQ_F0) + return 0; + + return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0); +} + +static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev, + u32 engine_clock) +{ + u32 dram_rows; + u32 dram_refresh_rate; + u32 mc_arb_rfsh_rate; + u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; + + if (tmp >= 4) + dram_rows = 16384; + else + dram_rows = 1 << (tmp + 10); + + dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3); + mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; + + return mc_arb_rfsh_rate; +} + +static int si_populate_memory_timing_parameters(struct amdgpu_device *adev, + struct rv7xx_pl *pl, + SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs) +{ + u32 dram_timing; + u32 dram_timing2; + u32 burst_time; + + arb_regs->mc_arb_rfsh_rate = + (u8)si_calculate_memory_refresh_rate(adev, pl->sclk); + + amdgpu_atombios_set_engine_dram_timings(adev, + pl->sclk, + pl->mclk); + + dram_timing = RREG32(MC_ARB_DRAM_TIMING); + dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); + burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; + + arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing); + arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2); + arb_regs->mc_arb_burst_time = (u8)burst_time; + + return 0; +} + +static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + unsigned int first_arb_set) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ps *state = si_get_ps(amdgpu_state); + SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 }; + int i, ret = 0; + + for (i = 0; i < state->performance_level_count; i++) { + ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs); + if (ret) + break; + ret = amdgpu_si_copy_bytes_to_smc(adev, + si_pi->arb_table_start + + offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) + + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i), + (u8 *)&arb_regs, + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet), + si_pi->sram_end); + if (ret) + break; + } + + return ret; +} + +static int si_program_memory_timing_parameters(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state) +{ + return si_do_program_memory_timing_parameters(adev, amdgpu_new_state, + SISLANDS_DRIVER_STATE_ARB_INDEX); +} + +static int 
si_populate_initial_mvdd_value(struct amdgpu_device *adev, + struct SISLANDS_SMC_VOLTAGE_VALUE *voltage) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + + if (pi->mvdd_control) + return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table, + si_pi->mvdd_bootup_value, voltage); + + return 0; +} + +static int si_populate_smc_initial_state(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_initial_state, + SISLANDS_SMC_STATETABLE *table) +{ + struct si_ps *initial_state = si_get_ps(amdgpu_initial_state); + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + u32 reg; + int ret; + + table->initialState.levels[0].mclk.vDLL_CNTL = + cpu_to_be32(si_pi->clock_registers.dll_cntl); + table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL = + cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl); + table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = + cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl); + table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = + cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl); + table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL = + cpu_to_be32(si_pi->clock_registers.mpll_func_cntl); + table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 = + cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1); + table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 = + cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2); + table->initialState.levels[0].mclk.vMPLL_SS = + cpu_to_be32(si_pi->clock_registers.mpll_ss1); + table->initialState.levels[0].mclk.vMPLL_SS2 = + cpu_to_be32(si_pi->clock_registers.mpll_ss2); + + table->initialState.levels[0].mclk.mclk_value = + cpu_to_be32(initial_state->performance_levels[0].mclk); + + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl); + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2); + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3); + table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4); + table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM = + cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum); + table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = + cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2); + + table->initialState.levels[0].sclk.sclk_value = + cpu_to_be32(initial_state->performance_levels[0].sclk); + + table->initialState.levels[0].arbRefreshState = + SISLANDS_INITIAL_STATE_ARB_INDEX; + + table->initialState.levels[0].ACIndex = 0; + + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, + initial_state->performance_levels[0].vddc, + &table->initialState.levels[0].vddc); + + if (!ret) { + u16 std_vddc; + + ret = si_get_std_voltage_value(adev, + &table->initialState.levels[0].vddc, + &std_vddc); + if (!ret) + si_populate_std_voltage_value(adev, std_vddc, + table->initialState.levels[0].vddc.index, + &table->initialState.levels[0].std_vddc); + } + + if (eg_pi->vddci_control) + si_populate_voltage_value(adev, + &eg_pi->vddci_voltage_table, + initial_state->performance_levels[0].vddci, + &table->initialState.levels[0].vddci); + + if (si_pi->vddc_phase_shed_control) + si_populate_phase_shedding_value(adev, + 
&adev->pm.dpm.dyn_state.phase_shedding_limits_table, + initial_state->performance_levels[0].vddc, + initial_state->performance_levels[0].sclk, + initial_state->performance_levels[0].mclk, + &table->initialState.levels[0].vddc); + + si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd); + + reg = CG_R(0xffff) | CG_L(0); + table->initialState.levels[0].aT = cpu_to_be32(reg); + table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp); + table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen; + + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { + table->initialState.levels[0].strobeMode = + si_get_strobe_mode_settings(adev, + initial_state->performance_levels[0].mclk); + + if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold) + table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG; + else + table->initialState.levels[0].mcFlags = 0; + } + + table->initialState.levelCount = 1; + + table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; + + table->initialState.levels[0].dpm2.MaxPS = 0; + table->initialState.levels[0].dpm2.NearTDPDec = 0; + table->initialState.levels[0].dpm2.AboveSafeInc = 0; + table->initialState.levels[0].dpm2.BelowSafeInc = 0; + table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0; + + reg = MIN_POWER_MASK | MAX_POWER_MASK; + table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg); + + reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); + + return 0; +} + +static int si_populate_smc_acpi_state(struct amdgpu_device *adev, + SISLANDS_SMC_STATETABLE *table) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl; + u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2; + u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3; + u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4; + u32 dll_cntl = si_pi->clock_registers.dll_cntl; + u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl; + u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl; + u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl; + u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl; + u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1; + u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2; + u32 reg; + int ret; + + table->ACPIState = table->initialState; + + table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (pi->acpi_vddc) { + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, + pi->acpi_vddc, &table->ACPIState.levels[0].vddc); + if (!ret) { + u16 std_vddc; + + ret = si_get_std_voltage_value(adev, + &table->ACPIState.levels[0].vddc, &std_vddc); + if (!ret) + si_populate_std_voltage_value(adev, std_vddc, + table->ACPIState.levels[0].vddc.index, + &table->ACPIState.levels[0].std_vddc); + } + table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen; + + if (si_pi->vddc_phase_shed_control) { + si_populate_phase_shedding_value(adev, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, + pi->acpi_vddc, + 0, + 0, + &table->ACPIState.levels[0].vddc); + } + } else { + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, + pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc); + if (!ret) { + u16 std_vddc; + + ret = 
si_get_std_voltage_value(adev, + &table->ACPIState.levels[0].vddc, &std_vddc); + + if (!ret) + si_populate_std_voltage_value(adev, std_vddc, + table->ACPIState.levels[0].vddc.index, + &table->ACPIState.levels[0].std_vddc); + } + table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev, + si_pi->sys_pcie_mask, + si_pi->boot_pcie_gen, + AMDGPU_PCIE_GEN1); + + if (si_pi->vddc_phase_shed_control) + si_populate_phase_shedding_value(adev, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, + pi->min_vddc_in_table, + 0, + 0, + &table->ACPIState.levels[0].vddc); + } + + if (pi->acpi_vddc) { + if (eg_pi->acpi_vddci) + si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table, + eg_pi->acpi_vddci, + &table->ACPIState.levels[0].vddci); + } + + mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET; + mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); + + dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS); + + spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; + spll_func_cntl_2 |= SCLK_MUX_SEL(4); + + table->ACPIState.levels[0].mclk.vDLL_CNTL = + cpu_to_be32(dll_cntl); + table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = + cpu_to_be32(mclk_pwrmgt_cntl); + table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = + cpu_to_be32(mpll_ad_func_cntl); + table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = + cpu_to_be32(mpll_dq_func_cntl); + table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL = + cpu_to_be32(mpll_func_cntl); + table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 = + cpu_to_be32(mpll_func_cntl_1); + table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 = + cpu_to_be32(mpll_func_cntl_2); + table->ACPIState.levels[0].mclk.vMPLL_SS = + cpu_to_be32(si_pi->clock_registers.mpll_ss1); + table->ACPIState.levels[0].mclk.vMPLL_SS2 = + cpu_to_be32(si_pi->clock_registers.mpll_ss2); + + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = + cpu_to_be32(spll_func_cntl); + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = + cpu_to_be32(spll_func_cntl_2); + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = + cpu_to_be32(spll_func_cntl_3); + table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = + cpu_to_be32(spll_func_cntl_4); + + table->ACPIState.levels[0].mclk.mclk_value = 0; + table->ACPIState.levels[0].sclk.sclk_value = 0; + + si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd); + + if (eg_pi->dynamic_ac_timing) + table->ACPIState.levels[0].ACIndex = 0; + + table->ACPIState.levels[0].dpm2.MaxPS = 0; + table->ACPIState.levels[0].dpm2.NearTDPDec = 0; + table->ACPIState.levels[0].dpm2.AboveSafeInc = 0; + table->ACPIState.levels[0].dpm2.BelowSafeInc = 0; + table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0; + + reg = MIN_POWER_MASK | MAX_POWER_MASK; + table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg); + + reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg); + + return 0; +} + +static int si_populate_ulv_state(struct amdgpu_device *adev, + SISLANDS_SMC_SWSTATE *state) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ulv_param *ulv = &si_pi->ulv; + u32 sclk_in_sr = 1350; /* ??? 
*/ + int ret; + + ret = si_convert_power_level_to_smc(adev, &ulv->pl, + &state->levels[0]); + if (!ret) { + if (eg_pi->sclk_deep_sleep) { + if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ) + state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS; + else + state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE; + } + if (ulv->one_pcie_lane_in_ulv) + state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1; + state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX); + state->levels[0].ACIndex = 1; + state->levels[0].std_vddc = state->levels[0].vddc; + state->levelCount = 1; + + state->flags |= PPSMC_SWSTATE_FLAG_DC; + } + + return ret; +} + +static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ulv_param *ulv = &si_pi->ulv; + SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 }; + int ret; + + ret = si_populate_memory_timing_parameters(adev, &ulv->pl, + &arb_regs); + if (ret) + return ret; + + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay, + ulv->volt_change_delay); + + ret = amdgpu_si_copy_bytes_to_smc(adev, + si_pi->arb_table_start + + offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) + + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX, + (u8 *)&arb_regs, + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet), + si_pi->sram_end); + + return ret; +} + +static void si_get_mvdd_configuration(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + + pi->mvdd_split_frequency = 30000; +} + +static int si_init_smc_table(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps; + const struct si_ulv_param *ulv = &si_pi->ulv; + SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable; + int ret; + u32 lane_width; + u32 vr_hot_gpio; + + si_populate_smc_voltage_tables(adev, table); + + switch (adev->pm.int_thermal_type) { + case THERMAL_TYPE_SI: + case THERMAL_TYPE_EMC2103_WITH_INTERNAL: + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; + break; + case THERMAL_TYPE_NONE: + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; + break; + default: + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; + break; + } + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) + table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) { + if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819)) + table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT; + } + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) + table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) + table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY) + table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH; + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) { + table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO; + vr_hot_gpio = adev->pm.dpm.backbias_response_time; + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio, + vr_hot_gpio); + } + + ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table); + if (ret) + return ret; + + ret = si_populate_smc_acpi_state(adev, table); + if (ret) + return ret; + + table->driverState = 
table->initialState; + + ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state, + SISLANDS_INITIAL_STATE_ARB_INDEX); + if (ret) + return ret; + + if (ulv->supported && ulv->pl.vddc) { + ret = si_populate_ulv_state(adev, &table->ULVState); + if (ret) + return ret; + + ret = si_program_ulv_memory_timing_parameters(adev); + if (ret) + return ret; + + WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control); + WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter); + + lane_width = amdgpu_get_pcie_lanes(adev); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width); + } else { + table->ULVState = table->initialState; + } + + return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start, + (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE), + si_pi->sram_end); +} + +static int si_calculate_sclk_params(struct amdgpu_device *adev, + u32 engine_clock, + SISLANDS_SMC_SCLK_VALUE *sclk) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + struct atom_clock_dividers dividers; + u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl; + u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2; + u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3; + u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4; + u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2; + u64 tmp; + u32 reference_clock = adev->clock.spll.reference_freq; + u32 reference_divider; + u32 fbdiv; + int ret; + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + engine_clock, false, &dividers); + if (ret) + return ret; + + reference_divider = 1 + dividers.ref_div; + + tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384; + do_div(tmp, reference_clock); + fbdiv = (u32) tmp; + + spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK); + spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div); + spll_func_cntl |= SPLL_PDIV_A(dividers.post_div); + + spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; + spll_func_cntl_2 |= SCLK_MUX_SEL(2); + + spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; + spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); + spll_func_cntl_3 |= SPLL_DITHEN; + + if (pi->sclk_ss) { + struct amdgpu_atom_ss ss; + u32 vco_freq = engine_clock * dividers.post_div; + + if (amdgpu_atombios_get_asic_ss_info(adev, &ss, + ASIC_INTERNAL_ENGINE_SS, vco_freq)) { + u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); + u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); + + cg_spll_spread_spectrum &= ~CLK_S_MASK; + cg_spll_spread_spectrum |= CLK_S(clk_s); + cg_spll_spread_spectrum |= SSEN; + + cg_spll_spread_spectrum_2 &= ~CLK_V_MASK; + cg_spll_spread_spectrum_2 |= CLK_V(clk_v); + } + } + + sclk->sclk_value = engine_clock; + sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl; + sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2; + sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3; + sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4; + sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum; + sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2; + + return 0; +} + +static int si_populate_sclk_value(struct amdgpu_device *adev, + u32 engine_clock, + SISLANDS_SMC_SCLK_VALUE *sclk) +{ + SISLANDS_SMC_SCLK_VALUE sclk_tmp; + int ret; + + ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp); + if (!ret) { + sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value); + sclk->vCG_SPLL_FUNC_CNTL = 
cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL); + sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2); + sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3); + sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4); + sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM); + sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2); + } + + return ret; +} + +static int si_populate_mclk_value(struct amdgpu_device *adev, + u32 engine_clock, + u32 memory_clock, + SISLANDS_SMC_MCLK_VALUE *mclk, + bool strobe_mode, + bool dll_state_on) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + u32 dll_cntl = si_pi->clock_registers.dll_cntl; + u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl; + u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl; + u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl; + u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl; + u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1; + u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2; + u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1; + u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2; + struct atom_mpll_param mpll_param; + int ret; + + ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param); + if (ret) + return ret; + + mpll_func_cntl &= ~BWCTRL_MASK; + mpll_func_cntl |= BWCTRL(mpll_param.bwcntl); + + mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK); + mpll_func_cntl_1 |= CLKF(mpll_param.clkf) | + CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode); + + mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK; + mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div); + + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { + mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK); + mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) | + YCLK_POST_DIV(mpll_param.post_div); + } + + if (pi->mclk_ss) { + struct amdgpu_atom_ss ss; + u32 freq_nom; + u32 tmp; + u32 reference_clock = adev->clock.mpll.reference_freq; + + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) + freq_nom = memory_clock * 4; + else + freq_nom = memory_clock * 2; + + tmp = freq_nom / reference_clock; + tmp = tmp * tmp; + if (amdgpu_atombios_get_asic_ss_info(adev, &ss, + ASIC_INTERNAL_MEMORY_SS, freq_nom)) { + u32 clks = reference_clock * 5 / ss.rate; + u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); + + mpll_ss1 &= ~CLKV_MASK; + mpll_ss1 |= CLKV(clkv); + + mpll_ss2 &= ~CLKS_MASK; + mpll_ss2 |= CLKS(clks); + } + } + + mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK; + mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed); + + if (dll_state_on) + mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB; + else + mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); + + mclk->mclk_value = cpu_to_be32(memory_clock); + mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl); + mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1); + mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2); + mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); + mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); + mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); + mclk->vDLL_CNTL = cpu_to_be32(dll_cntl); + mclk->vMPLL_SS = cpu_to_be32(mpll_ss1); + mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2); + + return 0; +} + +static void si_populate_smc_sp(struct amdgpu_device *adev, + struct amdgpu_ps 
*amdgpu_state, + SISLANDS_SMC_SWSTATE *smc_state) +{ + struct si_ps *ps = si_get_ps(amdgpu_state); + struct rv7xx_power_info *pi = rv770_get_pi(adev); + int i; + + for (i = 0; i < ps->performance_level_count - 1; i++) + smc_state->levels[i].bSP = cpu_to_be32(pi->dsp); + + smc_state->levels[ps->performance_level_count - 1].bSP = + cpu_to_be32(pi->psp); +} + +static int si_convert_power_level_to_smc(struct amdgpu_device *adev, + struct rv7xx_pl *pl, + SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + int ret; + bool dll_state_on; + u16 std_vddc; + bool gmc_pg = false; + + if (eg_pi->pcie_performance_request && + (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID)) + level->gen2PCIE = (u8)si_pi->force_pcie_gen; + else + level->gen2PCIE = (u8)pl->pcie_gen; + + ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk); + if (ret) + return ret; + + level->mcFlags = 0; + + if (pi->mclk_stutter_mode_threshold && + (pl->mclk <= pi->mclk_stutter_mode_threshold) && + !eg_pi->uvd_enabled && + (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) && + (adev->pm.dpm.new_active_crtc_count <= 2)) { + level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN; + + if (gmc_pg) + level->mcFlags |= SISLANDS_SMC_MC_PG_EN; + } + + if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { + if (pl->mclk > pi->mclk_edc_enable_threshold) + level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG; + + if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold) + level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG; + + level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk); + + if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) { + if (si_get_mclk_frequency_ratio(pl->mclk, true) >= + ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf)) + dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; + else + dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false; + } else { + dll_state_on = false; + } + } else { + level->strobeMode = si_get_strobe_mode_settings(adev, + pl->mclk); + + dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? 
true : false; + } + + ret = si_populate_mclk_value(adev, + pl->sclk, + pl->mclk, + &level->mclk, + (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on); + if (ret) + return ret; + + ret = si_populate_voltage_value(adev, + &eg_pi->vddc_voltage_table, + pl->vddc, &level->vddc); + if (ret) + return ret; + + + ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc); + if (ret) + return ret; + + ret = si_populate_std_voltage_value(adev, std_vddc, + level->vddc.index, &level->std_vddc); + if (ret) + return ret; + + if (eg_pi->vddci_control) { + ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table, + pl->vddci, &level->vddci); + if (ret) + return ret; + } + + if (si_pi->vddc_phase_shed_control) { + ret = si_populate_phase_shedding_value(adev, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, + pl->vddc, + pl->sclk, + pl->mclk, + &level->vddc); + if (ret) + return ret; + } + + level->MaxPoweredUpCU = si_pi->max_cu; + + ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd); + + return ret; +} + +static int si_populate_smc_t(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + SISLANDS_SMC_SWSTATE *smc_state) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct si_ps *state = si_get_ps(amdgpu_state); + u32 a_t; + u32 t_l, t_h; + u32 high_bsp; + int i, ret; + + if (state->performance_level_count >= 9) + return -EINVAL; + + if (state->performance_level_count < 2) { + a_t = CG_R(0xffff) | CG_L(0); + smc_state->levels[0].aT = cpu_to_be32(a_t); + return 0; + } + + smc_state->levels[0].aT = cpu_to_be32(0); + + for (i = 0; i <= state->performance_level_count - 2; i++) { + ret = r600_calculate_at( + (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1), + 100 * R600_AH_DFLT, + state->performance_levels[i + 1].sclk, + state->performance_levels[i].sclk, + &t_l, + &t_h); + + if (ret) { + t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT; + t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT; + } + + a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK; + a_t |= CG_R(t_l * pi->bsp / 20000); + smc_state->levels[i].aT = cpu_to_be32(a_t); + + high_bsp = (i == state->performance_level_count - 2) ? + pi->pbsp : pi->bsp; + a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000); + smc_state->levels[i + 1].aT = cpu_to_be32(a_t); + } + + return 0; +} + +static int si_disable_ulv(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ulv_param *ulv = &si_pi->ulv; + + if (ulv->supported) + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? + 0 : -EINVAL; + + return 0; +} + +static bool si_is_state_ulv_compatible(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + const struct si_power_info *si_pi = si_get_pi(adev); + const struct si_ulv_param *ulv = &si_pi->ulv; + const struct si_ps *state = si_get_ps(amdgpu_state); + int i; + + if (state->performance_levels[0].mclk != ulv->pl.mclk) + return false; + + /* XXX validate against display requirements! 
*/ + + for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) { + if (adev->clock.current_dispclk <= + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) { + if (ulv->pl.vddc < + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v) + return false; + } + } + + if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0)) + return false; + + return true; +} + +static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state) +{ + const struct si_power_info *si_pi = si_get_pi(adev); + const struct si_ulv_param *ulv = &si_pi->ulv; + + if (ulv->supported) { + if (si_is_state_ulv_compatible(adev, amdgpu_new_state)) + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? + 0 : -EINVAL; + } + return 0; +} + +static int si_convert_power_state_to_smc(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + SISLANDS_SMC_SWSTATE *smc_state) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ps *state = si_get_ps(amdgpu_state); + int i, ret; + u32 threshold; + u32 sclk_in_sr = 1350; /* ??? */ + + if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS) + return -EINVAL; + + threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100; + + if (amdgpu_state->vclk && amdgpu_state->dclk) { + eg_pi->uvd_enabled = true; + if (eg_pi->smu_uvd_hs) + smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD; + } else { + eg_pi->uvd_enabled = false; + } + + if (state->dc_compatible) + smc_state->flags |= PPSMC_SWSTATE_FLAG_DC; + + smc_state->levelCount = 0; + for (i = 0; i < state->performance_level_count; i++) { + if (eg_pi->sclk_deep_sleep) { + if ((i == 0) || si_pi->sclk_deep_sleep_above_low) { + if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ) + smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS; + else + smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE; + } + } + + ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i], + &smc_state->levels[i]); + smc_state->levels[i].arbRefreshState = + (u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i); + + if (ret) + return ret; + + if (ni_pi->enable_power_containment) + smc_state->levels[i].displayWatermark = + (state->performance_levels[i].sclk < threshold) ? + PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH; + else + smc_state->levels[i].displayWatermark = (i < 2) ? 
+ PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH; + + if (eg_pi->dynamic_ac_timing) + smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i; + else + smc_state->levels[i].ACIndex = 0; + + smc_state->levelCount++; + } + + si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_watermark_threshold, + threshold / 512); + + si_populate_smc_sp(adev, amdgpu_state, smc_state); + + ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state); + if (ret) + ni_pi->enable_power_containment = false; + + ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state); + if (ret) + ni_pi->enable_sq_ramping = false; + + return si_populate_smc_t(adev, amdgpu_state, smc_state); +} + +static int si_upload_sw_state(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ps *new_state = si_get_ps(amdgpu_new_state); + int ret; + u32 address = si_pi->state_table_start + + offsetof(SISLANDS_SMC_STATETABLE, driverState); + u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) + + ((new_state->performance_level_count - 1) * + sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL)); + SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState; + + memset(smc_state, 0, state_size); + + ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state); + if (ret) + return ret; + + return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state, + state_size, si_pi->sram_end); +} + +static int si_upload_ulv_state(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ulv_param *ulv = &si_pi->ulv; + int ret = 0; + + if (ulv->supported && ulv->pl.vddc) { + u32 address = si_pi->state_table_start + + offsetof(SISLANDS_SMC_STATETABLE, ULVState); + SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState; + u32 state_size = sizeof(SISLANDS_SMC_SWSTATE); + + memset(smc_state, 0, state_size); + + ret = si_populate_ulv_state(adev, smc_state); + if (!ret) + ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state, + state_size, si_pi->sram_end); + } + + return ret; +} + +static int si_upload_smc_data(struct amdgpu_device *adev) +{ + struct amdgpu_crtc *amdgpu_crtc = NULL; + int i; + + if (adev->pm.dpm.new_active_crtc_count == 0) + return 0; + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->pm.dpm.new_active_crtcs & (1 << i)) { + amdgpu_crtc = adev->mode_info.crtcs[i]; + break; + } + } + + if (amdgpu_crtc == NULL) + return 0; + + if (amdgpu_crtc->line_time <= 0) + return 0; + + if (si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_crtc_index, + amdgpu_crtc->crtc_id) != PPSMC_Result_OK) + return 0; + + if (si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min, + amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK) + return 0; + + if (si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max, + amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK) + return 0; + + return 0; +} + +static int si_set_mc_special_registers(struct amdgpu_device *adev, + struct si_mc_reg_table *table) +{ + u8 i, j, k; + u32 temp_reg; + + for (i = 0, j = table->last; i < table->last; i++) { + if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + switch (table->mc_reg_address[i].s1) { + case MC_SEQ_MISC1: + temp_reg = RREG32(MC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS; + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP; + for (k = 0; k < 
table->num_entries; k++) + table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | + ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + j++; + if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + + temp_reg = RREG32(MC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS; + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + j++; + if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + + if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) { + table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD; + table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD; + for (k = 0; k < table->num_entries; k++) + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + j++; + if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + } + break; + case MC_SEQ_RESERVE_M: + temp_reg = RREG32(MC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1; + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP; + for(k = 0; k < table->num_entries; k++) + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + j++; + if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + break; + default: + break; + } + } + + table->last = j; + + return 0; +} + +static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) +{ + bool result = true; + switch (in_reg) { + case MC_SEQ_RAS_TIMING: + *out_reg = MC_SEQ_RAS_TIMING_LP; + break; + case MC_SEQ_CAS_TIMING: + *out_reg = MC_SEQ_CAS_TIMING_LP; + break; + case MC_SEQ_MISC_TIMING: + *out_reg = MC_SEQ_MISC_TIMING_LP; + break; + case MC_SEQ_MISC_TIMING2: + *out_reg = MC_SEQ_MISC_TIMING2_LP; + break; + case MC_SEQ_RD_CTL_D0: + *out_reg = MC_SEQ_RD_CTL_D0_LP; + break; + case MC_SEQ_RD_CTL_D1: + *out_reg = MC_SEQ_RD_CTL_D1_LP; + break; + case MC_SEQ_WR_CTL_D0: + *out_reg = MC_SEQ_WR_CTL_D0_LP; + break; + case MC_SEQ_WR_CTL_D1: + *out_reg = MC_SEQ_WR_CTL_D1_LP; + break; + case MC_PMG_CMD_EMRS: + *out_reg = MC_SEQ_PMG_CMD_EMRS_LP; + break; + case MC_PMG_CMD_MRS: + *out_reg = MC_SEQ_PMG_CMD_MRS_LP; + break; + case MC_PMG_CMD_MRS1: + *out_reg = MC_SEQ_PMG_CMD_MRS1_LP; + break; + case MC_SEQ_PMG_TIMING: + *out_reg = MC_SEQ_PMG_TIMING_LP; + break; + case MC_PMG_CMD_MRS2: + *out_reg = MC_SEQ_PMG_CMD_MRS2_LP; + break; + case MC_SEQ_WR_CTL_2: + *out_reg = MC_SEQ_WR_CTL_2_LP; + break; + default: + result = false; + break; + } + + return result; +} + +static void si_set_valid_flag(struct si_mc_reg_table *table) +{ + u8 i, j; + + for (i = 0; i < table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) { + table->valid_flag |= 1 << i; + break; + } + } + } +} + +static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table) +{ + u32 i; + u16 address; + + for (i = 0; i < table->last; i++) + table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? 
+ address : table->mc_reg_address[i].s1; + +} + +static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table, + struct si_mc_reg_table *si_table) +{ + u8 i, j; + + if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + if (table->num_entries > MAX_AC_TIMING_ENTRIES) + return -EINVAL; + + for (i = 0; i < table->last; i++) + si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + si_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + si_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + si_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + si_table->num_entries = table->num_entries; + + return 0; +} + +static int si_initialize_mc_reg_table(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct atom_mc_reg_table *table; + struct si_mc_reg_table *si_table = &si_pi->mc_reg_table; + u8 module_index = rv770_get_memory_module_index(adev); + int ret; + + table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING)); + WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING)); + WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING)); + WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2)); + WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS)); + WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS)); + WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1)); + WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0)); + WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1)); + WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0)); + WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1)); + WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING)); + WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); + WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); + + ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table); + if (ret) + goto init_mc_done; + + ret = si_copy_vbios_mc_reg_table(table, si_table); + if (ret) + goto init_mc_done; + + si_set_s0_mc_reg_index(si_table); + + ret = si_set_mc_special_registers(adev, si_table); + if (ret) + goto init_mc_done; + + si_set_valid_flag(si_table); + +init_mc_done: + kfree(table); + + return ret; + +} + +static void si_populate_mc_reg_addresses(struct amdgpu_device *adev, + SMC_SIslands_MCRegisters *mc_reg_table) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 i, j; + + for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) { + if (si_pi->mc_reg_table.valid_flag & (1 << j)) { + if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + break; + mc_reg_table->address[i].s0 = + cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = + cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + mc_reg_table->last = (u8)i; +} + +static void si_convert_mc_registers(const struct si_mc_reg_entry *entry, + SMC_SIslands_MCRegisterSet *data, + u32 num_entries, u32 valid_flag) +{ + u32 i, j; + + for(i = 0, j = 0; j < num_entries; j++) { + if (valid_flag & (1 << j)) { + data->value[i] = cpu_to_be32(entry->mc_data[j]); + i++; + } + } +} + +static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev, + struct rv7xx_pl *pl, + SMC_SIslands_MCRegisterSet *mc_reg_table_data) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 i = 0; + + for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) { + if 
(pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) + break; + } + + if ((i == si_pi->mc_reg_table.num_entries) && (i > 0)) + --i; + + si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, si_pi->mc_reg_table.last, + si_pi->mc_reg_table.valid_flag); +} + +static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + SMC_SIslands_MCRegisters *mc_reg_table) +{ + struct si_ps *state = si_get_ps(amdgpu_state); + int i; + + for (i = 0; i < state->performance_level_count; i++) { + si_convert_mc_reg_table_entry_to_smc(adev, + &state->performance_levels[i], + &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]); + } +} + +static int si_populate_mc_reg_table(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_boot_state) +{ + struct si_ps *boot_state = si_get_ps(amdgpu_boot_state); + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ulv_param *ulv = &si_pi->ulv; + SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table; + + memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters)); + + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1); + + si_populate_mc_reg_addresses(adev, smc_mc_reg_table); + + si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0], + &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]); + + si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0], + &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT], + si_pi->mc_reg_table.last, + si_pi->mc_reg_table.valid_flag); + + if (ulv->supported && ulv->pl.vddc != 0) + si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl, + &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]); + else + si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0], + &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT], + si_pi->mc_reg_table.last, + si_pi->mc_reg_table.valid_flag); + + si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table); + + return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start, + (u8 *)smc_mc_reg_table, + sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end); +} + +static int si_upload_mc_reg_table(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state) +{ + struct si_ps *new_state = si_get_ps(amdgpu_new_state); + struct si_power_info *si_pi = si_get_pi(adev); + u32 address = si_pi->mc_reg_table_start + + offsetof(SMC_SIslands_MCRegisters, + data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]); + SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table; + + memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters)); + + si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table); + + return amdgpu_si_copy_bytes_to_smc(adev, address, + (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT], + sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count, + si_pi->sram_end); +} + +static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable) +{ + if (enable) + WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); + else + WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); +} + +static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct si_ps *state = si_get_ps(amdgpu_state); + int i; + u16 pcie_speed, max_speed = 0; + + for (i = 0; i < state->performance_level_count; i++) { + pcie_speed = 
state->performance_levels[i].pcie_gen; + if (max_speed < pcie_speed) + max_speed = pcie_speed; + } + return max_speed; +} + +static u16 si_get_current_pcie_speed(struct amdgpu_device *adev) +{ + u32 speed_cntl; + + speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK; + speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT; + + return (u16)speed_cntl; +} + +static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + struct amdgpu_ps *amdgpu_current_state) +{ + struct si_power_info *si_pi = si_get_pi(adev); + enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); + enum amdgpu_pcie_gen current_link_speed; + + if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID) + current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state); + else + current_link_speed = si_pi->force_pcie_gen; + + si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; + si_pi->pspp_notify_required = false; + if (target_link_speed > current_link_speed) { + switch (target_link_speed) { +#if defined(CONFIG_ACPI) + case AMDGPU_PCIE_GEN3: + if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) + break; + si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2; + if (current_link_speed == AMDGPU_PCIE_GEN2) + break; + case AMDGPU_PCIE_GEN2: + if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) + break; +#endif + default: + si_pi->force_pcie_gen = si_get_current_pcie_speed(adev); + break; + } + } else { + if (target_link_speed < current_link_speed) + si_pi->pspp_notify_required = true; + } +} + +static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + struct amdgpu_ps *amdgpu_current_state) +{ + struct si_power_info *si_pi = si_get_pi(adev); + enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); + u8 request; + + if (si_pi->pspp_notify_required) { + if (target_link_speed == AMDGPU_PCIE_GEN3) + request = PCIE_PERF_REQ_PECI_GEN3; + else if (target_link_speed == AMDGPU_PCIE_GEN2) + request = PCIE_PERF_REQ_PECI_GEN2; + else + request = PCIE_PERF_REQ_PECI_GEN1; + + if ((request == PCIE_PERF_REQ_PECI_GEN1) && + (si_get_current_pcie_speed(adev) > 0)) + return; + +#if defined(CONFIG_ACPI) + amdgpu_acpi_pcie_performance_request(adev, request, false); +#endif + } +} + +#if 0 +static int si_ds_request(struct amdgpu_device *adev, + bool ds_status_on, u32 count_write) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + + if (eg_pi->sclk_deep_sleep) { + if (ds_status_on) + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) == + PPSMC_Result_OK) ? + 0 : -EINVAL; + else + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) == + PPSMC_Result_OK) ? 
0 : -EINVAL; + } + return 0; +} +#endif + +static void si_set_max_cu_value(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + if (adev->asic_type == CHIP_VERDE) { + switch (adev->pdev->device) { + case 0x6820: + case 0x6825: + case 0x6821: + case 0x6823: + case 0x6827: + si_pi->max_cu = 10; + break; + case 0x682D: + case 0x6824: + case 0x682F: + case 0x6826: + si_pi->max_cu = 8; + break; + case 0x6828: + case 0x6830: + case 0x6831: + case 0x6838: + case 0x6839: + case 0x683D: + si_pi->max_cu = 10; + break; + case 0x683B: + case 0x683F: + case 0x6829: + si_pi->max_cu = 8; + break; + default: + si_pi->max_cu = 0; + break; + } + } else { + si_pi->max_cu = 0; + } +} + +static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev, + struct amdgpu_clock_voltage_dependency_table *table) +{ + u32 i; + int j; + u16 leakage_voltage; + + if (table) { + for (i = 0; i < table->count; i++) { + switch (si_get_leakage_voltage_from_leakage_index(adev, + table->entries[i].v, + &leakage_voltage)) { + case 0: + table->entries[i].v = leakage_voltage; + break; + case -EAGAIN: + return -EINVAL; + case -EINVAL: + default: + break; + } + } + + for (j = (table->count - 2); j >= 0; j--) { + table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ? + table->entries[j].v : table->entries[j + 1].v; + } + } + return 0; +} + +static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev) +{ + int ret = 0; + + ret = si_patch_single_dependency_table_based_on_leakage(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk); + if (ret) + DRM_ERROR("Could not patch vddc_on_sclk leakage table\n"); + ret = si_patch_single_dependency_table_based_on_leakage(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk); + if (ret) + DRM_ERROR("Could not patch vddc_on_mclk leakage table\n"); + ret = si_patch_single_dependency_table_based_on_leakage(adev, + &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk); + if (ret) + DRM_ERROR("Could not patch vddci_on_mclk leakage table\n"); + return ret; +} + +static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + struct amdgpu_ps *amdgpu_current_state) +{ + u32 lane_width; + u32 new_lane_width = + (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; + u32 current_lane_width = + (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT; + + if (new_lane_width != current_lane_width) { + amdgpu_set_pcie_lanes(adev, new_lane_width); + lane_width = amdgpu_get_pcie_lanes(adev); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width); + } +} + +static void si_dpm_setup_asic(struct amdgpu_device *adev) +{ + si_read_clock_registers(adev); + si_enable_acpi_power_management(adev); +} + +static int si_thermal_enable_alert(struct amdgpu_device *adev, + bool enable) +{ + u32 thermal_int = RREG32(CG_THERMAL_INT); + + if (enable) { + PPSMC_Result result; + + thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); + WREG32(CG_THERMAL_INT, thermal_int); + result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); + if (result != PPSMC_Result_OK) { + DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); + return -EINVAL; + } + } else { + thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; + WREG32(CG_THERMAL_INT, thermal_int); + } + + return 0; +} + +static int si_thermal_set_temperature_range(struct amdgpu_device *adev, + 
int min_temp, int max_temp) +{ + int low_temp = 0 * 1000; + int high_temp = 255 * 1000; + + if (low_temp < min_temp) + low_temp = min_temp; + if (high_temp > max_temp) + high_temp = max_temp; + if (high_temp < low_temp) { + DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); + return -EINVAL; + } + + WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK); + WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK); + WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK); + + adev->pm.dpm.thermal.min_temp = low_temp; + adev->pm.dpm.thermal.max_temp = high_temp; + + return 0; +} + +static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + + if (si_pi->fan_ctrl_is_in_default_mode) { + tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; + si_pi->fan_ctrl_default_mode = tmp; + tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; + si_pi->t_min = tmp; + si_pi->fan_ctrl_is_in_default_mode = false; + } + + tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; + tmp |= TMIN(0); + WREG32(CG_FDO_CTRL2, tmp); + + tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; + tmp |= FDO_PWM_MODE(mode); + WREG32(CG_FDO_CTRL2, tmp); +} + +static int si_thermal_setup_fan_table(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE }; + u32 duty100; + u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2; + u16 fdo_min, slope1, slope2; + u32 reference_clock, tmp; + int ret; + u64 tmp64; + + if (!si_pi->fan_table_start) { + adev->pm.dpm.fan.ucode_fan_control = false; + return 0; + } + + duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; + + if (duty100 == 0) { + adev->pm.dpm.fan.ucode_fan_control = false; + return 0; + } + + tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100; + do_div(tmp64, 10000); + fdo_min = (u16)tmp64; + + t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min; + t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med; + + pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min; + pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med; + + slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100); + fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100); + fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100); + fan_table.slope1 = cpu_to_be16(slope1); + fan_table.slope2 = cpu_to_be16(slope2); + fan_table.fdo_min = cpu_to_be16(fdo_min); + fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst); + fan_table.hys_up = cpu_to_be16(1); + fan_table.hys_slope = cpu_to_be16(1); + fan_table.temp_resp_lim = cpu_to_be16(5); + reference_clock = amdgpu_asic_get_xclk(adev); + + fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay * + reference_clock) / 1600); + fan_table.fdo_max = cpu_to_be16((u16)duty100); + + tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; + fan_table.temp_src = (uint8_t)tmp; + + ret = amdgpu_si_copy_bytes_to_smc(adev, + si_pi->fan_table_start, + (u8 *)(&fan_table), + sizeof(fan_table), + si_pi->sram_end); + + if (ret) { + DRM_ERROR("Failed to load fan table to the SMC."); + adev->pm.dpm.fan.ucode_fan_control = false; + } + + return ret; +} + +static int 
si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + PPSMC_Result ret; + + ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl); + if (ret == PPSMC_Result_OK) { + si_pi->fan_is_controlled_by_smc = true; + return 0; + } else { + return -EINVAL; + } +} + +static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + PPSMC_Result ret; + + ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl); + + if (ret == PPSMC_Result_OK) { + si_pi->fan_is_controlled_by_smc = false; + return 0; + } else { + return -EINVAL; + } +} + +static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev, + u32 *speed) +{ + u32 duty, duty100; + u64 tmp64; + + if (adev->pm.no_fan) + return -ENOENT; + + duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; + duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT; + + if (duty100 == 0) + return -EINVAL; + + tmp64 = (u64)duty * 100; + do_div(tmp64, duty100); + *speed = (u32)tmp64; + + if (*speed > 100) + *speed = 100; + + return 0; +} + +static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev, + u32 speed) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + u32 duty, duty100; + u64 tmp64; + + if (adev->pm.no_fan) + return -ENOENT; + + if (si_pi->fan_is_controlled_by_smc) + return -EINVAL; + + if (speed > 100) + return -EINVAL; + + duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; + + if (duty100 == 0) + return -EINVAL; + + tmp64 = (u64)speed * duty100; + do_div(tmp64, 100); + duty = (u32)tmp64; + + tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK; + tmp |= FDO_STATIC_DUTY(duty); + WREG32(CG_FDO_CTRL0, tmp); + + return 0; +} + +static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode) +{ + if (mode) { + /* stop auto-manage */ + if (adev->pm.dpm.fan.ucode_fan_control) + si_fan_ctrl_stop_smc_fan_control(adev); + si_fan_ctrl_set_static_mode(adev, mode); + } else { + /* restart auto-manage */ + if (adev->pm.dpm.fan.ucode_fan_control) + si_thermal_start_smc_fan_control(adev); + else + si_fan_ctrl_set_default_mode(adev); + } +} + +static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + + if (si_pi->fan_is_controlled_by_smc) + return 0; + + tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; + return (tmp >> FDO_PWM_MODE_SHIFT); +} + +#if 0 +static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev, + u32 *speed) +{ + u32 tach_period; + u32 xclk = amdgpu_asic_get_xclk(adev); + + if (adev->pm.no_fan) + return -ENOENT; + + if (adev->pm.fan_pulses_per_revolution == 0) + return -ENOENT; + + tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT; + if (tach_period == 0) + return -ENOENT; + + *speed = 60 * xclk * 10000 / tach_period; + + return 0; +} + +static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev, + u32 speed) +{ + u32 tach_period, tmp; + u32 xclk = amdgpu_asic_get_xclk(adev); + + if (adev->pm.no_fan) + return -ENOENT; + + if (adev->pm.fan_pulses_per_revolution == 0) + return -ENOENT; + + if ((speed < adev->pm.fan_min_rpm) || + (speed > adev->pm.fan_max_rpm)) + return -EINVAL; + + if (adev->pm.dpm.fan.ucode_fan_control) + si_fan_ctrl_stop_smc_fan_control(adev); + + tach_period = 60 * xclk * 10000 / (8 * speed); + tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK; + tmp |= TARGET_PERIOD(tach_period); + 
WREG32(CG_TACH_CTRL, tmp); + + si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM); + + return 0; +} +#endif + +static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + + if (!si_pi->fan_ctrl_is_in_default_mode) { + tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; + tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode); + WREG32(CG_FDO_CTRL2, tmp); + + tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; + tmp |= TMIN(si_pi->t_min); + WREG32(CG_FDO_CTRL2, tmp); + si_pi->fan_ctrl_is_in_default_mode = true; + } +} + +static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev) +{ + if (adev->pm.dpm.fan.ucode_fan_control) { + si_fan_ctrl_start_smc_fan_control(adev); + si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC); + } +} + +static void si_thermal_initialize(struct amdgpu_device *adev) +{ + u32 tmp; + + if (adev->pm.fan_pulses_per_revolution) { + tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; + tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1); + WREG32(CG_TACH_CTRL, tmp); + } + + tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK; + tmp |= TACH_PWM_RESP_RATE(0x28); + WREG32(CG_FDO_CTRL2, tmp); +} + +static int si_thermal_start_thermal_controller(struct amdgpu_device *adev) +{ + int ret; + + si_thermal_initialize(adev); + ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); + if (ret) + return ret; + ret = si_thermal_enable_alert(adev, true); + if (ret) + return ret; + if (adev->pm.dpm.fan.ucode_fan_control) { + ret = si_halt_smc(adev); + if (ret) + return ret; + ret = si_thermal_setup_fan_table(adev); + if (ret) + return ret; + ret = si_resume_smc(adev); + if (ret) + return ret; + si_thermal_start_smc_fan_control(adev); + } + + return 0; +} + +static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev) +{ + if (!adev->pm.no_fan) { + si_fan_ctrl_set_default_mode(adev); + si_fan_ctrl_stop_smc_fan_control(adev); + } +} + +static int si_dpm_enable(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; + int ret; + + if (amdgpu_si_is_smc_running(adev)) + return -EINVAL; + if (pi->voltage_control || si_pi->voltage_control_svi2) + si_enable_voltage_control(adev, true); + if (pi->mvdd_control) + si_get_mvdd_configuration(adev); + if (pi->voltage_control || si_pi->voltage_control_svi2) { + ret = si_construct_voltage_tables(adev); + if (ret) { + DRM_ERROR("si_construct_voltage_tables failed\n"); + return ret; + } + } + if (eg_pi->dynamic_ac_timing) { + ret = si_initialize_mc_reg_table(adev); + if (ret) + eg_pi->dynamic_ac_timing = false; + } + if (pi->dynamic_ss) + si_enable_spread_spectrum(adev, true); + if (pi->thermal_protection) + si_enable_thermal_protection(adev, true); + si_setup_bsp(adev); + si_program_git(adev); + si_program_tp(adev); + si_program_tpp(adev); + si_program_sstp(adev); + si_enable_display_gap(adev); + si_program_vc(adev); + ret = si_upload_firmware(adev); + if (ret) { + DRM_ERROR("si_upload_firmware failed\n"); + return ret; + } + ret = si_process_firmware_header(adev); + if (ret) { + DRM_ERROR("si_process_firmware_header failed\n"); + return ret; + } + ret = si_initial_switch_from_arb_f0_to_f1(adev); + if (ret) { + DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n"); + return ret; + } + ret = si_init_smc_table(adev); + if (ret) { + 
DRM_ERROR("si_init_smc_table failed\n"); + return ret; + } + ret = si_init_smc_spll_table(adev); + if (ret) { + DRM_ERROR("si_init_smc_spll_table failed\n"); + return ret; + } + ret = si_init_arb_table_index(adev); + if (ret) { + DRM_ERROR("si_init_arb_table_index failed\n"); + return ret; + } + if (eg_pi->dynamic_ac_timing) { + ret = si_populate_mc_reg_table(adev, boot_ps); + if (ret) { + DRM_ERROR("si_populate_mc_reg_table failed\n"); + return ret; + } + } + ret = si_initialize_smc_cac_tables(adev); + if (ret) { + DRM_ERROR("si_initialize_smc_cac_tables failed\n"); + return ret; + } + ret = si_initialize_hardware_cac_manager(adev); + if (ret) { + DRM_ERROR("si_initialize_hardware_cac_manager failed\n"); + return ret; + } + ret = si_initialize_smc_dte_tables(adev); + if (ret) { + DRM_ERROR("si_initialize_smc_dte_tables failed\n"); + return ret; + } + ret = si_populate_smc_tdp_limits(adev, boot_ps); + if (ret) { + DRM_ERROR("si_populate_smc_tdp_limits failed\n"); + return ret; + } + ret = si_populate_smc_tdp_limits_2(adev, boot_ps); + if (ret) { + DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n"); + return ret; + } + si_program_response_times(adev); + si_program_ds_registers(adev); + si_dpm_start_smc(adev); + ret = si_notify_smc_display_change(adev, false); + if (ret) { + DRM_ERROR("si_notify_smc_display_change failed\n"); + return ret; + } + si_enable_sclk_control(adev, true); + si_start_dpm(adev); + + si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); + si_thermal_start_thermal_controller(adev); + ni_update_current_ps(adev, boot_ps); + + return 0; +} + +static int si_set_temperature_range(struct amdgpu_device *adev) +{ + int ret; + + ret = si_thermal_enable_alert(adev, false); + if (ret) + return ret; + ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); + if (ret) + return ret; + ret = si_thermal_enable_alert(adev, true); + if (ret) + return ret; + + return ret; +} + +static void si_dpm_disable(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; + + if (!amdgpu_si_is_smc_running(adev)) + return; + si_thermal_stop_thermal_controller(adev); + si_disable_ulv(adev); + si_clear_vc(adev); + if (pi->thermal_protection) + si_enable_thermal_protection(adev, false); + si_enable_power_containment(adev, boot_ps, false); + si_enable_smc_cac(adev, boot_ps, false); + si_enable_spread_spectrum(adev, false); + si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false); + si_stop_dpm(adev); + si_reset_to_default(adev); + si_dpm_stop_smc(adev); + si_force_switch_to_arb_f0(adev); + + ni_update_current_ps(adev, boot_ps); +} + +static int si_dpm_pre_set_power_state(struct amdgpu_device *adev) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; + struct amdgpu_ps *new_ps = &requested_ps; + + ni_update_requested_ps(adev, new_ps); + si_apply_state_adjust_rules(adev, &eg_pi->requested_rps); + + return 0; +} + +static int si_power_control_set_level(struct amdgpu_device *adev) +{ + struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps; + int ret; + + ret = si_restrict_performance_levels_before_switch(adev); + if (ret) + return ret; + ret = si_halt_smc(adev); + if (ret) + return ret; + ret = si_populate_smc_tdp_limits(adev, new_ps); + if (ret) + return ret; + ret = si_populate_smc_tdp_limits_2(adev, new_ps); + if (ret) + return ret; + ret = si_resume_smc(adev); + if 
(ret) + return ret; + ret = si_set_sw_state(adev); + if (ret) + return ret; + return 0; +} + +static int si_dpm_set_power_state(struct amdgpu_device *adev) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct amdgpu_ps *new_ps = &eg_pi->requested_rps; + struct amdgpu_ps *old_ps = &eg_pi->current_rps; + int ret; + + ret = si_disable_ulv(adev); + if (ret) { + DRM_ERROR("si_disable_ulv failed\n"); + return ret; + } + ret = si_restrict_performance_levels_before_switch(adev); + if (ret) { + DRM_ERROR("si_restrict_performance_levels_before_switch failed\n"); + return ret; + } + if (eg_pi->pcie_performance_request) + si_request_link_speed_change_before_state_change(adev, new_ps, old_ps); + ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps); + ret = si_enable_power_containment(adev, new_ps, false); + if (ret) { + DRM_ERROR("si_enable_power_containment failed\n"); + return ret; + } + ret = si_enable_smc_cac(adev, new_ps, false); + if (ret) { + DRM_ERROR("si_enable_smc_cac failed\n"); + return ret; + } + ret = si_halt_smc(adev); + if (ret) { + DRM_ERROR("si_halt_smc failed\n"); + return ret; + } + ret = si_upload_sw_state(adev, new_ps); + if (ret) { + DRM_ERROR("si_upload_sw_state failed\n"); + return ret; + } + ret = si_upload_smc_data(adev); + if (ret) { + DRM_ERROR("si_upload_smc_data failed\n"); + return ret; + } + ret = si_upload_ulv_state(adev); + if (ret) { + DRM_ERROR("si_upload_ulv_state failed\n"); + return ret; + } + if (eg_pi->dynamic_ac_timing) { + ret = si_upload_mc_reg_table(adev, new_ps); + if (ret) { + DRM_ERROR("si_upload_mc_reg_table failed\n"); + return ret; + } + } + ret = si_program_memory_timing_parameters(adev, new_ps); + if (ret) { + DRM_ERROR("si_program_memory_timing_parameters failed\n"); + return ret; + } + si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps); + + ret = si_resume_smc(adev); + if (ret) { + DRM_ERROR("si_resume_smc failed\n"); + return ret; + } + ret = si_set_sw_state(adev); + if (ret) { + DRM_ERROR("si_set_sw_state failed\n"); + return ret; + } + ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps); + if (eg_pi->pcie_performance_request) + si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps); + ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps); + if (ret) { + DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n"); + return ret; + } + ret = si_enable_smc_cac(adev, new_ps, true); + if (ret) { + DRM_ERROR("si_enable_smc_cac failed\n"); + return ret; + } + ret = si_enable_power_containment(adev, new_ps, true); + if (ret) { + DRM_ERROR("si_enable_power_containment failed\n"); + return ret; + } + + ret = si_power_control_set_level(adev); + if (ret) { + DRM_ERROR("si_power_control_set_level failed\n"); + return ret; + } + + return 0; +} + +static void si_dpm_post_set_power_state(struct amdgpu_device *adev) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct amdgpu_ps *new_ps = &eg_pi->requested_rps; + + ni_update_current_ps(adev, new_ps); +} + +#if 0 +void si_dpm_reset_asic(struct amdgpu_device *adev) +{ + si_restrict_performance_levels_before_switch(adev); + si_disable_ulv(adev); + si_set_boot_state(adev); +} +#endif + +static void si_dpm_display_configuration_changed(struct amdgpu_device *adev) +{ + si_program_display_gap(adev); +} + + +static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, + u8 table_rev) +{ + rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); + 
rps->class = le16_to_cpu(non_clock_info->usClassification); + rps->class2 = le16_to_cpu(non_clock_info->usClassification2); + + if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { + rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); + rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); + } else if (r600_is_uvd_state(rps->class, rps->class2)) { + rps->vclk = RV770_DEFAULT_VCLK_FREQ; + rps->dclk = RV770_DEFAULT_DCLK_FREQ; + } else { + rps->vclk = 0; + rps->dclk = 0; + } + + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) + adev->pm.dpm.boot_ps = rps; + if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + adev->pm.dpm.uvd_ps = rps; +} + +static void si_parse_pplib_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, int index, + union pplib_clock_info *clock_info) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ps *ps = si_get_ps(rps); + u16 leakage_voltage; + struct rv7xx_pl *pl = &ps->performance_levels[index]; + int ret; + + ps->performance_level_count = index + 1; + + pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow); + pl->sclk |= clock_info->si.ucEngineClockHigh << 16; + pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); + pl->mclk |= clock_info->si.ucMemoryClockHigh << 16; + + pl->vddc = le16_to_cpu(clock_info->si.usVDDC); + pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); + pl->flags = le32_to_cpu(clock_info->si.ulFlags); + pl->pcie_gen = r600_get_pcie_gen_support(adev, + si_pi->sys_pcie_mask, + si_pi->boot_pcie_gen, + clock_info->si.ucPCIEGen); + + /* patch up vddc if necessary */ + ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, + &leakage_voltage); + if (ret == 0) + pl->vddc = leakage_voltage; + + if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { + pi->acpi_vddc = pl->vddc; + eg_pi->acpi_vddci = pl->vddci; + si_pi->acpi_pcie_gen = pl->pcie_gen; + } + + if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) && + index == 0) { + /* XXX disable for A0 tahiti */ + si_pi->ulv.supported = false; + si_pi->ulv.pl = *pl; + si_pi->ulv.one_pcie_lane_in_ulv = false; + si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT; + si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT; + si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT; + } + + if (pi->min_vddc_in_table > pl->vddc) + pi->min_vddc_in_table = pl->vddc; + + if (pi->max_vddc_in_table < pl->vddc) + pi->max_vddc_in_table = pl->vddc; + + /* patch up boot state */ + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { + u16 vddc, vddci, mvdd; + amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd); + pl->mclk = adev->clock.default_mclk; + pl->sclk = adev->clock.default_sclk; + pl->vddc = vddc; + pl->vddci = vddci; + si_pi->mvdd_bootup_value = mvdd; + } + + if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == + ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk; + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk; + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc; + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci; + } +} + +union pplib_power_state { + struct _ATOM_PPLIB_STATE v1; + struct _ATOM_PPLIB_STATE_V2 v2; +}; + +static int si_parse_power_table(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j, k, 
non_clock_array_index, clock_array_index; + union pplib_clock_info *clock_info; + struct _StateArray *state_array; + struct _ClockInfoArray *clock_info_array; + struct _NonClockInfoArray *non_clock_info_array; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + u8 *power_state_offset; + struct si_ps *ps; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + amdgpu_add_thermal_controller(adev); + + state_array = (struct _StateArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usStateArrayOffset)); + clock_info_array = (struct _ClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); + non_clock_info_array = (struct _NonClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); + + adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * + state_array->ucNumEntries, GFP_KERNEL); + if (!adev->pm.dpm.ps) + return -ENOMEM; + power_state_offset = (u8 *)state_array->states; + for (i = 0; i < state_array->ucNumEntries; i++) { + u8 *idx; + power_state = (union pplib_power_state *)power_state_offset; + non_clock_array_index = power_state->v2.nonClockInfoIndex; + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + &non_clock_info_array->nonClockInfo[non_clock_array_index]; + ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL); + if (ps == NULL) { + kfree(adev->pm.dpm.ps); + return -ENOMEM; + } + adev->pm.dpm.ps[i].ps_priv = ps; + si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], + non_clock_info, + non_clock_info_array->ucEntrySize); + k = 0; + idx = (u8 *)&power_state->v2.clockInfoIndex[0]; + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = idx[j]; + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS) + break; + clock_info = (union pplib_clock_info *) + ((u8 *)&clock_info_array->clockInfo[0] + + (clock_array_index * clock_info_array->ucEntrySize)); + si_parse_pplib_clock_info(adev, + &adev->pm.dpm.ps[i], k, + clock_info); + k++; + } + power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + } + adev->pm.dpm.num_ps = state_array->ucNumEntries; + + /* fill in the vce power states */ + for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { + u32 sclk, mclk; + clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + sclk = le16_to_cpu(clock_info->si.usEngineClockLow); + sclk |= clock_info->si.ucEngineClockHigh << 16; + mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); + mclk |= clock_info->si.ucMemoryClockHigh << 16; + adev->pm.dpm.vce_states[i].sclk = sclk; + adev->pm.dpm.vce_states[i].mclk = mclk; + } + + return 0; +} + +static int si_dpm_init(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi; + struct evergreen_power_info *eg_pi; + struct ni_power_info *ni_pi; + struct si_power_info *si_pi; + struct atom_clock_dividers dividers; + int ret; + u32 mask; + + si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL); + if (si_pi == NULL) + return -ENOMEM; + adev->pm.dpm.priv = si_pi; + ni_pi = &si_pi->ni; + eg_pi = &ni_pi->eg; + pi = &eg_pi->rv7xx; + + ret = 
drm_pcie_get_speed_cap_mask(adev->ddev, &mask); + if (ret) + si_pi->sys_pcie_mask = 0; + else + si_pi->sys_pcie_mask = mask; + si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; + si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); + + si_set_max_cu_value(adev); + + rv770_get_max_vddc(adev); + si_get_leakage_vddc(adev); + si_patch_dependency_tables_based_on_leakage(adev); + + pi->acpi_vddc = 0; + eg_pi->acpi_vddci = 0; + pi->min_vddc_in_table = 0; + pi->max_vddc_in_table = 0; + + ret = amdgpu_get_platform_caps(adev); + if (ret) + return ret; + + ret = amdgpu_parse_extended_power_table(adev); + if (ret) + return ret; + + ret = si_parse_power_table(adev); + if (ret) + return ret; + + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = + kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; + + if (adev->pm.dpm.voltage_response_time == 0) + adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT; + if (adev->pm.dpm.backbias_response_time == 0) + adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT; + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + 0, false, &dividers); + if (ret) + pi->ref_div = dividers.ref_div + 1; + else + pi->ref_div = R600_REFERENCEDIVIDER_DFLT; + + eg_pi->smu_uvd_hs = false; + + pi->mclk_strobe_mode_threshold = 40000; + if (si_is_special_1gb_platform(adev)) + pi->mclk_stutter_mode_threshold = 0; + else + pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold; + pi->mclk_edc_enable_threshold = 40000; + eg_pi->mclk_edc_wr_enable_threshold = 40000; + + ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold; + + pi->voltage_control = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, + VOLTAGE_OBJ_GPIO_LUT); + if (!pi->voltage_control) { + si_pi->voltage_control_svi2 = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, + VOLTAGE_OBJ_SVID2); + if (si_pi->voltage_control_svi2) + amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, + &si_pi->svd_gpio_id, &si_pi->svc_gpio_id); + } + + pi->mvdd_control = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC, + VOLTAGE_OBJ_GPIO_LUT); + + eg_pi->vddci_control = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI, + VOLTAGE_OBJ_GPIO_LUT); + if (!eg_pi->vddci_control) + si_pi->vddci_control_svi2 = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI, + VOLTAGE_OBJ_SVID2); + + si_pi->vddc_phase_shed_control = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, + VOLTAGE_OBJ_PHASE_LUT); + + rv770_get_engine_memory_ss(adev); + + pi->asi = RV770_ASI_DFLT; + pi->pasi = CYPRESS_HASI_DFLT; + pi->vrc = SISLANDS_VRC_DFLT; + + pi->gfx_clock_gating = true; + + 
eg_pi->sclk_deep_sleep = true; + si_pi->sclk_deep_sleep_above_low = false; + + if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE) + pi->thermal_protection = true; + else + pi->thermal_protection = false; + + eg_pi->dynamic_ac_timing = true; + + eg_pi->light_sleep = true; +#if defined(CONFIG_ACPI) + eg_pi->pcie_performance_request = + amdgpu_acpi_is_pcie_performance_request_supported(adev); +#else + eg_pi->pcie_performance_request = false; +#endif + + si_pi->sram_end = SMC_RAM_END; + + adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; + adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; + adev->pm.dpm.dyn_state.vddc_vddci_delta = 200; + adev->pm.dpm.dyn_state.valid_sclk_values.count = 0; + adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL; + adev->pm.dpm.dyn_state.valid_mclk_values.count = 0; + adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; + + si_initialize_powertune_defaults(adev); + + /* make sure dc limits are valid */ + if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || + (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) + adev->pm.dpm.dyn_state.max_clock_voltage_on_dc = + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + + si_pi->fan_ctrl_is_in_default_mode = true; + + return 0; +} + +static void si_dpm_fini(struct amdgpu_device *adev) +{ + int i; + + if (adev->pm.dpm.ps) + for (i = 0; i < adev->pm.dpm.num_ps; i++) + kfree(adev->pm.dpm.ps[i].ps_priv); + kfree(adev->pm.dpm.ps); + kfree(adev->pm.dpm.priv); + kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); + amdgpu_free_extended_power_table(adev); +} + +static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, + struct seq_file *m) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct amdgpu_ps *rps = &eg_pi->current_rps; + struct si_ps *ps = si_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> + CURRENT_STATE_INDEX_SHIFT; + + if (current_index >= ps->performance_level_count) { + seq_printf(m, "invalid dpm profile %d\n", current_index); + } else { + pl = &ps->performance_levels[current_index]; + seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", + current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); + } +} + +static int si_dpm_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cg_thermal_int; + + switch (type) { + case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); + cg_thermal_int |= THERM_INT_MASK_HIGH; + WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); + cg_thermal_int &= ~THERM_INT_MASK_HIGH; + WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + break; + default: + break; + } + break; + + case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); + cg_thermal_int |= THERM_INT_MASK_LOW; + WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); + cg_thermal_int &= ~THERM_INT_MASK_LOW; + WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + break; + default: + break; + } + break; + + default: + break; + } + return 0; +} + +static int 
si_dpm_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + bool queue_thermal = false; + + if (entry == NULL) + return -EINVAL; + + switch (entry->src_id) { + case 230: /* thermal low to high */ + DRM_DEBUG("IH: thermal low to high\n"); + adev->pm.dpm.thermal.high_to_low = false; + queue_thermal = true; + break; + case 231: /* thermal high to low */ + DRM_DEBUG("IH: thermal high to low\n"); + adev->pm.dpm.thermal.high_to_low = true; + queue_thermal = true; + break; + default: + break; + } + + if (queue_thermal) + schedule_work(&adev->pm.dpm.thermal.work); + + return 0; +} + +static int si_dpm_late_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!amdgpu_dpm) + return 0; + + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; + + ret = si_set_temperature_range(adev); + if (ret) + return ret; +#if 0 //TODO ? + si_dpm_powergate_uvd(adev, true); +#endif + return 0; +} + +/** + * si_dpm_init_microcode - load ucode images from disk + * + * @adev: amdgpu_device pointer + * + * Use the firmware interface to load the ucode images into + * the driver (not loaded into hw). + * Returns 0 on success, error on failure. + */ +static int si_dpm_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err; + + DRM_DEBUG("\n"); + switch (adev->asic_type) { + case CHIP_TAHITI: + chip_name = "tahiti"; + break; + case CHIP_PITCAIRN: + if ((adev->pdev->revision == 0x81) || + (adev->pdev->device == 0x6810) || + (adev->pdev->device == 0x6811) || + (adev->pdev->device == 0x6816) || + (adev->pdev->device == 0x6817) || + (adev->pdev->device == 0x6806)) + chip_name = "pitcairn_k"; + else + chip_name = "pitcairn"; + break; + case CHIP_VERDE: + if ((adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83) || + (adev->pdev->revision == 0x87) || + (adev->pdev->device == 0x6820) || + (adev->pdev->device == 0x6821) || + (adev->pdev->device == 0x6822) || + (adev->pdev->device == 0x6823) || + (adev->pdev->device == 0x682A) || + (adev->pdev->device == 0x682B)) + chip_name = "verde_k"; + else + chip_name = "verde"; + break; + case CHIP_OLAND: + if ((adev->pdev->revision == 0xC7) || + (adev->pdev->revision == 0x80) || + (adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83) || + (adev->pdev->device == 0x6604) || + (adev->pdev->device == 0x6605)) + chip_name = "oland_k"; + else + chip_name = "oland"; + break; + case CHIP_HAINAN: + if ((adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83) || + (adev->pdev->revision == 0xC3) || + (adev->pdev->device == 0x6664) || + (adev->pdev->device == 0x6665) || + (adev->pdev->device == 0x6667)) + chip_name = "hainan_k"; + else + chip_name = "hainan"; + break; + default: BUG(); + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); + err = request_firmware(&adev->pm.fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->pm.fw); + +out: + if (err) { + DRM_ERROR("si_smc: Failed to load firmware. 
err = %d\"%s\"\n", + err, fw_name); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + } + return err; + +} + +static int si_dpm_sw_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq); + if (ret) + return ret; + + /* default to balanced state */ + adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; + adev->pm.default_sclk = adev->clock.default_sclk; + adev->pm.default_mclk = adev->clock.default_mclk; + adev->pm.current_sclk = adev->clock.default_sclk; + adev->pm.current_mclk = adev->clock.default_mclk; + adev->pm.int_thermal_type = THERMAL_TYPE_NONE; + + if (amdgpu_dpm == 0) + return 0; + + ret = si_dpm_init_microcode(adev); + if (ret) + return ret; + + INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); + mutex_lock(&adev->pm.mutex); + ret = si_dpm_init(adev); + if (ret) + goto dpm_failed; + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + if (amdgpu_dpm == 1) + amdgpu_pm_print_power_states(adev); + mutex_unlock(&adev->pm.mutex); + DRM_INFO("amdgpu: dpm initialized\n"); + + return 0; + +dpm_failed: + si_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + DRM_ERROR("amdgpu: dpm initialization failed\n"); + return ret; +} + +static int si_dpm_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + mutex_lock(&adev->pm.mutex); + amdgpu_pm_sysfs_fini(adev); + si_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + + return 0; +} + +static int si_dpm_hw_init(void *handle) +{ + int ret; + + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!amdgpu_dpm) + return 0; + + mutex_lock(&adev->pm.mutex); + si_dpm_setup_asic(adev); + ret = si_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + mutex_unlock(&adev->pm.mutex); + + return ret; +} + +static int si_dpm_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + si_dpm_disable(adev); + mutex_unlock(&adev->pm.mutex); + } + + return 0; +} + +static int si_dpm_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + /* disable dpm */ + si_dpm_disable(adev); + /* reset the power state */ + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); + } + return 0; +} + +static int si_dpm_resume(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pm.dpm_enabled) { + /* asic init will reset to the boot state */ + mutex_lock(&adev->pm.mutex); + si_dpm_setup_asic(adev); + ret = si_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + mutex_unlock(&adev->pm.mutex); + if (adev->pm.dpm_enabled) + amdgpu_pm_compute_clocks(adev); + } + return 0; +} + +static bool si_dpm_is_idle(void *handle) +{ + /* XXX */ + return true; +} + +static int si_dpm_wait_for_idle(void *handle) +{ + /* XXX */ + return 0; +} + +static int si_dpm_soft_reset(void *handle) +{ + return 0; +} + +static int si_dpm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + 
return 0; +} + +static int si_dpm_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +/* get temperature in millidegrees */ +static int si_dpm_get_temp(struct amdgpu_device *adev) +{ + u32 temp; + int actual_temp = 0; + + temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >> + CTF_TEMP_SHIFT; + + if (temp & 0x200) + actual_temp = 255; + else + actual_temp = temp & 0x1ff; + + actual_temp = (actual_temp * 1000); + + return actual_temp; +} + +static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); + + if (low) + return requested_state->performance_levels[0].sclk; + else + return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; +} + +static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); + + if (low) + return requested_state->performance_levels[0].mclk; + else + return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; +} + +static void si_dpm_print_power_state(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct si_ps *ps = si_get_ps(rps); + struct rv7xx_pl *pl; + int i; + + amdgpu_dpm_print_class_info(rps->class, rps->class2); + amdgpu_dpm_print_cap_info(rps->caps); + DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + for (i = 0; i < ps->performance_level_count; i++) { + pl = &ps->performance_levels[i]; + if (adev->asic_type >= CHIP_TAHITI) + DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", + i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); + else + DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", + i, pl->sclk, pl->mclk, pl->vddc, pl->vddci); + } + amdgpu_dpm_print_ps_status(adev, rps); +} + +static int si_dpm_early_init(void *handle) +{ + + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + si_dpm_set_dpm_funcs(adev); + si_dpm_set_irq_funcs(adev); + return 0; +} + + +const struct amd_ip_funcs si_dpm_ip_funcs = { + .name = "si_dpm", + .early_init = si_dpm_early_init, + .late_init = si_dpm_late_init, + .sw_init = si_dpm_sw_init, + .sw_fini = si_dpm_sw_fini, + .hw_init = si_dpm_hw_init, + .hw_fini = si_dpm_hw_fini, + .suspend = si_dpm_suspend, + .resume = si_dpm_resume, + .is_idle = si_dpm_is_idle, + .wait_for_idle = si_dpm_wait_for_idle, + .soft_reset = si_dpm_soft_reset, + .set_clockgating_state = si_dpm_set_clockgating_state, + .set_powergating_state = si_dpm_set_powergating_state, +}; + +static const struct amdgpu_dpm_funcs si_dpm_funcs = { + .get_temperature = &si_dpm_get_temp, + .pre_set_power_state = &si_dpm_pre_set_power_state, + .set_power_state = &si_dpm_set_power_state, + .post_set_power_state = &si_dpm_post_set_power_state, + .display_configuration_changed = &si_dpm_display_configuration_changed, + .get_sclk = &si_dpm_get_sclk, + .get_mclk = &si_dpm_get_mclk, + .print_power_state = &si_dpm_print_power_state, + .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, + .force_performance_level = &si_dpm_force_performance_level, + .vblank_too_short = &si_dpm_vblank_too_short, + .set_fan_control_mode = &si_dpm_set_fan_control_mode, + .get_fan_control_mode = &si_dpm_get_fan_control_mode, + .set_fan_speed_percent = &si_dpm_set_fan_speed_percent, 
+ .get_fan_speed_percent = &si_dpm_get_fan_speed_percent, +}; + +static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev) +{ + if (adev->pm.funcs == NULL) + adev->pm.funcs = &si_dpm_funcs; +} + +static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = { + .set = si_dpm_set_interrupt_state, + .process = si_dpm_process_interrupt, +}; + +static void si_dpm_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; + adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h new file mode 100644 index 000000000000..51ce21c5f4fb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h @@ -0,0 +1,1015 @@ +/* + * Copyright 2012 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __SI_DPM_H__ +#define __SI_DPM_H__ + +#include "amdgpu_atombios.h" +#include "sislands_smc.h" + +#define MC_CG_CONFIG 0x96f +#define MC_ARB_CG 0x9fa +#define CG_ARB_REQ(x) ((x) << 0) +#define CG_ARB_REQ_MASK (0xff << 0) + +#define MC_ARB_DRAM_TIMING_1 0x9fc +#define MC_ARB_DRAM_TIMING_2 0x9fd +#define MC_ARB_DRAM_TIMING_3 0x9fe +#define MC_ARB_DRAM_TIMING2_1 0x9ff +#define MC_ARB_DRAM_TIMING2_2 0xa00 +#define MC_ARB_DRAM_TIMING2_3 0xa01 + +#define MAX_NO_OF_MVDD_VALUES 2 +#define MAX_NO_VREG_STEPS 32 +#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 +#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32 +#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 +#define RV770_ASI_DFLT 1000 +#define CYPRESS_HASI_DFLT 400000 +#define PCIE_PERF_REQ_PECI_GEN1 2 +#define PCIE_PERF_REQ_PECI_GEN2 3 +#define PCIE_PERF_REQ_PECI_GEN3 4 +#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */ +#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */ + +#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16 + +#define RV770_SMC_TABLE_ADDRESS 0xB000 +#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3 + +#define SMC_STROBE_RATIO 0x0F +#define SMC_STROBE_ENABLE 0x10 + +#define SMC_MC_EDC_RD_FLAG 0x01 +#define SMC_MC_EDC_WR_FLAG 0x02 +#define SMC_MC_RTT_ENABLE 0x04 +#define SMC_MC_STUTTER_EN 0x08 + +#define RV770_SMC_VOLTAGEMASK_VDDC 0 +#define RV770_SMC_VOLTAGEMASK_MVDD 1 +#define RV770_SMC_VOLTAGEMASK_VDDCI 2 +#define RV770_SMC_VOLTAGEMASK_MAX 4 + +#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 +#define NISLANDS_SMC_STROBE_RATIO 0x0F +#define NISLANDS_SMC_STROBE_ENABLE 0x10 + +#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01 +#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02 +#define NISLANDS_SMC_MC_RTT_ENABLE 0x04 +#define NISLANDS_SMC_MC_STUTTER_EN 0x08 + +#define MAX_NO_VREG_STEPS 32 + +#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0 +#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1 +#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2 +#define NISLANDS_SMC_VOLTAGEMASK_MAX 4 + +#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0 +#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1 +#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2 +#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3 + +#define SISLANDS_LEAKAGE_INDEX0 0xff01 +#define SISLANDS_MAX_LEAKAGE_COUNT 4 + +#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5 +#define SISLANDS_INITIAL_STATE_ARB_INDEX 0 +#define SISLANDS_ACPI_STATE_ARB_INDEX 1 +#define SISLANDS_ULV_STATE_ARB_INDEX 2 +#define SISLANDS_DRIVER_STATE_ARB_INDEX 3 + +#define SISLANDS_DPM2_MAX_PULSE_SKIP 256 + +#define SISLANDS_DPM2_NEAR_TDP_DEC 10 +#define SISLANDS_DPM2_ABOVE_SAFE_INC 5 +#define SISLANDS_DPM2_BELOW_SAFE_INC 20 + +#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80 + +#define SISLANDS_DPM2_MAXPS_PERCENT_H 99 +#define SISLANDS_DPM2_MAXPS_PERCENT_M 99 + +#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF +#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12 +#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 +#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E +#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF + +#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN 10 + +#define SISLANDS_VRC_DFLT 0xC000B3 +#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT 1687 +#define SISLANDS_CGULVPARAMETER_DFLT 0x00040035 +#define SISLANDS_CGULVCONTROL_DFLT 0x1f007550 + +#define SI_ASI_DFLT 10000 +#define SI_BSP_DFLT 0x41EB +#define SI_BSU_DFLT 0x2 +#define SI_AH_DFLT 5 +#define SI_RLP_DFLT 25 +#define SI_RMP_DFLT 65 +#define SI_LHP_DFLT 40 +#define SI_LMP_DFLT 15 +#define SI_TD_DFLT 0 +#define SI_UTC_DFLT_00 0x24 +#define SI_UTC_DFLT_01 0x22 +#define SI_UTC_DFLT_02 0x22 +#define 
SI_UTC_DFLT_03 0x22 +#define SI_UTC_DFLT_04 0x22 +#define SI_UTC_DFLT_05 0x22 +#define SI_UTC_DFLT_06 0x22 +#define SI_UTC_DFLT_07 0x22 +#define SI_UTC_DFLT_08 0x22 +#define SI_UTC_DFLT_09 0x22 +#define SI_UTC_DFLT_10 0x22 +#define SI_UTC_DFLT_11 0x22 +#define SI_UTC_DFLT_12 0x22 +#define SI_UTC_DFLT_13 0x22 +#define SI_UTC_DFLT_14 0x22 +#define SI_DTC_DFLT_00 0x24 +#define SI_DTC_DFLT_01 0x22 +#define SI_DTC_DFLT_02 0x22 +#define SI_DTC_DFLT_03 0x22 +#define SI_DTC_DFLT_04 0x22 +#define SI_DTC_DFLT_05 0x22 +#define SI_DTC_DFLT_06 0x22 +#define SI_DTC_DFLT_07 0x22 +#define SI_DTC_DFLT_08 0x22 +#define SI_DTC_DFLT_09 0x22 +#define SI_DTC_DFLT_10 0x22 +#define SI_DTC_DFLT_11 0x22 +#define SI_DTC_DFLT_12 0x22 +#define SI_DTC_DFLT_13 0x22 +#define SI_DTC_DFLT_14 0x22 +#define SI_VRC_DFLT 0x0000C003 +#define SI_VOLTAGERESPONSETIME_DFLT 1000 +#define SI_BACKBIASRESPONSETIME_DFLT 1000 +#define SI_VRU_DFLT 0x3 +#define SI_SPLLSTEPTIME_DFLT 0x1000 +#define SI_SPLLSTEPUNIT_DFLT 0x3 +#define SI_TPU_DFLT 0 +#define SI_TPC_DFLT 0x200 +#define SI_SSTU_DFLT 0 +#define SI_SST_DFLT 0x00C8 +#define SI_GICST_DFLT 0x200 +#define SI_FCT_DFLT 0x0400 +#define SI_FCTU_DFLT 0 +#define SI_CTXCGTT3DRPHC_DFLT 0x20 +#define SI_CTXCGTT3DRSDC_DFLT 0x40 +#define SI_VDDC3DOORPHC_DFLT 0x100 +#define SI_VDDC3DOORSDC_DFLT 0x7 +#define SI_VDDC3DOORSU_DFLT 0 +#define SI_MPLLLOCKTIME_DFLT 100 +#define SI_MPLLRESETTIME_DFLT 150 +#define SI_VCOSTEPPCT_DFLT 20 +#define SI_ENDINGVCOSTEPPCT_DFLT 5 +#define SI_REFERENCEDIVIDER_DFLT 4 + +#define SI_PM_NUMBER_OF_TC 15 +#define SI_PM_NUMBER_OF_SCLKS 20 +#define SI_PM_NUMBER_OF_MCLKS 4 +#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4 +#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3 + +/* XXX are these ok? */ +#define SI_TEMP_RANGE_MIN (90 * 1000) +#define SI_TEMP_RANGE_MAX (120 * 1000) + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + +enum ni_dc_cac_level +{ + NISLANDS_DCCAC_LEVEL_0 = 0, + NISLANDS_DCCAC_LEVEL_1, + NISLANDS_DCCAC_LEVEL_2, + NISLANDS_DCCAC_LEVEL_3, + NISLANDS_DCCAC_LEVEL_4, + NISLANDS_DCCAC_LEVEL_5, + NISLANDS_DCCAC_LEVEL_6, + NISLANDS_DCCAC_LEVEL_7, + NISLANDS_DCCAC_MAX_LEVELS +}; + +enum si_cac_config_reg_type +{ + SISLANDS_CACCONFIG_MMR = 0, + SISLANDS_CACCONFIG_CGIND, + SISLANDS_CACCONFIG_MAX +}; + +enum si_power_level { + SI_POWER_LEVEL_LOW = 0, + SI_POWER_LEVEL_MEDIUM = 1, + SI_POWER_LEVEL_HIGH = 2, + SI_POWER_LEVEL_CTXSW = 3, +}; + +enum si_td { + SI_TD_AUTO, + SI_TD_UP, + SI_TD_DOWN, +}; + +enum si_display_watermark { + SI_DISPLAY_WATERMARK_LOW = 0, + SI_DISPLAY_WATERMARK_HIGH = 1, +}; + +enum si_display_gap +{ + SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, + SI_PM_DISPLAY_GAP_VBLANK = 1, + SI_PM_DISPLAY_GAP_WATERMARK = 2, + SI_PM_DISPLAY_GAP_IGNORE = 3, +}; + +extern const struct amd_ip_funcs si_dpm_ip_funcs; + +struct ni_leakage_coeffients +{ + u32 at; + u32 bt; + u32 av; + u32 bv; + s32 t_slope; + s32 t_intercept; + u32 t_ref; +}; + +struct SMC_Evergreen_MCRegisterAddress +{ + uint16_t s0; + uint16_t s1; +}; + +typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress; + +struct evergreen_mc_reg_entry { + u32 mclk_max; + u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; +}; + +struct evergreen_mc_reg_table { + u8 last; + u8 num_entries; + u16 valid_flag; + struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; +}; + +struct SMC_Evergreen_MCRegisterSet +{ + uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; +}; + +typedef struct 
SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet; + +struct SMC_Evergreen_MCRegisters +{ + uint8_t last; + uint8_t reserved[3]; + SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; + SMC_Evergreen_MCRegisterSet data[5]; +}; + +typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; + +struct SMC_NIslands_MCRegisterSet +{ + uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet; + +struct ni_mc_reg_entry { + u32 mclk_max; + u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +struct SMC_NIslands_MCRegisterAddress +{ + uint16_t s0; + uint16_t s1; +}; + +typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress; + +struct SMC_NIslands_MCRegisters +{ + uint8_t last; + uint8_t reserved[3]; + SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; + SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; +}; + +typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters; + +struct evergreen_ulv_param { + bool supported; + struct rv7xx_pl *pl; +}; + +struct evergreen_arb_registers { + u32 mc_arb_dram_timing; + u32 mc_arb_dram_timing2; + u32 mc_arb_rfsh_rate; + u32 mc_arb_burst_time; +}; + +struct at { + u32 rlp; + u32 rmp; + u32 lhp; + u32 lmp; +}; + +struct ni_clock_registers { + u32 cg_spll_func_cntl; + u32 cg_spll_func_cntl_2; + u32 cg_spll_func_cntl_3; + u32 cg_spll_func_cntl_4; + u32 cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2; + u32 mclk_pwrmgt_cntl; + u32 dll_cntl; + u32 mpll_ad_func_cntl; + u32 mpll_ad_func_cntl_2; + u32 mpll_dq_func_cntl; + u32 mpll_dq_func_cntl_2; + u32 mpll_ss1; + u32 mpll_ss2; +}; + +struct RV770_SMC_SCLK_VALUE +{ + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t sclk_value; +}; + +typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE; + +struct RV770_SMC_MCLK_VALUE +{ + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_AD_FUNC_CNTL_2; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL_2; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vDLL_CNTL; + uint32_t vMPLL_SS; + uint32_t vMPLL_SS2; + uint32_t mclk_value; +}; + +typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE; + + +struct RV730_SMC_MCLK_VALUE +{ + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vDLL_CNTL; + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL2; + uint32_t vMPLL_FUNC_CNTL3; + uint32_t vMPLL_SS; + uint32_t vMPLL_SS2; + uint32_t mclk_value; +}; + +typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE; + +struct RV770_SMC_VOLTAGE_VALUE +{ + uint16_t value; + uint8_t index; + uint8_t padding; +}; + +typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE; + +union RV7XX_SMC_MCLK_VALUE +{ + RV770_SMC_MCLK_VALUE mclk770; + RV730_SMC_MCLK_VALUE mclk730; +}; + +typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE; + +struct RV770_SMC_HW_PERFORMANCE_LEVEL +{ + uint8_t arbValue; + union{ + uint8_t seqValue; + uint8_t ACIndex; + }; + uint8_t displayWatermark; + uint8_t gen2PCIE; + uint8_t gen2XSP; + uint8_t backbias; + uint8_t strobeMode; + uint8_t mcFlags; + uint32_t aT; + uint32_t bSP; + RV770_SMC_SCLK_VALUE sclk; + RV7XX_SMC_MCLK_VALUE mclk; + RV770_SMC_VOLTAGE_VALUE vddc; + RV770_SMC_VOLTAGE_VALUE mvdd; + RV770_SMC_VOLTAGE_VALUE vddci; + uint8_t reserved1; + uint8_t reserved2; + uint8_t stateFlags; + uint8_t padding; +}; + +typedef 
struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL; + +struct RV770_SMC_SWSTATE +{ + uint8_t flags; + uint8_t padding1; + uint8_t padding2; + uint8_t padding3; + RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; +}; + +typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE; + +struct RV770_SMC_VOLTAGEMASKTABLE +{ + uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX]; + uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX]; +}; + +typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE; + +struct RV770_SMC_STATETABLE +{ + uint8_t thermalProtectType; + uint8_t systemFlags; + uint8_t maxVDDCIndexInPPTable; + uint8_t extraFlags; + uint8_t highSMIO[MAX_NO_VREG_STEPS]; + uint32_t lowSMIO[MAX_NO_VREG_STEPS]; + RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable; + RV770_SMC_SWSTATE initialState; + RV770_SMC_SWSTATE ACPIState; + RV770_SMC_SWSTATE driverState; + RV770_SMC_SWSTATE ULVState; +}; + +typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE; + +struct vddc_table_entry { + u16 vddc; + u8 vddc_index; + u8 high_smio; + u32 low_smio; +}; + +struct rv770_clock_registers { + u32 cg_spll_func_cntl; + u32 cg_spll_func_cntl_2; + u32 cg_spll_func_cntl_3; + u32 cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2; + u32 mpll_ad_func_cntl; + u32 mpll_ad_func_cntl_2; + u32 mpll_dq_func_cntl; + u32 mpll_dq_func_cntl_2; + u32 mclk_pwrmgt_cntl; + u32 dll_cntl; + u32 mpll_ss1; + u32 mpll_ss2; +}; + +struct rv730_clock_registers { + u32 cg_spll_func_cntl; + u32 cg_spll_func_cntl_2; + u32 cg_spll_func_cntl_3; + u32 cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2; + u32 mclk_pwrmgt_cntl; + u32 dll_cntl; + u32 mpll_func_cntl; + u32 mpll_func_cntl2; + u32 mpll_func_cntl3; + u32 mpll_ss; + u32 mpll_ss2; +}; + +union r7xx_clock_registers { + struct rv770_clock_registers rv770; + struct rv730_clock_registers rv730; +}; + +struct rv7xx_power_info { + /* flags */ + bool mem_gddr5; + bool pcie_gen2; + bool dynamic_pcie_gen2; + bool acpi_pcie_gen2; + bool boot_in_gen2; + bool voltage_control; /* vddc */ + bool mvdd_control; + bool sclk_ss; + bool mclk_ss; + bool dynamic_ss; + bool gfx_clock_gating; + bool mg_clock_gating; + bool mgcgtssm; + bool power_gating; + bool thermal_protection; + bool display_gap; + bool dcodt; + bool ulps; + /* registers */ + union r7xx_clock_registers clk_regs; + u32 s0_vid_lower_smio_cntl; + /* voltage */ + u32 vddc_mask_low; + u32 mvdd_mask_low; + u32 mvdd_split_frequency; + u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES]; + u16 max_vddc; + u16 max_vddc_in_table; + u16 min_vddc_in_table; + struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS]; + u8 valid_vddc_entries; + /* dc odt */ + u32 mclk_odt_threshold; + u8 odt_value_0[2]; + u8 odt_value_1[2]; + /* stored values */ + u32 boot_sclk; + u16 acpi_vddc; + u32 ref_div; + u32 active_auto_throttle_sources; + u32 mclk_stutter_mode_threshold; + u32 mclk_strobe_mode_threshold; + u32 mclk_edc_enable_threshold; + u32 bsp; + u32 bsu; + u32 pbsp; + u32 pbsu; + u32 dsp; + u32 psp; + u32 asi; + u32 pasi; + u32 vrc; + u32 restricted_levels; + u32 rlp; + u32 rmp; + u32 lhp; + u32 lmp; + /* smc offsets */ + u16 state_table_start; + u16 soft_regs_start; + u16 sram_end; + /* scratch structs */ + RV770_SMC_STATETABLE smc_statetable; +}; + +struct rv7xx_pl { + u32 sclk; + u32 mclk; + u16 vddc; + u16 vddci; /* eg+ only */ + u32 flags; + enum amdgpu_pcie_gen pcie_gen; /* si+ only */ +}; + +struct rv7xx_ps { + struct rv7xx_pl high; + struct rv7xx_pl medium; + struct rv7xx_pl low; + bool dc_compatible; +}; + +struct 
si_ps { + u16 performance_level_count; + bool dc_compatible; + struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; +}; + +struct ni_mc_reg_table { + u8 last; + u8 num_entries; + u16 valid_flag; + struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ni_cac_data +{ + struct ni_leakage_coeffients leakage_coefficients; + u32 i_leakage; + s32 leakage_minimum_temperature; + u32 pwr_const; + u32 dc_cac_value; + u32 bif_cac_value; + u32 lkge_pwr; + u8 mc_wr_weight; + u8 mc_rd_weight; + u8 allow_ovrflw; + u8 num_win_tdp; + u8 l2num_win_tdp; + u8 lts_truncate_n; +}; + +struct evergreen_power_info { + /* must be first! */ + struct rv7xx_power_info rv7xx; + /* flags */ + bool vddci_control; + bool dynamic_ac_timing; + bool abm; + bool mcls; + bool light_sleep; + bool memory_transition; + bool pcie_performance_request; + bool pcie_performance_request_registered; + bool sclk_deep_sleep; + bool dll_default_on; + bool ls_clock_gating; + bool smu_uvd_hs; + bool uvd_enabled; + /* stored values */ + u16 acpi_vddci; + u8 mvdd_high_index; + u8 mvdd_low_index; + u32 mclk_edc_wr_enable_threshold; + struct evergreen_mc_reg_table mc_reg_table; + struct atom_voltage_table vddc_voltage_table; + struct atom_voltage_table vddci_voltage_table; + struct evergreen_arb_registers bootup_arb_registers; + struct evergreen_ulv_param ulv; + struct at ats[2]; + /* smc offsets */ + u16 mc_reg_table_start; + struct amdgpu_ps current_rps; + struct rv7xx_ps current_ps; + struct amdgpu_ps requested_rps; + struct rv7xx_ps requested_ps; +}; + +struct PP_NIslands_Dpm2PerfLevel +{ + uint8_t MaxPS; + uint8_t TgtAct; + uint8_t MaxPS_StepInc; + uint8_t MaxPS_StepDec; + uint8_t PSST; + uint8_t NearTDPDec; + uint8_t AboveSafeInc; + uint8_t BelowSafeInc; + uint8_t PSDeltaLimit; + uint8_t PSDeltaWin; + uint8_t Reserved[6]; +}; + +typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel; + +struct PP_NIslands_DPM2Parameters +{ + uint32_t TDPLimit; + uint32_t NearTDPLimit; + uint32_t SafePowerLimit; + uint32_t PowerBoostLimit; +}; +typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters; + +struct NISLANDS_SMC_SCLK_VALUE +{ + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_FUNC_CNTL_4; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t sclk_value; +}; + +typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE; + +struct NISLANDS_SMC_MCLK_VALUE +{ + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL_1; + uint32_t vMPLL_FUNC_CNTL_2; + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_AD_FUNC_CNTL_2; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL_2; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vDLL_CNTL; + uint32_t vMPLL_SS; + uint32_t vMPLL_SS2; + uint32_t mclk_value; +}; + +typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE; + +struct NISLANDS_SMC_VOLTAGE_VALUE +{ + uint16_t value; + uint8_t index; + uint8_t padding; +}; + +typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE; + +struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL +{ + uint8_t arbValue; + uint8_t ACIndex; + uint8_t displayWatermark; + uint8_t gen2PCIE; + uint8_t reserved1; + uint8_t reserved2; + uint8_t strobeMode; + uint8_t mcFlags; + uint32_t aT; + uint32_t bSP; + NISLANDS_SMC_SCLK_VALUE sclk; + NISLANDS_SMC_MCLK_VALUE mclk; + NISLANDS_SMC_VOLTAGE_VALUE vddc; + 
NISLANDS_SMC_VOLTAGE_VALUE mvdd; + NISLANDS_SMC_VOLTAGE_VALUE vddci; + NISLANDS_SMC_VOLTAGE_VALUE std_vddc; + uint32_t powergate_en; + uint8_t hUp; + uint8_t hDown; + uint8_t stateFlags; + uint8_t arbRefreshState; + uint32_t SQPowerThrottle; + uint32_t SQPowerThrottle_2; + uint32_t reserved[2]; + PP_NIslands_Dpm2PerfLevel dpm2; +}; + +typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL; + +struct NISLANDS_SMC_SWSTATE +{ + uint8_t flags; + uint8_t levelCount; + uint8_t padding2; + uint8_t padding3; + NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1]; +}; + +typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE; + +struct NISLANDS_SMC_VOLTAGEMASKTABLE +{ + uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; + uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; +}; + +typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE; + +#define NISLANDS_MAX_NO_VREG_STEPS 32 + +struct NISLANDS_SMC_STATETABLE +{ + uint8_t thermalProtectType; + uint8_t systemFlags; + uint8_t maxVDDCIndexInPPTable; + uint8_t extraFlags; + uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS]; + uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS]; + NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; + PP_NIslands_DPM2Parameters dpm2Params; + NISLANDS_SMC_SWSTATE initialState; + NISLANDS_SMC_SWSTATE ACPIState; + NISLANDS_SMC_SWSTATE ULVState; + NISLANDS_SMC_SWSTATE driverState; + NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1]; +}; + +typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE; + +struct ni_power_info { + /* must be first! */ + struct evergreen_power_info eg; + struct ni_clock_registers clock_registers; + struct ni_mc_reg_table mc_reg_table; + u32 mclk_rtt_mode_threshold; + /* flags */ + bool use_power_boost_limit; + bool support_cac_long_term_average; + bool cac_enabled; + bool cac_configuration_required; + bool driver_calculate_cac_leakage; + bool pc_enabled; + bool enable_power_containment; + bool enable_cac; + bool enable_sq_ramping; + /* smc offsets */ + u16 arb_table_start; + u16 fan_table_start; + u16 cac_table_start; + u16 spll_table_start; + /* CAC stuff */ + struct ni_cac_data cac_data; + u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS]; + const struct ni_cac_weights *cac_weights; + u8 lta_window_size; + u8 lts_truncate; + struct si_ps current_ps; + struct si_ps requested_ps; + /* scratch structs */ + SMC_NIslands_MCRegisters smc_mc_reg_table; + NISLANDS_SMC_STATETABLE smc_statetable; +}; + +struct si_cac_config_reg +{ + u32 offset; + u32 mask; + u32 shift; + u32 value; + enum si_cac_config_reg_type type; +}; + +struct si_powertune_data +{ + u32 cac_window; + u32 l2_lta_window_size_default; + u8 lts_truncate_default; + u8 shift_n_default; + u8 operating_temp; + struct ni_leakage_coeffients leakage_coefficients; + u32 fixed_kt; + u32 lkge_lut_v0_percent; + u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS]; + bool enable_powertune_by_default; +}; + +struct si_dyn_powertune_data +{ + u32 cac_leakage; + s32 leakage_minimum_temperature; + u32 wintime; + u32 l2_lta_window_size; + u8 lts_truncate; + u8 shift_n; + u8 dc_pwr_value; + bool disable_uvd_powertune; +}; + +struct si_dte_data +{ + u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; + u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; + u32 k; + u32 t0; + u32 max_t; + u8 window_size; + u8 temp_select; + u8 dte_mode; + u8 tdep_count; + u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + u32 
tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + u32 t_threshold; + bool enable_dte_by_default; +}; + +struct si_clock_registers { + u32 cg_spll_func_cntl; + u32 cg_spll_func_cntl_2; + u32 cg_spll_func_cntl_3; + u32 cg_spll_func_cntl_4; + u32 cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2; + u32 dll_cntl; + u32 mclk_pwrmgt_cntl; + u32 mpll_ad_func_cntl; + u32 mpll_dq_func_cntl; + u32 mpll_func_cntl; + u32 mpll_func_cntl_1; + u32 mpll_func_cntl_2; + u32 mpll_ss1; + u32 mpll_ss2; +}; + +struct si_mc_reg_entry { + u32 mclk_max; + u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +struct si_mc_reg_table { + u8 last; + u8 num_entries; + u16 valid_flag; + struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +struct si_leakage_voltage_entry +{ + u16 voltage; + u16 leakage_index; +}; + +struct si_leakage_voltage +{ + u16 count; + struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT]; +}; + + +struct si_ulv_param { + bool supported; + u32 cg_ulv_control; + u32 cg_ulv_parameter; + u32 volt_change_delay; + struct rv7xx_pl pl; + bool one_pcie_lane_in_ulv; +}; + +struct si_power_info { + /* must be first! */ + struct ni_power_info ni; + struct si_clock_registers clock_registers; + struct si_mc_reg_table mc_reg_table; + struct atom_voltage_table mvdd_voltage_table; + struct atom_voltage_table vddc_phase_shed_table; + struct si_leakage_voltage leakage_voltage; + u16 mvdd_bootup_value; + struct si_ulv_param ulv; + u32 max_cu; + /* pcie gen */ + enum amdgpu_pcie_gen force_pcie_gen; + enum amdgpu_pcie_gen boot_pcie_gen; + enum amdgpu_pcie_gen acpi_pcie_gen; + u32 sys_pcie_mask; + /* flags */ + bool enable_dte; + bool enable_ppm; + bool vddc_phase_shed_control; + bool pspp_notify_required; + bool sclk_deep_sleep_above_low; + bool voltage_control_svi2; + bool vddci_control_svi2; + /* smc offsets */ + u32 sram_end; + u32 state_table_start; + u32 soft_regs_start; + u32 mc_reg_table_start; + u32 arb_table_start; + u32 cac_table_start; + u32 dte_table_start; + u32 spll_table_start; + u32 papm_cfg_table_start; + u32 fan_table_start; + /* CAC stuff */ + const struct si_cac_config_reg *cac_weights; + const struct si_cac_config_reg *lcac_config; + const struct si_cac_config_reg *cac_override; + const struct si_powertune_data *powertune_data; + struct si_dyn_powertune_data dyn_powertune_data; + /* DTE stuff */ + struct si_dte_data dte_data; + /* scratch structs */ + SMC_SIslands_MCRegisters smc_mc_reg_table; + SISLANDS_SMC_STATETABLE smc_statetable; + PP_SIslands_PAPMParameters papm_parm; + /* SVI2 */ + u8 svd_gpio_id; + u8 svc_gpio_id; + /* fan control */ + bool fan_ctrl_is_in_default_mode; + u32 t_min; + u32 fan_ctrl_default_mode; + bool fan_is_controlled_by_smc; +}; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c new file mode 100644 index 000000000000..8fae3d4a2360 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -0,0 +1,299 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_ih.h" +#include "si/sid.h" +#include "si_ih.h" + +static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev); + +static void si_ih_enable_interrupts(struct amdgpu_device *adev) +{ + u32 ih_cntl = RREG32(IH_CNTL); + u32 ih_rb_cntl = RREG32(IH_RB_CNTL); + + ih_cntl |= ENABLE_INTR; + ih_rb_cntl |= IH_RB_ENABLE; + WREG32(IH_CNTL, ih_cntl); + WREG32(IH_RB_CNTL, ih_rb_cntl); + adev->irq.ih.enabled = true; +} + +static void si_ih_disable_interrupts(struct amdgpu_device *adev) +{ + u32 ih_rb_cntl = RREG32(IH_RB_CNTL); + u32 ih_cntl = RREG32(IH_CNTL); + + ih_rb_cntl &= ~IH_RB_ENABLE; + ih_cntl &= ~ENABLE_INTR; + WREG32(IH_RB_CNTL, ih_rb_cntl); + WREG32(IH_CNTL, ih_cntl); + WREG32(IH_RB_RPTR, 0); + WREG32(IH_RB_WPTR, 0); + adev->irq.ih.enabled = false; + adev->irq.ih.rptr = 0; +} + +static int si_ih_irq_init(struct amdgpu_device *adev) +{ + int rb_bufsz; + u32 interrupt_cntl, ih_cntl, ih_rb_cntl; + u64 wptr_off; + + si_ih_disable_interrupts(adev); + WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8); + interrupt_cntl = RREG32(INTERRUPT_CNTL); + interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; + interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; + WREG32(INTERRUPT_CNTL, interrupt_cntl); + + WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8); + rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4); + + ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE | + IH_WPTR_OVERFLOW_CLEAR | + (rb_bufsz << 1) | + IH_WPTR_WRITEBACK_ENABLE; + + wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); + WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); + WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); + WREG32(IH_RB_CNTL, ih_rb_cntl); + WREG32(IH_RB_RPTR, 0); + WREG32(IH_RB_WPTR, 0); + + ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0); + if (adev->irq.msi_enabled) + ih_cntl |= RPTR_REARM; + WREG32(IH_CNTL, ih_cntl); + + pci_set_master(adev->pdev); + si_ih_enable_interrupts(adev); + + return 0; +} + +static void si_ih_irq_disable(struct amdgpu_device *adev) +{ + si_ih_disable_interrupts(adev); + mdelay(1); +} + +static u32 si_ih_get_wptr(struct amdgpu_device *adev) +{ + u32 wptr, tmp; + + wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); + + if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) { + wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK; + dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, adev->irq.ih.rptr, (wptr + 16) & 
adev->irq.ih.ptr_mask); + adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; + tmp = RREG32(IH_RB_CNTL); + tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; + WREG32(IH_RB_CNTL, tmp); + } + return (wptr & adev->irq.ih.ptr_mask); +} + +static void si_ih_decode_iv(struct amdgpu_device *adev, + struct amdgpu_iv_entry *entry) +{ + u32 ring_index = adev->irq.ih.rptr >> 2; + uint32_t dw[4]; + + dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); + dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); + dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); + dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + + entry->src_id = dw[0] & 0xff; + entry->src_data = dw[1] & 0xfffffff; + entry->ring_id = dw[2] & 0xff; + entry->vm_id = (dw[2] >> 8) & 0xff; + + adev->irq.ih.rptr += 16; +} + +static void si_ih_set_rptr(struct amdgpu_device *adev) +{ + WREG32(IH_RB_RPTR, adev->irq.ih.rptr); +} + +static int si_ih_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + si_ih_set_interrupt_funcs(adev); + + return 0; +} + +static int si_ih_sw_init(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_ih_ring_init(adev, 64 * 1024, false); + if (r) + return r; + + return amdgpu_irq_init(adev); +} + +static int si_ih_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_irq_fini(adev); + amdgpu_ih_ring_fini(adev); + + return 0; +} + +static int si_ih_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return si_ih_irq_init(adev); +} + +static int si_ih_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + si_ih_irq_disable(adev); + + return 0; +} + +static int si_ih_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return si_ih_hw_fini(adev); +} + +static int si_ih_resume(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return si_ih_hw_init(adev); +} + +static bool si_ih_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 tmp = RREG32(SRBM_STATUS); + + if (tmp & SRBM_STATUS__IH_BUSY_MASK) + return false; + + return true; +} + +static int si_ih_wait_for_idle(void *handle) +{ + unsigned i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->usec_timeout; i++) { + if (si_ih_is_idle(handle)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static int si_ih_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(SRBM_STATUS); + + if (tmp & SRBM_STATUS__IH_BUSY_MASK) + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK; + + if (srbm_soft_reset) { + tmp = RREG32(SRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(SRBM_SOFT_RESET, tmp); + tmp = RREG32(SRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(SRBM_SOFT_RESET, tmp); + tmp = RREG32(SRBM_SOFT_RESET); + + udelay(50); + } + + return 0; +} + +static int si_ih_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int si_ih_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs si_ih_ip_funcs = { + .name = "si_ih", + .early_init = si_ih_early_init, + .late_init = NULL, + .sw_init = si_ih_sw_init, + .sw_fini = 
si_ih_sw_fini, + .hw_init = si_ih_hw_init, + .hw_fini = si_ih_hw_fini, + .suspend = si_ih_suspend, + .resume = si_ih_resume, + .is_idle = si_ih_is_idle, + .wait_for_idle = si_ih_wait_for_idle, + .soft_reset = si_ih_soft_reset, + .set_clockgating_state = si_ih_set_clockgating_state, + .set_powergating_state = si_ih_set_powergating_state, +}; + +static const struct amdgpu_ih_funcs si_ih_funcs = { + .get_wptr = si_ih_get_wptr, + .decode_iv = si_ih_decode_iv, + .set_rptr = si_ih_set_rptr +}; + +static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev) +{ + if (adev->irq.ih_funcs == NULL) + adev->irq.ih_funcs = &si_ih_funcs; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h new file mode 100644 index 000000000000..f3e3a954369c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h @@ -0,0 +1,29 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __SI_IH_H__ +#define __SI_IH_H__ + +extern const struct amd_ip_funcs si_ih_ip_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c new file mode 100644 index 000000000000..668ba99d6c05 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/si_smc.c @@ -0,0 +1,273 @@ +/* + * Copyright 2011 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ + +#include <linux/firmware.h> +#include "drmP.h" +#include "amdgpu.h" +#include "si/sid.h" +#include "ppsmc.h" +#include "amdgpu_ucode.h" +#include "sislands_smc.h" + +static int si_set_smc_sram_address(struct amdgpu_device *adev, + u32 smc_address, u32 limit) +{ + if (smc_address & 3) + return -EINVAL; + if ((smc_address + 3) > limit) + return -EINVAL; + + WREG32(SMC_IND_INDEX_0, smc_address); + WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + + return 0; +} + +int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit) +{ + unsigned long flags; + int ret = 0; + u32 data, original_data, addr, extra_shift; + + if (smc_start_address & 3) + return -EINVAL; + if ((smc_start_address + byte_count) > limit) + return -EINVAL; + + addr = smc_start_address; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + while (byte_count >= 4) { + /* SMC address space is BE */ + data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; + + ret = si_set_smc_sram_address(adev, addr, limit); + if (ret) + goto done; + + WREG32(SMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + /* RMW for the final bytes */ + if (byte_count > 0) { + data = 0; + + ret = si_set_smc_sram_address(adev, addr, limit); + if (ret) + goto done; + + original_data = RREG32(SMC_IND_DATA_0); + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + /* SMC address space is BE */ + data = (data << 8) + *src++; + byte_count--; + } + + data <<= extra_shift; + data |= (original_data & ~((~0UL) << extra_shift)); + + ret = si_set_smc_sram_address(adev, addr, limit); + if (ret) + goto done; + + WREG32(SMC_IND_DATA_0, data); + } + +done: + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return ret; +} + +void amdgpu_si_start_smc(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL); + + tmp &= ~RST_REG; + + WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); +} + +void amdgpu_si_reset_smc(struct amdgpu_device *adev) +{ + u32 tmp; + + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + + tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) | + RST_REG; + WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); +} + +int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev) +{ + static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 }; + + return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); +} + +void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable) +{ + u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); + + if (enable) + tmp &= ~CK_DISABLE; + else + tmp |= CK_DISABLE; + + WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp); +} + +bool amdgpu_si_is_smc_running(struct amdgpu_device *adev) +{ + u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL); + u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); + + if (!(rst & RST_REG) && !(clk & CK_DISABLE)) + return true; + + return false; +} + +PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, + PPSMC_Msg msg) +{ + u32 tmp; + int i; + + if (!amdgpu_si_is_smc_running(adev)) + return PPSMC_Result_Failed; + + WREG32(SMC_MESSAGE_0, msg); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(SMC_RESP_0); + if (tmp != 0) + break; + udelay(1); + } + + return (PPSMC_Result)RREG32(SMC_RESP_0); +} + +PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev) +{ + u32 tmp; + int i; + + if (!amdgpu_si_is_smc_running(adev)) + return PPSMC_Result_OK; + + for (i = 0; i < 
adev->usec_timeout; i++) { + tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); + if ((tmp & CKEN) == 0) + break; + udelay(1); + } + + return PPSMC_Result_OK; +} + +int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit) +{ + const struct smc_firmware_header_v1_0 *hdr; + unsigned long flags; + u32 ucode_start_address; + u32 ucode_size; + const u8 *src; + u32 data; + + if (!adev->pm.fw) + return -EINVAL; + + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; + + amdgpu_ucode_print_smc_hdr(&hdr->header); + + adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); + ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); + src = (const u8 *) + (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + if (ucode_size & 3) + return -EINVAL; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(SMC_IND_INDEX_0, ucode_start_address); + WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); + while (ucode_size >= 4) { + /* SMC address space is BE */ + data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; + + WREG32(SMC_IND_DATA_0, data); + + src += 4; + ucode_size -= 4; + } + WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return 0; +} + +int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 *value, u32 limit) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + ret = si_set_smc_sram_address(adev, smc_address, limit); + if (ret == 0) + *value = RREG32(SMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return ret; +} + +int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 value, u32 limit) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + ret = si_set_smc_sram_address(adev, smc_address, limit); + if (ret == 0) + WREG32(SMC_IND_DATA_0, value); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h new file mode 100644 index 000000000000..d2930eceaf3c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h @@ -0,0 +1,423 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef PP_SISLANDS_SMC_H +#define PP_SISLANDS_SMC_H + +#include "ppsmc.h" + +#pragma pack(push, 1) + +#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 + +struct PP_SIslands_Dpm2PerfLevel +{ + uint8_t MaxPS; + uint8_t TgtAct; + uint8_t MaxPS_StepInc; + uint8_t MaxPS_StepDec; + uint8_t PSSamplingTime; + uint8_t NearTDPDec; + uint8_t AboveSafeInc; + uint8_t BelowSafeInc; + uint8_t PSDeltaLimit; + uint8_t PSDeltaWin; + uint16_t PwrEfficiencyRatio; + uint8_t Reserved[4]; +}; + +typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel; + +struct PP_SIslands_DPM2Status +{ + uint32_t dpm2Flags; + uint8_t CurrPSkip; + uint8_t CurrPSkipPowerShift; + uint8_t CurrPSkipTDP; + uint8_t CurrPSkipOCP; + uint8_t MaxSPLLIndex; + uint8_t MinSPLLIndex; + uint8_t CurrSPLLIndex; + uint8_t InfSweepMode; + uint8_t InfSweepDir; + uint8_t TDPexceeded; + uint8_t reserved; + uint8_t SwitchDownThreshold; + uint32_t SwitchDownCounter; + uint32_t SysScalingFactor; +}; + +typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status; + +struct PP_SIslands_DPM2Parameters +{ + uint32_t TDPLimit; + uint32_t NearTDPLimit; + uint32_t SafePowerLimit; + uint32_t PowerBoostLimit; + uint32_t MinLimitDelta; +}; +typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters; + +struct PP_SIslands_PAPMStatus +{ + uint32_t EstimatedDGPU_T; + uint32_t EstimatedDGPU_P; + uint32_t EstimatedAPU_T; + uint32_t EstimatedAPU_P; + uint8_t dGPU_T_Limit_Exceeded; + uint8_t reserved[3]; +}; +typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus; + +struct PP_SIslands_PAPMParameters +{ + uint32_t NearTDPLimitTherm; + uint32_t NearTDPLimitPAPM; + uint32_t PlatformPowerLimit; + uint32_t dGPU_T_Limit; + uint32_t dGPU_T_Warning; + uint32_t dGPU_T_Hysteresis; +}; +typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters; + +struct SISLANDS_SMC_SCLK_VALUE +{ + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_FUNC_CNTL_4; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t sclk_value; +}; + +typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE; + +struct SISLANDS_SMC_MCLK_VALUE +{ + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL_1; + uint32_t vMPLL_FUNC_CNTL_2; + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vDLL_CNTL; + uint32_t vMPLL_SS; + uint32_t vMPLL_SS2; + uint32_t mclk_value; +}; + +typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE; + +struct SISLANDS_SMC_VOLTAGE_VALUE +{ + uint16_t value; + uint8_t index; + uint8_t phase_settings; +}; + +typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE; + +struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL +{ + uint8_t ACIndex; + uint8_t displayWatermark; + uint8_t gen2PCIE; + uint8_t UVDWatermark; + uint8_t VCEWatermark; + uint8_t strobeMode; + uint8_t mcFlags; + uint8_t padding; + uint32_t aT; + uint32_t bSP; + SISLANDS_SMC_SCLK_VALUE sclk; + SISLANDS_SMC_MCLK_VALUE mclk; + SISLANDS_SMC_VOLTAGE_VALUE vddc; + SISLANDS_SMC_VOLTAGE_VALUE mvdd; + SISLANDS_SMC_VOLTAGE_VALUE vddci; + SISLANDS_SMC_VOLTAGE_VALUE std_vddc; + uint8_t hysteresisUp; + uint8_t hysteresisDown; + uint8_t stateFlags; + uint8_t arbRefreshState; + uint32_t SQPowerThrottle; + uint32_t SQPowerThrottle_2; + uint32_t MaxPoweredUpCU; + SISLANDS_SMC_VOLTAGE_VALUE high_temp_vddc; + SISLANDS_SMC_VOLTAGE_VALUE low_temp_vddc; + uint32_t reserved[2]; + PP_SIslands_Dpm2PerfLevel dpm2; +}; + +#define 
SISLANDS_SMC_STROBE_RATIO 0x0F +#define SISLANDS_SMC_STROBE_ENABLE 0x10 + +#define SISLANDS_SMC_MC_EDC_RD_FLAG 0x01 +#define SISLANDS_SMC_MC_EDC_WR_FLAG 0x02 +#define SISLANDS_SMC_MC_RTT_ENABLE 0x04 +#define SISLANDS_SMC_MC_STUTTER_EN 0x08 +#define SISLANDS_SMC_MC_PG_EN 0x10 + +typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL; + +struct SISLANDS_SMC_SWSTATE +{ + uint8_t flags; + uint8_t levelCount; + uint8_t padding2; + uint8_t padding3; + SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1]; +}; + +typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE; + +#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0 +#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1 +#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2 +#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3 +#define SISLANDS_SMC_VOLTAGEMASK_MAX 4 + +struct SISLANDS_SMC_VOLTAGEMASKTABLE +{ + uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX]; +}; + +typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE; + +#define SISLANDS_MAX_NO_VREG_STEPS 32 + +struct SISLANDS_SMC_STATETABLE +{ + uint8_t thermalProtectType; + uint8_t systemFlags; + uint8_t maxVDDCIndexInPPTable; + uint8_t extraFlags; + uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS]; + SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; + SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable; + PP_SIslands_DPM2Parameters dpm2Params; + SISLANDS_SMC_SWSTATE initialState; + SISLANDS_SMC_SWSTATE ACPIState; + SISLANDS_SMC_SWSTATE ULVState; + SISLANDS_SMC_SWSTATE driverState; + SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1]; +}; + +typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE; + +#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0 +#define SI_SMC_SOFT_REGISTER_delay_vreg 0xC +#define SI_SMC_SOFT_REGISTER_delay_acpi 0x28 +#define SI_SMC_SOFT_REGISTER_seq_index 0x5C +#define SI_SMC_SOFT_REGISTER_mvdd_chg_time 0x60 +#define SI_SMC_SOFT_REGISTER_mclk_switch_lim 0x70 +#define SI_SMC_SOFT_REGISTER_watermark_threshold 0x78 +#define SI_SMC_SOFT_REGISTER_phase_shedding_delay 0x88 +#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay 0x8C +#define SI_SMC_SOFT_REGISTER_mc_block_delay 0x98 +#define SI_SMC_SOFT_REGISTER_ticks_per_us 0xA8 +#define SI_SMC_SOFT_REGISTER_crtc_index 0xC4 +#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8 +#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC +#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4 +#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC +#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100 +#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118 +#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c +#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120 + +struct PP_SIslands_FanTable +{ + uint8_t fdo_mode; + uint8_t padding; + int16_t temp_min; + int16_t temp_med; + int16_t temp_max; + int16_t slope1; + int16_t slope2; + int16_t fdo_min; + int16_t hys_up; + int16_t hys_down; + int16_t hys_slope; + int16_t temp_resp_lim; + int16_t temp_curr; + int16_t slope_curr; + int16_t pwm_curr; + uint32_t refresh_period; + int16_t fdo_max; + uint8_t temp_src; + int8_t padding2; +}; + +typedef struct PP_SIslands_FanTable PP_SIslands_FanTable; + +#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 +#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32 + +#define SMC_SISLANDS_SCALE_I 7 +#define SMC_SISLANDS_SCALE_R 12 + +struct PP_SIslands_CacConfig +{ + uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES]; + uint32_t 
lkge_lut_V0; + uint32_t lkge_lut_Vstep; + uint32_t WinTime; + uint32_t R_LL; + uint32_t calculation_repeats; + uint32_t l2numWin_TDP; + uint32_t dc_cac; + uint8_t lts_truncate_n; + uint8_t SHIFT_N; + uint8_t log2_PG_LKG_SCALE; + uint8_t cac_temp; + uint32_t lkge_lut_T0; + uint32_t lkge_lut_Tstep; +}; + +typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig; + +#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16 +#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 + +struct SMC_SIslands_MCRegisterAddress +{ + uint16_t s0; + uint16_t s1; +}; + +typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress; + +struct SMC_SIslands_MCRegisterSet +{ + uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet; + +struct SMC_SIslands_MCRegisters +{ + uint8_t last; + uint8_t reserved[3]; + SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; + SMC_SIslands_MCRegisterSet data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; +}; + +typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters; + +struct SMC_SIslands_MCArbDramTimingRegisterSet +{ + uint32_t mc_arb_dram_timing; + uint32_t mc_arb_dram_timing2; + uint8_t mc_arb_rfsh_rate; + uint8_t mc_arb_burst_time; + uint8_t padding[2]; +}; + +typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet; + +struct SMC_SIslands_MCArbDramTimingRegisters +{ + uint8_t arb_current; + uint8_t reserved[3]; + SMC_SIslands_MCArbDramTimingRegisterSet data[16]; +}; + +typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters; + +struct SMC_SISLANDS_SPLL_DIV_TABLE +{ + uint32_t freq[256]; + uint32_t ss[256]; +}; + +#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff +#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0 +#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000 +#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25 +#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff +#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0 +#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000 +#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20 + +typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE; + +#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5 + +#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16 + +struct Smc_SIslands_DTE_Configuration +{ + uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; + uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; + uint32_t K; + uint32_t T0; + uint32_t MaxT; + uint8_t WindowSize; + uint8_t Tdep_count; + uint8_t temp_select; + uint8_t DTE_mode; + uint8_t T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + uint32_t Tthreshold; +}; + +typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration; + +#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1 + +#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000 + +#define SISLANDS_SMC_FIRMWARE_HEADER_version 0x0 +#define SISLANDS_SMC_FIRMWARE_HEADER_flags 0x4 +#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0xC +#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable 0x10 +#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x14 +#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable 0x18 +#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x24 +#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30 +#define 
SISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x38 +#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration 0x40 +#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters 0x48 + +#pragma pack(pop) + +int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit); +void amdgpu_si_start_smc(struct amdgpu_device *adev); +void amdgpu_si_reset_smc(struct amdgpu_device *adev); +int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev); +void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable); +bool amdgpu_si_is_smc_running(struct amdgpu_device *adev); +PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg); +PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev); +int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit); +int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 *value, u32 limit); +int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 value, u32 limit); + +#endif + diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c deleted file mode 100644 index f06f6f4dc3a8..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include <linux/firmware.h> -#include "drmP.h" -#include "amdgpu.h" -#include "tonga_smum.h" - -MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); - -static void tonga_dpm_set_funcs(struct amdgpu_device *adev); - -static int tonga_dpm_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - tonga_dpm_set_funcs(adev); - - return 0; -} - -static int tonga_dpm_init_microcode(struct amdgpu_device *adev) -{ - char fw_name[30] = "amdgpu/tonga_smc.bin"; - int err; - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); - -out: - if (err) { - DRM_ERROR("Failed to load firmware \"%s\"", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } - return err; -} - -static int tonga_dpm_sw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ret = tonga_dpm_init_microcode(adev); - if (ret) - return ret; - - return 0; -} - -static int tonga_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - - return 0; -} - -static int tonga_dpm_hw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - /* smu init only needs to be called at startup, not resume. - * It should be in sw_init, but requires the fw info gathered - * in sw_init from other IP modules. - */ - ret = tonga_smu_init(adev); - if (ret) { - DRM_ERROR("SMU initialization failed\n"); - goto fail; - } - - ret = tonga_smu_start(adev); - if (ret) { - DRM_ERROR("SMU start failed\n"); - goto fail; - } - - mutex_unlock(&adev->pm.mutex); - return 0; - -fail: - adev->firmware.smu_load = false; - mutex_unlock(&adev->pm.mutex); - return -EINVAL; -} - -static int tonga_dpm_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - /* smu fini only needs to be called at teardown, not suspend. - * It should be in sw_fini, but we put it here for symmetry - * with smu init. 
- */ - tonga_smu_fini(adev); - mutex_unlock(&adev->pm.mutex); - return 0; -} - -static int tonga_dpm_suspend(void *handle) -{ - return tonga_dpm_hw_fini(handle); -} - -static int tonga_dpm_resume(void *handle) -{ - return tonga_dpm_hw_init(handle); -} - -static int tonga_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int tonga_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -const struct amd_ip_funcs tonga_dpm_ip_funcs = { - .name = "tonga_dpm", - .early_init = tonga_dpm_early_init, - .late_init = NULL, - .sw_init = tonga_dpm_sw_init, - .sw_fini = tonga_dpm_sw_fini, - .hw_init = tonga_dpm_hw_init, - .hw_fini = tonga_dpm_hw_fini, - .suspend = tonga_dpm_suspend, - .resume = tonga_dpm_resume, - .is_idle = NULL, - .wait_for_idle = NULL, - .soft_reset = NULL, - .set_clockgating_state = tonga_dpm_set_clockgating_state, - .set_powergating_state = tonga_dpm_set_powergating_state, -}; - -static const struct amdgpu_dpm_funcs tonga_dpm_funcs = { - .get_temperature = NULL, - .pre_set_power_state = NULL, - .set_power_state = NULL, - .post_set_power_state = NULL, - .display_configuration_changed = NULL, - .get_sclk = NULL, - .get_mclk = NULL, - .print_power_state = NULL, - .debugfs_print_current_performance_level = NULL, - .force_performance_level = NULL, - .vblank_too_short = NULL, - .powergate_uvd = NULL, -}; - -static void tonga_dpm_set_funcs(struct amdgpu_device *adev) -{ - if (NULL == adev->pm.funcs) - adev->pm.funcs = &tonga_dpm_funcs; -} diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index c92055805a45..d127d59f953a 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -373,10 +373,10 @@ static int tonga_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int tonga_ih_soft_reset(void *handle) +static int tonga_ih_check_soft_reset(void *handle) { - u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -384,6 +384,48 @@ static int tonga_ih_soft_reset(void *handle) SOFT_RESET_IH, 1); if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true; + adev->irq.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false; + adev->irq.srbm_soft_reset = 0; + } + + return 0; +} + +static int tonga_ih_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + + return tonga_ih_hw_fini(adev); +} + +static int tonga_ih_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + + return tonga_ih_hw_init(adev); +} + +static int tonga_ih_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + srbm_soft_reset = adev->irq.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; + tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -427,7 +469,10 @@ const struct amd_ip_funcs tonga_ih_ip_funcs = { .resume = tonga_ih_resume, .is_idle = tonga_ih_is_idle, .wait_for_idle = tonga_ih_wait_for_idle, + .check_soft_reset = 
tonga_ih_check_soft_reset, + .pre_soft_reset = tonga_ih_pre_soft_reset, .soft_reset = tonga_ih_soft_reset, + .post_soft_reset = tonga_ih_post_soft_reset, .set_clockgating_state = tonga_ih_set_clockgating_state, .set_powergating_state = tonga_ih_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c deleted file mode 100644 index 940de1836f8f..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ /dev/null @@ -1,862 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include <linux/firmware.h> -#include "drmP.h" -#include "amdgpu.h" -#include "tonga_ppsmc.h" -#include "tonga_smum.h" -#include "smu_ucode_xfer_vi.h" -#include "amdgpu_ucode.h" - -#include "smu/smu_7_1_2_d.h" -#include "smu/smu_7_1_2_sh_mask.h" - -#define TONGA_SMC_SIZE 0x20000 - -static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit) -{ - uint32_t val; - - if (smc_address & 3) - return -EINVAL; - - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(mmSMC_IND_INDEX_0, smc_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - return 0; -} - -static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) -{ - uint32_t addr; - uint32_t data, orig_data; - int result = 0; - uint32_t extra_shift; - unsigned long flags; - - if (smc_start_address & 3) - return -EINVAL; - - if ((smc_start_address + byte_count) > limit) - return -EINVAL; - - addr = smc_start_address; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - while (byte_count >= 4) { - /* Bytes are written into the SMC addres space with the MSB first */ - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - - result = tonga_set_smc_sram_address(adev, addr, limit); - - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - if (0 != byte_count) { - /* Now write odd bytes left, do a read modify write cycle */ - data = 0; - - result = tonga_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - orig_data = RREG32(mmSMC_IND_DATA_0); - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - data = (data << 8) + *src++; - byte_count--; - } - - data <<= extra_shift; - 
data |= (orig_data & ~((~0UL) << extra_shift)); - - result = tonga_set_smc_sram_address(adev, addr, limit); - if (result) - goto out; - - WREG32(mmSMC_IND_DATA_0, data); - } - -out: - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int tonga_program_jump_on_start(struct amdgpu_device *adev) -{ - static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40}; - tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); - - return 0; -} - -static bool tonga_is_smc_ram_running(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable); - - return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C))); -} - -static int wait_smu_response(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32(mmSMC_RESP_0); - if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} - -static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, 0x20000); - WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test); - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send message\n"); - return -EINVAL; - } - - return 0; -} - -static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) -{ - if (!tonga_is_smc_ram_running(adev)) - { - return -EINVAL; - } - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MESSAGE_0, msg); - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send message\n"); - return -EINVAL; - } - - return 0; -} - -static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, - PPSMC_Msg msg) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MESSAGE_0, msg); - - return 0; -} - -static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, - uint32_t parameter) -{ - if (!tonga_is_smc_ram_running(adev)) - return -EINVAL; - - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, parameter); - - return tonga_send_msg_to_smc(adev, msg); -} - -static int tonga_send_msg_to_smc_with_parameter_without_waiting( - struct amdgpu_device *adev, - PPSMC_Msg msg, uint32_t parameter) -{ - if (wait_smu_response(adev)) { - DRM_ERROR("Failed to send previous message\n"); - return -EINVAL; - } - - WREG32(mmSMC_MSG_ARG_0, parameter); - - return tonga_send_msg_to_smc_without_waiting(adev, msg); -} - -#if 0 /* not used yet */ -static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev) -{ - int i; - uint32_t val; - - if (!tonga_is_smc_ram_running(adev)) - return -EINVAL; - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) - break; - udelay(1); - } - - if (i == adev->usec_timeout) - return -EINVAL; - - return 0; -} -#endif - -static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev) -{ - const struct smc_firmware_header_v1_0 *hdr; - uint32_t ucode_size; - uint32_t ucode_start_address; - const uint8_t *src; - uint32_t val; - uint32_t byte_count; - uint32_t *data; - unsigned long flags; - - if (!adev->pm.fw) - return -EINVAL; - - /* 
Skip SMC ucode loading on SR-IOV capable boards. - * vbios does this for us in asic_init in that case. - */ - if (adev->virtualization.supports_sr_iov) - return 0; - - hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; - amdgpu_ucode_print_smc_hdr(&hdr->header); - - adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); - ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); - src = (const uint8_t *) - (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - - if (ucode_size & 3) { - DRM_ERROR("SMC ucode is not 4 bytes aligned\n"); - return -EINVAL; - } - - if (ucode_size > TONGA_SMC_SIZE) { - DRM_ERROR("SMC address is beyond the SMC RAM area\n"); - return -EINVAL; - } - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(mmSMC_IND_INDEX_0, ucode_start_address); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - - byte_count = ucode_size; - data = (uint32_t *)src; - for (; byte_count >= 4; data++, byte_count -= 4) - WREG32(mmSMC_IND_DATA_0, data[0]); - - val = RREG32(mmSMC_IND_ACCESS_CNTL); - val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); - WREG32(mmSMC_IND_ACCESS_CNTL, val); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return 0; -} - -#if 0 /* not used yet */ -static int tonga_read_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t *value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = tonga_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - *value = RREG32(mmSMC_IND_DATA_0); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int tonga_write_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, - uint32_t value, - uint32_t limit) -{ - int result; - unsigned long flags; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - result = tonga_set_smc_sram_address(adev, smc_address, limit); - if (result == 0) - WREG32(mmSMC_IND_DATA_0, value); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - return result; -} - -static int tonga_smu_stop_smc(struct amdgpu_device *adev) -{ - uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - return 0; -} -#endif - -static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case UCODE_ID_SDMA0: - return AMDGPU_UCODE_ID_SDMA0; - case UCODE_ID_SDMA1: - return AMDGPU_UCODE_ID_SDMA1; - case UCODE_ID_CP_CE: - return AMDGPU_UCODE_ID_CP_CE; - case UCODE_ID_CP_PFP: - return AMDGPU_UCODE_ID_CP_PFP; - case UCODE_ID_CP_ME: - return AMDGPU_UCODE_ID_CP_ME; - case UCODE_ID_CP_MEC: - case UCODE_ID_CP_MEC_JT1: - return AMDGPU_UCODE_ID_CP_MEC1; - case UCODE_ID_CP_MEC_JT2: - return AMDGPU_UCODE_ID_CP_MEC2; - case UCODE_ID_RLC_G: - return AMDGPU_UCODE_ID_RLC_G; - default: - DRM_ERROR("ucode type is out of range!\n"); - return AMDGPU_UCODE_ID_MAXIMUM; - } -} - -static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev, - uint32_t fw_type, - struct SMU_Entry *entry) -{ - enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type); - struct 
amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; - const struct gfx_firmware_header_v1_0 *header = NULL; - uint64_t gpu_addr; - uint32_t data_size; - - if (ucode->fw == NULL) - return -EINVAL; - - gpu_addr = ucode->mc_addr; - header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; - data_size = le32_to_cpu(header->header.ucode_size_bytes); - - if ((fw_type == UCODE_ID_CP_MEC_JT1) || - (fw_type == UCODE_ID_CP_MEC_JT2)) { - gpu_addr += le32_to_cpu(header->jt_offset) << 2; - data_size = le32_to_cpu(header->jt_size) << 2; - } - - entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); - entry->id = (uint16_t)fw_type; - entry->image_addr_high = upper_32_bits(gpu_addr); - entry->image_addr_low = lower_32_bits(gpu_addr); - entry->meta_data_addr_high = 0; - entry->meta_data_addr_low = 0; - entry->data_size_byte = data_size; - entry->num_register_entries = 0; - - if (fw_type == UCODE_ID_RLC_G) - entry->flags = 1; - else - entry->flags = 0; - - return 0; -} - -static int tonga_smu_request_load_fw(struct amdgpu_device *adev) -{ - struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv; - struct SMU_DRAMData_TOC *toc; - uint32_t fw_to_load; - - WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0); - - tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high); - tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low); - - toc = (struct SMU_DRAMData_TOC *)private->header; - toc->num_entries = 0; - toc->structure_version = 1; - - if (!adev->firmware.smu_load) - return 0; - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for RLC\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for CE\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for PFP\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for ME\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA0\n"); - return -EINVAL; - } - - if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for SDMA1\n"); - return -EINVAL; - } - - tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); - tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); - - fw_to_load = 
UCODE_ID_RLC_G_MASK | - UCODE_ID_SDMA0_MASK | - UCODE_ID_SDMA1_MASK | - UCODE_ID_CP_CE_MASK | - UCODE_ID_CP_ME_MASK | - UCODE_ID_CP_PFP_MASK | - UCODE_ID_CP_MEC_MASK; - - if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { - DRM_ERROR("Fail to request SMU load ucode\n"); - return -EINVAL; - } - - return 0; -} - -static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type) -{ - switch (fw_type) { - case AMDGPU_UCODE_ID_SDMA0: - return UCODE_ID_SDMA0_MASK; - case AMDGPU_UCODE_ID_SDMA1: - return UCODE_ID_SDMA1_MASK; - case AMDGPU_UCODE_ID_CP_CE: - return UCODE_ID_CP_CE_MASK; - case AMDGPU_UCODE_ID_CP_PFP: - return UCODE_ID_CP_PFP_MASK; - case AMDGPU_UCODE_ID_CP_ME: - return UCODE_ID_CP_ME_MASK; - case AMDGPU_UCODE_ID_CP_MEC1: - return UCODE_ID_CP_MEC_MASK; - case AMDGPU_UCODE_ID_CP_MEC2: - return UCODE_ID_CP_MEC_MASK; - case AMDGPU_UCODE_ID_RLC_G: - return UCODE_ID_RLC_G_MASK; - default: - DRM_ERROR("ucode type is out of range!\n"); - return 0; - } -} - -static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev, - uint32_t fw_type) -{ - uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type); - int i; - - for (i = 0; i < adev->usec_timeout; i++) { - if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("check firmware loading failed\n"); - return -EINVAL; - } - - return 0; -} - -static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev) -{ - int result; - uint32_t val; - int i; - - /* Assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - result = tonga_smu_upload_firmware_image(adev); - if (result) - return result; - - /* Clear status */ - WREG32_SMC(ixSMU_STATUS, 0); - - /* Enable clock */ - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - /* De-assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - /* Set SMU Auto Start */ - val = RREG32_SMC(ixSMU_INPUT_DATA); - val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1); - WREG32_SMC(ixSMU_INPUT_DATA, val); - - /* Clear firmware interrupt enable flag */ - WREG32_SMC(ixFIRMWARE_FLAGS, 0); - - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixRCU_UC_EVENTS); - if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Interrupt is not enabled by firmware\n"); - return -EINVAL; - } - - /* Call Test SMU message with 0x20000 offset - * to trigger SMU start - */ - tonga_send_msg_to_smc_offset(adev); - - /* Wait for done bit to be set */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixSMU_STATUS); - if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Timeout for SMU start\n"); - return -EINVAL; - } - - /* Check pass/failed indicator */ - val = RREG32_SMC(ixSMU_STATUS); - if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) { - DRM_ERROR("SMU Firmware start failed\n"); - return -EINVAL; - } - - /* Wait for firmware to initialize */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixFIRMWARE_FLAGS); - if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) - break; 
- udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("SMU firmware initialization failed\n"); - return -EINVAL; - } - - return 0; -} - -static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev) -{ - int i, result; - uint32_t val; - - /* wait for smc boot up */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixRCU_UC_EVENTS); - val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done); - if (val) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("SMC boot sequence is not completed\n"); - return -EINVAL; - } - - /* Clear firmware interrupt enable flag */ - WREG32_SMC(ixFIRMWARE_FLAGS, 0); - - /* Assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - result = tonga_smu_upload_firmware_image(adev); - if (result) - return result; - - /* Set smc instruct start point at 0x0 */ - tonga_program_jump_on_start(adev); - - /* Enable clock */ - val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); - - /* De-assert reset */ - val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); - - /* Wait for firmware to initialize */ - for (i = 0; i < adev->usec_timeout; i++) { - val = RREG32_SMC(ixFIRMWARE_FLAGS); - if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) - break; - udelay(1); - } - - if (i == adev->usec_timeout) { - DRM_ERROR("Timeout for SMC firmware initialization\n"); - return -EINVAL; - } - - return 0; -} - -int tonga_smu_start(struct amdgpu_device *adev) -{ - int result; - uint32_t val; - - if (!tonga_is_smc_ram_running(adev)) { - val = RREG32_SMC(ixSMU_FIRMWARE); - if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) { - result = tonga_smu_start_in_non_protection_mode(adev); - if (result) - return result; - } else { - result = tonga_smu_start_in_protection_mode(adev); - if (result) - return result; - } - } - - return tonga_smu_request_load_fw(adev); -} - -static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = { - .check_fw_load_finish = tonga_smu_check_fw_load_finish, - .request_smu_load_fw = NULL, - .request_smu_specific_fw = NULL, -}; - -int tonga_smu_init(struct amdgpu_device *adev) -{ - struct tonga_smu_private_data *private; - uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; - uint32_t smu_internal_buffer_size = 200*4096; - struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; - struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; - uint64_t mc_addr; - void *toc_buf_ptr; - void *smu_buf_ptr; - int ret; - - private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL); - if (NULL == private) - return -ENOMEM; - - /* allocate firmware buffers */ - if (adev->firmware.smu_load) - amdgpu_ucode_init_bo(adev); - - adev->smu.priv = private; - adev->smu.fw_flags = 0; - - /* Allocate FW image data structure and header buffer */ - ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, toc_buf); - if (ret) { - DRM_ERROR("Failed to allocate memory for TOC buffer\n"); - return -ENOMEM; - } - - /* Allocate buffer for SMU internal buffer */ - ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, smu_buf); - if (ret) 
{ - DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); - return -ENOMEM; - } - - /* Retrieve GPU address for header buffer and internal buffer */ - ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to reserve the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to pin the TOC buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to map the TOC buffer\n"); - return -EINVAL; - } - - amdgpu_bo_unreserve(adev->smu.toc_buf); - private->header_addr_low = lower_32_bits(mc_addr); - private->header_addr_high = upper_32_bits(mc_addr); - private->header = toc_buf_ptr; - - ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to reserve the SMU internal buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to pin the SMU internal buffer\n"); - return -EINVAL; - } - - ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - DRM_ERROR("Failed to map the SMU internal buffer\n"); - return -EINVAL; - } - - amdgpu_bo_unreserve(adev->smu.smu_buf); - private->smu_buffer_addr_low = lower_32_bits(mc_addr); - private->smu_buffer_addr_high = upper_32_bits(mc_addr); - - adev->smu.smumgr_funcs = &tonga_smumgr_funcs; - - return 0; -} - -int tonga_smu_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_unref(&adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - kfree(adev->smu.priv); - adev->smu.priv = NULL; - if (adev->firmware.fw_buf) - amdgpu_ucode_fini_bo(adev); - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 132e613ed674..f6c941550b8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -116,7 +116,7 @@ static int uvd_v4_2_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; @@ -526,6 +526,20 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } +static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring) +{ + return + 4; /* uvd_v4_2_ring_emit_ib */ +} + +static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring) +{ + return + 2 + /* uvd_v4_2_ring_emit_hdp_flush */ + 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */ + 14; /* uvd_v4_2_ring_emit_fence x1 no user fence */ +} + /** * uvd_v4_2_mc_resume - memory controller programming * @@ -756,6 +770,8 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_uvd_ring_begin_use, .end_use = amdgpu_uvd_ring_end_use, + .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size, + 
.get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size, }; static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 101de136ba63..400c16fe579e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -112,7 +112,7 @@ static int uvd_v5_0_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; @@ -577,6 +577,20 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } +static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) +{ + return + 6; /* uvd_v5_0_ring_emit_ib */ +} + +static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) +{ + return + 2 + /* uvd_v5_0_ring_emit_hdp_flush */ + 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */ + 14; /* uvd_v5_0_ring_emit_fence x1 no user fence */ +} + static bool uvd_v5_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -807,6 +821,8 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_uvd_ring_begin_use, .end_use = amdgpu_uvd_ring_end_use, + .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size, + .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size, }; static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 7f21102bfb99..e0fd9f21ed95 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -116,7 +116,7 @@ static int uvd_v6_0_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; @@ -396,21 +396,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uvd_v6_0_mc_resume(adev); - /* Set dynamic clock gating in S/W control mode */ - if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) { - uvd_v6_0_set_sw_clock_gating(adev); - } else { - /* disable clock gating */ - uint32_t data = RREG32(mmUVD_CGC_CTRL); - data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; - WREG32(mmUVD_CGC_CTRL, data); - } + /* disable clock gating */ + WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0); /* disable interupt */ - WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); + WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0); /* stall UMC and register bus before resetting VCPU */ - WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1); mdelay(1); /* put LMI, VCPU, RBC etc... 
into reset */ @@ -426,7 +419,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* take UVD block out of reset */ - WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); + WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0); mdelay(5); /* initialize UVD memory controller */ @@ -461,7 +454,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); /* enable UMC */ - WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0); /* boot up the VCPU */ WREG32(mmUVD_SOFT_RESET, 0); @@ -481,11 +474,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) break; DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); - WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1); mdelay(10); - WREG32_P(mmUVD_SOFT_RESET, 0, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0); mdelay(10); r = -1; } @@ -502,15 +493,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) /* clear the bit 4 of UVD_STATUS */ WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + /* force RBC into idle state */ rb_bufsz = order_base_2(ring->ring_size); - tmp = 0; - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); - /* force RBC into idle state */ WREG32(mmUVD_RBC_RB_CNTL, tmp); /* set the write pointer delay */ @@ -531,7 +521,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); - WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); + WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); return 0; } @@ -735,6 +725,31 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0xE); } +static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) +{ + return + 8; /* uvd_v6_0_ring_emit_ib */ +} + +static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) +{ + return + 2 + /* uvd_v6_0_ring_emit_hdp_flush */ + 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */ + 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ + 14; /* uvd_v6_0_ring_emit_fence x1 no user fence */ +} + +static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring) +{ + return + 2 + /* uvd_v6_0_ring_emit_hdp_flush */ + 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */ + 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ + 20 + /* uvd_v6_0_ring_emit_vm_flush */ + 14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */ +} + static bool uvd_v6_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -748,20 +763,82 @@ static int uvd_v6_0_wait_for_idle(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { - if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) + if (uvd_v6_0_is_idle(handle)) return 0; } return -ETIMEDOUT; } -static int uvd_v6_0_soft_reset(void *handle) +#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd +static int uvd_v6_0_check_soft_reset(void 
*handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS); + + if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || + REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || + (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); + + if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true; + adev->uvd.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false; + adev->uvd.srbm_soft_reset = 0; + } + return 0; +} +static int uvd_v6_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; uvd_v6_0_stop(adev); + return 0; +} + +static int uvd_v6_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; + srbm_soft_reset = adev->uvd.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + } + + return 0; +} + +static int uvd_v6_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; - WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, - ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); mdelay(5); return uvd_v6_0_start(adev); @@ -902,21 +979,15 @@ static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; - static int curstate = -1; if (adev->asic_type == CHIP_FIJI || - adev->asic_type == CHIP_POLARIS10) - uvd_v6_set_bypass_mode(adev, enable); + adev->asic_type == CHIP_POLARIS10) + uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? 
true : false); if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) return 0; - if (curstate == state) - return 0; - - curstate = state; - if (enable) { + if (state == AMD_CG_STATE_GATE) { /* disable HW gating and enable Sw gating */ uvd_v6_0_set_sw_clock_gating(adev); } else { @@ -946,6 +1017,8 @@ static int uvd_v6_0_set_powergating_state(void *handle, if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) return 0; + WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); + if (state == AMD_PG_STATE_GATE) { uvd_v6_0_stop(adev); return 0; @@ -966,7 +1039,10 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .resume = uvd_v6_0_resume, .is_idle = uvd_v6_0_is_idle, .wait_for_idle = uvd_v6_0_wait_for_idle, + .check_soft_reset = uvd_v6_0_check_soft_reset, + .pre_soft_reset = uvd_v6_0_pre_soft_reset, .soft_reset = uvd_v6_0_soft_reset, + .post_soft_reset = uvd_v6_0_post_soft_reset, .set_clockgating_state = uvd_v6_0_set_clockgating_state, .set_powergating_state = uvd_v6_0_set_powergating_state, }; @@ -986,6 +1062,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_uvd_ring_begin_use, .end_use = amdgpu_uvd_ring_end_use, + .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size, + .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size, }; static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { @@ -1005,6 +1083,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_uvd_ring_begin_use, .end_use = amdgpu_uvd_ring_end_use, + .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size, + .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm, }; static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 80a37a602181..76e64ad04a53 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -30,16 +30,17 @@ #include "amdgpu.h" #include "amdgpu_vce.h" #include "cikd.h" - #include "vce/vce_2_0_d.h" #include "vce/vce_2_0_sh_mask.h" - +#include "smu/smu_7_0_1_d.h" +#include "smu/smu_7_0_1_sh_mask.h" #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" #define VCE_V2_0_FW_SIZE (256 * 1024) #define VCE_V2_0_STACK_SIZE (64 * 1024) #define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES) +#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 static void vce_v2_0_mc_resume(struct amdgpu_device *adev); static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev); @@ -96,6 +97,49 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmVCE_RB_WPTR2, ring->wptr); } +static int vce_v2_0_lmi_clean(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + uint32_t status = RREG32(mmVCE_LMI_STATUS); + + if (status & 0x337f) + return 0; + mdelay(10); + } + } + + return -ETIMEDOUT; +} + +static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + uint32_t status = RREG32(mmVCE_STATUS); + + if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) + return 0; + mdelay(10); + } + + DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmVCE_SOFT_RESET, 0, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + } + + return -ETIMEDOUT; +} + /** * 
vce_v2_0_start - start VCE block * @@ -106,7 +150,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring) static int vce_v2_0_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - int i, j, r; + int r; vce_v2_0_mc_resume(adev); @@ -127,36 +171,12 @@ static int vce_v2_0_start(struct amdgpu_device *adev) WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); mdelay(100); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - - for (i = 0; i < 10; ++i) { - uint32_t status; - for (j = 0; j < 100; ++j) { - status = RREG32(mmVCE_STATUS); - if (status & 2) - break; - mdelay(10); - } - r = 0; - if (status & 2) - break; - - DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - r = -1; - } + r = vce_v2_0_firmware_loaded(adev); /* clear BUSY flag */ WREG32_P(mmVCE_STATUS, 0, ~1); @@ -173,6 +193,8 @@ static int vce_v2_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->vce.num_rings = 2; + vce_v2_0_set_ring_funcs(adev); vce_v2_0_set_irq_funcs(adev); @@ -182,7 +204,7 @@ static int vce_v2_0_early_init(void *handle) static int vce_v2_0_sw_init(void *handle) { struct amdgpu_ring *ring; - int r; + int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* VCE */ @@ -199,19 +221,14 @@ static int vce_v2_0_sw_init(void *handle) if (r) return r; - ring = &adev->vce.ring[0]; - sprintf(ring->name, "vce0"); - r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, - &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); - if (r) - return r; - - ring = &adev->vce.ring[1]; - sprintf(ring->name, "vce1"); - r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, - &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); - if (r) - return r; + for (i = 0; i < adev->vce.num_rings; i++) { + ring = &adev->vce.ring[i]; + sprintf(ring->name, "vce%d", i); + r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, + &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); + if (r) + return r; + } return r; } @@ -234,29 +251,23 @@ static int vce_v2_0_sw_fini(void *handle) static int vce_v2_0_hw_init(void *handle) { - struct amdgpu_ring *ring; - int r; + int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; r = vce_v2_0_start(adev); + /* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */ if (r) -/* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */ return 0; - ring = &adev->vce.ring[0]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - return r; - } + for (i = 0; i < adev->vce.num_rings; i++) + adev->vce.ring[i].ready = false; - ring = &adev->vce.ring[1]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - return r; + for (i = 0; i < adev->vce.num_rings; i++) { + r = amdgpu_ring_test_ring(&adev->vce.ring[i]); + if (r) + return r; + else + adev->vce.ring[i].ready = true; } 
DRM_INFO("VCE initialized successfully.\n"); @@ -338,47 +349,50 @@ static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated) static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) { - u32 orig, tmp; + if (vce_v2_0_wait_for_idle(adev)) { + DRM_INFO("VCE is busy, Can't set clock gateing"); + return; + } - if (gated) { - if (vce_v2_0_wait_for_idle(adev)) { - DRM_INFO("VCE is busy, Can't set clock gateing"); - return; - } - WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(100); - WREG32(mmVCE_STATUS, 0); - } else { - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(100); + WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100); + + if (vce_v2_0_lmi_clean(adev)) { + DRM_INFO("LMI is busy, Can't set clock gateing"); + return; } - tmp = RREG32(mmVCE_CLOCK_GATING_B); - tmp &= ~0x00060006; + WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32(mmVCE_STATUS, 0); + + if (gated) + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); + /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */ if (gated) { - tmp |= 0xe10000; + /* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */ + WREG32(mmVCE_CLOCK_GATING_B, 0xe90010); } else { - tmp |= 0xe1; - tmp &= ~0xe10000; + /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */ + WREG32(mmVCE_CLOCK_GATING_B, 0x800f1); } - WREG32(mmVCE_CLOCK_GATING_B, tmp); - orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING); - tmp &= ~0x1fe000; - tmp &= ~0xff000000; - if (tmp != orig) - WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/; + WREG32(mmVCE_UENC_CLOCK_GATING, 0x40); - orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); - tmp &= ~0x3fc; - if (tmp != orig) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */ + WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00); - if (gated) - WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100); + if(!gated) { + WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); + mdelay(100); + WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + + vce_v2_0_firmware_loaded(adev); + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + } } static void vce_v2_0_disable_cg(struct amdgpu_device *adev) @@ -458,9 +472,7 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev) WREG32(mmVCE_VCPU_CACHE_SIZE2, size); WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); - - WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, - ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); vce_v2_0_init_cg(adev); } @@ -474,11 +486,11 @@ static bool vce_v2_0_is_idle(void *handle) static int vce_v2_0_wait_for_idle(void *handle) { - unsigned i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + unsigned i; for (i = 0; i < adev->usec_timeout; i++) { - if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK)) + if (vce_v2_0_is_idle(handle)) return 0; } return 
-ETIMEDOUT; @@ -488,8 +500,7 @@ static int vce_v2_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK, - ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK); + WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1); mdelay(5); return vce_v2_0_start(adev); @@ -516,10 +527,8 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev, DRM_DEBUG("IH: VCE\n"); switch (entry->src_data) { case 0: - amdgpu_fence_process(&adev->vce.ring[0]); - break; case 1: - amdgpu_fence_process(&adev->vce.ring[1]); + amdgpu_fence_process(&adev->vce.ring[entry->src_data]); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -530,11 +539,28 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) +{ + u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); + + if (enable) + tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; + else + tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; + + WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); +} + + static int vce_v2_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { bool gate = false; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + + + vce_v2_0_set_bypass_mode(adev, enable); if (state == AMD_CG_STATE_GATE) gate = true; @@ -596,12 +622,16 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_vce_ring_begin_use, .end_use = amdgpu_vce_ring_end_use, + .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size, + .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size, }; static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs; - adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs; + int i; + + for (i = 0; i < adev->vce.num_rings; i++) + adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs; } static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index c271abffd8dd..3f6db4ec0102 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -37,6 +37,9 @@ #include "gca/gfx_8_0_d.h" #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" + #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 @@ -67,8 +70,10 @@ static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) if (ring == &adev->vce.ring[0]) return RREG32(mmVCE_RB_RPTR); - else + else if (ring == &adev->vce.ring[1]) return RREG32(mmVCE_RB_RPTR2); + else + return RREG32(mmVCE_RB_RPTR3); } /** @@ -84,8 +89,10 @@ static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) if (ring == &adev->vce.ring[0]) return RREG32(mmVCE_RB_WPTR); - else + else if (ring == &adev->vce.ring[1]) return RREG32(mmVCE_RB_WPTR2); + else + return RREG32(mmVCE_RB_WPTR3); } /** @@ -101,108 +108,80 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring) if (ring == &adev->vce.ring[0]) WREG32(mmVCE_RB_WPTR, ring->wptr); - else + else if (ring == &adev->vce.ring[1]) WREG32(mmVCE_RB_WPTR2, ring->wptr); + else + WREG32(mmVCE_RB_WPTR3, ring->wptr); } static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) { - u32 tmp, data; - - tmp = data = RREG32(mmVCE_RB_ARB_CTRL); - if 
(override) - data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - else - data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - - if (tmp != data) - WREG32(mmVCE_RB_ARB_CTRL, data); + WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0); } static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, bool gated) { - u32 tmp, data; + u32 data; + /* Set Override to disable Clock Gating */ vce_v3_0_override_vce_clock_gating(adev, true); - if (!gated) { - /* Force CLOCK ON for VCE_CLOCK_GATING_B, - * {*_FORCE_ON, *_FORCE_OFF} = {1, 0} - * VREG can be FORCE ON or set to Dynamic, but can't be OFF - */ - tmp = data = RREG32(mmVCE_CLOCK_GATING_B); + /* This function enables MGCG which is controlled by firmware. + With the clocks in the gated state the core is still + accessible but the firmware will throttle the clocks on the + fly as necessary. + */ + if (gated) { + data = RREG32(mmVCE_CLOCK_GATING_B); data |= 0x1ff; data &= ~0xef0000; - if (tmp != data) - WREG32(mmVCE_CLOCK_GATING_B, data); + WREG32(mmVCE_CLOCK_GATING_B, data); - /* Force CLOCK ON for VCE_UENC_CLOCK_GATING, - * {*_FORCE_ON, *_FORCE_OFF} = {1, 0} - */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); + data = RREG32(mmVCE_UENC_CLOCK_GATING); data |= 0x3ff000; data &= ~0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING, data); + WREG32(mmVCE_UENC_CLOCK_GATING, data); - /* set VCE_UENC_CLOCK_GATING_2 */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); + data = RREG32(mmVCE_UENC_CLOCK_GATING_2); data |= 0x2; - data &= ~0x2; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING_2, data); + data &= ~0x00010000; + WREG32(mmVCE_UENC_CLOCK_GATING_2, data); - /* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */ - tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); data |= 0x37f; - if (tmp != data) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); + WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); - /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */ - tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); + data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8; - if (tmp != data) - WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); + VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | + 0x8; + WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); } else { - /* Force CLOCK OFF for VCE_CLOCK_GATING_B, - * {*, *_FORCE_OFF} = {*, 1} - * set VREG to Dynamic, as it can't be OFF - */ - tmp = data = RREG32(mmVCE_CLOCK_GATING_B); + data = RREG32(mmVCE_CLOCK_GATING_B); data &= ~0x80010; data |= 0xe70008; - if (tmp != data) - WREG32(mmVCE_CLOCK_GATING_B, data); - /* Force CLOCK OFF for VCE_UENC_CLOCK_GATING, - * Force ClOCK OFF takes precedent over Force CLOCK ON setting. 
- * {*_FORCE_ON, *_FORCE_OFF} = {*, 1} - */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); + WREG32(mmVCE_CLOCK_GATING_B, data); + + data = RREG32(mmVCE_UENC_CLOCK_GATING); data |= 0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING, data); - /* Set VCE_UENC_CLOCK_GATING_2 */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); + WREG32(mmVCE_UENC_CLOCK_GATING, data); + + data = RREG32(mmVCE_UENC_CLOCK_GATING_2); data |= 0x10000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING_2, data); - /* Set VCE_UENC_REG_CLOCK_GATING to dynamic */ - tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + WREG32(mmVCE_UENC_CLOCK_GATING_2, data); + + data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); data &= ~0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); - /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */ - tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); + WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); + + data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8); - if (tmp != data) - WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); + VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | + 0x8); + WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); } vce_v3_0_override_vce_clock_gating(adev, false); } @@ -221,12 +200,9 @@ static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev) } DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); mdelay(10); - WREG32_P(mmVCE_SOFT_RESET, 0, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); mdelay(10); } @@ -259,43 +235,34 @@ static int vce_v3_0_start(struct amdgpu_device *adev) WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); + ring = &adev->vce.ring[2]; + WREG32(mmVCE_RB_RPTR3, ring->wptr); + WREG32(mmVCE_RB_WPTR3, ring->wptr); + WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr); + WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr)); + WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4); + mutex_lock(&adev->grbm_idx_mutex); for (idx = 0; idx < 2; ++idx) { if (adev->vce.harvest_config & (1 << idx)) continue; - if (idx == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); vce_v3_0_mc_resume(adev, idx); - - WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK, - ~VCE_STATUS__JOB_BUSY_MASK); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); else - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, - ~VCE_VCPU_CNTL__CLK_EN_MASK); - - WREG32_P(mmVCE_SOFT_RESET, 0, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); mdelay(100); r = vce_v3_0_firmware_loaded(adev); /* clear BUSY flag */ - WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); - - /* Set Clock-Gating off */ - if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) - vce_v3_0_set_vce_sw_clock_gating(adev, false); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0); if (r) { DRM_ERROR("VCE not responding, giving up!!!\n"); @@ -304,7 +271,7 @@ 
static int vce_v3_0_start(struct amdgpu_device *adev) } } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -319,33 +286,25 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - if (idx == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); else - WREG32_P(mmVCE_VCPU_CNTL, 0, - ~VCE_VCPU_CNTL__CLK_EN_MASK); + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0); + /* hold on ECPU */ - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); /* clear BUSY flag */ - WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0); /* Set Clock-Gating off */ if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) vce_v3_0_set_vce_sw_clock_gating(adev, false); } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -399,6 +358,8 @@ static int vce_v3_0_early_init(void *handle) (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) return -ENOENT; + adev->vce.num_rings = 3; + vce_v3_0_set_ring_funcs(adev); vce_v3_0_set_irq_funcs(adev); @@ -409,7 +370,7 @@ static int vce_v3_0_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int r; + int r, i; /* VCE */ r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq); @@ -425,19 +386,14 @@ static int vce_v3_0_sw_init(void *handle) if (r) return r; - ring = &adev->vce.ring[0]; - sprintf(ring->name, "vce0"); - r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, - &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); - if (r) - return r; - - ring = &adev->vce.ring[1]; - sprintf(ring->name, "vce1"); - r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, - &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); - if (r) - return r; + for (i = 0; i < adev->vce.num_rings; i++) { + ring = &adev->vce.ring[i]; + sprintf(ring->name, "vce%d", i); + r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, + &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); + if (r) + return r; + } return r; } @@ -467,10 +423,10 @@ static int vce_v3_0_hw_init(void *handle) if (r) return r; - adev->vce.ring[0].ready = false; - adev->vce.ring[1].ready = false; + for (i = 0; i < adev->vce.num_rings; i++) + adev->vce.ring[i].ready = false; - for (i = 0; i < 2; i++) { + for (i = 0; i < adev->vce.num_rings; i++) { r = amdgpu_ring_test_ring(&adev->vce.ring[i]); if (r) return r; @@ -534,7 +490,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); - WREG32(mmVCE_CLOCK_GATING_B, 0xf7); + WREG32(mmVCE_CLOCK_GATING_B, 0x1FF); WREG32(mmVCE_LMI_CTRL, 0x00398000); WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); @@ -573,9 +529,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) } WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); - - WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, - 
~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); } static bool vce_v3_0_is_idle(void *handle) @@ -601,20 +555,108 @@ static int vce_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } +#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */ +#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */ +#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */ +#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ + VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) + +static int vce_v3_0_check_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + + /* According to VCE team , we should use VCE_STATUS instead + * SRBM_STATUS.VCE_BUSY bit for busy status checking. + * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE + * instance's registers are accessed + * (0 for 1st instance, 10 for 2nd instance). + * + *VCE_STATUS + *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 | |FW_LOADED|JOB | + *|----+----+-----------+----+----+----+----------+---------+----| + *|bit8|bit7| bit6 |bit5|bit4|bit3| bit2 | bit1 |bit0| + * + * VCE team suggest use bit 3--bit 6 for busy status check + */ + mutex_lock(&adev->grbm_idx_mutex); + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); + } + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); + if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); + } + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + + if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true; + adev->vce.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false; + adev->vce.srbm_soft_reset = 0; + } + mutex_unlock(&adev->grbm_idx_mutex); + return 0; +} + static int vce_v3_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 mask = 0; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; + srbm_soft_reset = adev->vce.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK; - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 
0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK; + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + } + + return 0; +} + +static int vce_v3_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; - WREG32_P(mmSRBM_SOFT_RESET, mask, - ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK | - SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK)); mdelay(5); - return vce_v3_0_start(adev); + return vce_v3_0_suspend(adev); +} + + +static int vce_v3_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; + + mdelay(5); + + return vce_v3_0_resume(adev); } static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev, @@ -637,13 +679,12 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, { DRM_DEBUG("IH: VCE\n"); - WREG32_P(mmVCE_SYS_INT_STATUS, - VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK, - ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK); + WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1); switch (entry->src_data) { case 0: case 1: + case 2: amdgpu_fence_process(&adev->vce.ring[entry->src_data]); break; default: @@ -655,7 +696,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable) +static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) { u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); @@ -674,8 +715,10 @@ static int vce_v3_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; int i; - if (adev->asic_type == CHIP_POLARIS10) - vce_v3_set_bypass_mode(adev, enable); + if ((adev->asic_type == CHIP_POLARIS10) || + (adev->asic_type == CHIP_TONGA) || + (adev->asic_type == CHIP_FIJI)) + vce_v3_0_set_bypass_mode(adev, enable); if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) return 0; @@ -686,13 +729,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, if (adev->vce.harvest_config & (1 << i)) continue; - if (i == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); if (enable) { /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ @@ -711,7 +748,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, vce_v3_0_set_vce_sw_clock_gating(adev, enable); } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -739,6 +776,60 @@ static int vce_v3_0_set_powergating_state(void *handle, return vce_v3_0_start(adev); } +static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch) +{ + amdgpu_ring_write(ring, VCE_CMD_IB_VM); + amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, ib->length_dw); +} + +static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring, + unsigned int vm_id, uint64_t pd_addr) +{ + amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB); + amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, pd_addr >> 12); + + amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB); + amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, VCE_CMD_END); +} + +static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + amdgpu_ring_write(ring, VCE_CMD_WAIT_GE); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, seq); +} + +static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) +{ + return + 5; /* vce_v3_0_ring_emit_ib */ +} + +static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) +{ + return + 4 + /* vce_v3_0_emit_pipeline_sync */ + 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */ +} + +static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring) +{ + return + 6 + /* vce_v3_0_emit_vm_flush */ + 4 + /* vce_v3_0_emit_pipeline_sync */ + 6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */ +} + const struct amd_ip_funcs vce_v3_0_ip_funcs = { .name = "vce_v3_0", .early_init = vce_v3_0_early_init, @@ -751,12 +842,15 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = { .resume = vce_v3_0_resume, .is_idle = vce_v3_0_is_idle, .wait_for_idle = vce_v3_0_wait_for_idle, + .check_soft_reset = vce_v3_0_check_soft_reset, + .pre_soft_reset = vce_v3_0_pre_soft_reset, .soft_reset = vce_v3_0_soft_reset, + .post_soft_reset = vce_v3_0_post_soft_reset, .set_clockgating_state = vce_v3_0_set_clockgating_state, .set_powergating_state = vce_v3_0_set_powergating_state, }; -static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = { +static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { .get_rptr = vce_v3_0_ring_get_rptr, .get_wptr = vce_v3_0_ring_get_wptr, .set_wptr = vce_v3_0_ring_set_wptr, @@ 
-769,12 +863,42 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = { .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_vce_ring_begin_use, .end_use = amdgpu_vce_ring_end_use, + .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size, + .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size, +}; + +static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { + .get_rptr = vce_v3_0_ring_get_rptr, + .get_wptr = vce_v3_0_ring_get_wptr, + .set_wptr = vce_v3_0_ring_set_wptr, + .parse_cs = NULL, + .emit_ib = vce_v3_0_ring_emit_ib, + .emit_vm_flush = vce_v3_0_emit_vm_flush, + .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, + .emit_fence = amdgpu_vce_ring_emit_fence, + .test_ring = amdgpu_vce_ring_test_ring, + .test_ib = amdgpu_vce_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vce_ring_begin_use, + .end_use = amdgpu_vce_ring_end_use, + .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size, + .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm, }; static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs; - adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs; + int i; + + if (adev->asic_type >= CHIP_STONEY) { + for (i = 0; i < adev->vce.num_rings; i++) + adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs; + DRM_INFO("VCE enabled in VM mode\n"); + } else { + for (i = 0; i < adev->vce.num_rings; i++) + adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs; + DRM_INFO("VCE enabled in physical mode\n"); + } } static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 03a31c53aec3..c0d9aad7126f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -77,7 +77,11 @@ #if defined(CONFIG_DRM_AMD_ACP) #include "amdgpu_acp.h" #endif +#include "dce_virtual.h" +MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); +MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); +MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); @@ -444,18 +448,21 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, return true; } -static u32 vi_get_virtual_caps(struct amdgpu_device *adev) +static void vi_detect_hw_virtualization(struct amdgpu_device *adev) { - u32 caps = 0; - u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); - - if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) - caps |= AMDGPU_VIRT_CAPS_SRIOV_EN; - - if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) - caps |= AMDGPU_VIRT_CAPS_IS_VF; - - return caps; + uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); + /* bit0: 0 means pf and 1 means vf */ + /* bit31: 0 means disable IOV and 1 means enable */ + if (reg & 1) + adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF; + + if (reg & 0x80000000) + adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; + + if (reg == 0) { + if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */ + adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; + } } static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { @@ -822,6 +829,60 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 4, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 4, + .rev = 0, + .funcs = &iceland_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 4, + .rev = 0, + .funcs = &sdma_v2_4_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version tonga_ip_blocks[] = { /* ORDER MATTERS! */ @@ -890,6 +951,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 10, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 5, + .minor = 0, + .rev = 0, + .funcs = &uvd_v5_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version fiji_ip_blocks[] = { /* ORDER MATTERS! */ @@ -958,6 +1087,74 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 5, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 10, + .minor = 1, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version polaris11_ip_blocks[] = { /* ORDER MATTERS! 
*/ @@ -1026,6 +1223,74 @@ static const struct amdgpu_ip_block_version polaris11_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 1, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 1, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 11, + .minor = 2, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 1, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 3, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 4, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version cz_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1103,34 +1368,142 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] = #endif }; +static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &cz_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 11, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +#if defined(CONFIG_DRM_AMD_ACP) + { + .type = AMD_IP_BLOCK_TYPE_ACP, + .major = 2, + .minor = 2, + .rev = 0, + .funcs = &acp_ip_funcs, + }, +#endif +}; + int vi_set_ip_blocks(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_TOPAZ: - adev->ip_blocks = topaz_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); - break; - case CHIP_FIJI: - adev->ip_blocks = fiji_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); - break; - case CHIP_TONGA: - adev->ip_blocks = tonga_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); - break; - case CHIP_POLARIS11: - case CHIP_POLARIS10: - adev->ip_blocks = polaris11_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); - break; - case CHIP_CARRIZO: - case CHIP_STONEY: - adev->ip_blocks = cz_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); - break; - default: - /* FIXME: not supported yet */ - return -EINVAL; + if (adev->enable_virtual_display) { + switch (adev->asic_type) { + case CHIP_TOPAZ: + 
adev->ip_blocks = topaz_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd); + break; + case CHIP_FIJI: + adev->ip_blocks = fiji_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd); + break; + case CHIP_TONGA: + adev->ip_blocks = tonga_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd); + break; + case CHIP_POLARIS11: + case CHIP_POLARIS10: + adev->ip_blocks = polaris11_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd); + break; + + case CHIP_CARRIZO: + case CHIP_STONEY: + adev->ip_blocks = cz_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + } else { + switch (adev->asic_type) { + case CHIP_TOPAZ: + adev->ip_blocks = topaz_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); + break; + case CHIP_FIJI: + adev->ip_blocks = fiji_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); + break; + case CHIP_TONGA: + adev->ip_blocks = tonga_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); + break; + case CHIP_POLARIS11: + case CHIP_POLARIS10: + adev->ip_blocks = polaris11_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + adev->ip_blocks = cz_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } } return 0; @@ -1154,13 +1527,13 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = { .read_disabled_bios = &vi_read_disabled_bios, .read_bios_from_rom = &vi_read_bios_from_rom, + .detect_hw_virtualization = vi_detect_hw_virtualization, .read_register = &vi_read_register, .reset = &vi_asic_reset, .set_vga_state = &vi_vga_set_state, .get_xclk = &vi_get_xclk, .set_uvd_clocks = &vi_set_uvd_clocks, .set_vce_clocks = &vi_set_vce_clocks, - .get_virtual_caps = &vi_get_virtual_caps, }; static int vi_common_early_init(void *handle) @@ -1248,8 +1621,17 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_MGCG | AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCE_MGCG; + /* rev0 hardware requires workarounds to support PG */ adev->pg_flags = 0; + if (adev->rev_id != 0x00) { + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_PIPELINE | + AMD_PG_SUPPORT_UVD | + AMD_PG_SUPPORT_VCE; + } adev->external_rev_id = adev->rev_id + 0x1; break; case CHIP_STONEY: @@ -1267,14 +1649,24 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_MGCG | AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; - adev->external_rev_id = adev->rev_id + 0x1; + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCE_MGCG; + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_PIPELINE | + AMD_PG_SUPPORT_UVD | + AMD_PG_SUPPORT_VCE; + adev->external_rev_id = adev->rev_id + 0x61; break; default: /* FIXME: not supported yet */ return -EINVAL; } + /* in early init stage, vbios code won't work */ + if (adev->asic_funcs->detect_hw_virtualization) + amdgpu_asic_detect_hw_virtualization(adev); + if (amdgpu_smc_load_fw && smc_enabled) adev->firmware.smu_load = true; @@ -1418,6 +1810,63 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev, WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data); } +static int vi_common_set_clockgating_state_by_smu(void *handle, + enum amd_clockgating_state state) +{ + 
uint32_t msg_id, pp_state; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + void *pp_handle = adev->powerplay.pp_handle; + + if (state == AMD_CG_STATE_UNGATE) + pp_state = 0; + else + pp_state = PP_STATE_CG | PP_STATE_LS; + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_MC, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_SDMA, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_HDP, + PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_BIF, + PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_BIF, + PP_STATE_SUPPORT_CG, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_DRM, + PP_STATE_SUPPORT_LS, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, + PP_BLOCK_SYS_ROM, + PP_STATE_SUPPORT_CG, + pp_state); + amd_set_clockgating_by_smu(pp_handle, msg_id); + + return 0; +} + static int vi_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -1443,6 +1892,10 @@ static int vi_common_set_clockgating_state(void *handle, vi_update_hdp_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); break; + case CHIP_TONGA: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + vi_common_set_clockgating_state_by_smu(adev, state); default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h index 062ee1676480..11746f22d0c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vid.h +++ b/drivers/gpu/drm/amd/amdgpu/vid.h @@ -369,4 +369,45 @@ #define VCE_CMD_IB_AUTO 0x00000005 #define VCE_CMD_SEMAPHORE 0x00000006 +#define VCE_CMD_IB_VM 0x00000102 +#define VCE_CMD_WAIT_GE 0x00000106 +#define VCE_CMD_UPDATE_PTB 0x00000107 +#define VCE_CMD_FLUSH_TLB 0x00000108 + +/* mmPA_SC_RASTER_CONFIG mask */ +#define RB_MAP_PKR0(x) ((x) << 0) +#define RB_MAP_PKR0_MASK (0x3 << 0) +#define RB_MAP_PKR1(x) ((x) << 2) +#define RB_MAP_PKR1_MASK (0x3 << 2) +#define RB_XSEL2(x) ((x) << 4) +#define RB_XSEL2_MASK (0x3 << 4) +#define RB_XSEL (1 << 6) +#define RB_YSEL (1 << 7) +#define PKR_MAP(x) ((x) << 8) +#define PKR_MAP_MASK (0x3 << 8) +#define PKR_XSEL(x) ((x) << 10) +#define PKR_XSEL_MASK (0x3 << 10) +#define PKR_YSEL(x) ((x) << 12) +#define PKR_YSEL_MASK (0x3 << 12) +#define SC_MAP(x) ((x) << 16) +#define SC_MAP_MASK (0x3 << 16) +#define SC_XSEL(x) ((x) << 18) +#define SC_XSEL_MASK (0x3 << 18) +#define SC_YSEL(x) ((x) << 20) +#define SC_YSEL_MASK (0x3 << 20) +#define SE_MAP(x) ((x) << 24) +#define SE_MAP_MASK (0x3 << 24) +#define SE_XSEL(x) ((x) << 26) +#define SE_XSEL_MASK (0x3 << 26) +#define SE_YSEL(x) ((x) << 28) +#define SE_YSEL_MASK (0x3 << 28) + +/* mmPA_SC_RASTER_CONFIG_1 mask */ +#define SE_PAIR_MAP(x) ((x) << 0) +#define SE_PAIR_MAP_MASK (0x3 << 0) +#define SE_PAIR_XSEL(x) ((x) << 2) +#define SE_PAIR_XSEL_MASK (0x3 << 2) +#define SE_PAIR_YSEL(x) ((x) << 4) +#define SE_PAIR_YSEL_MASK (0x3 << 4) + #endif |
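
The dominant mechanical change in the UVD/VCE hunks above is replacing open-coded read-modify-write sequences of the form WREG32_P(mmREG, value, ~REG__FIELD_MASK) with the WREG32_FIELD(REG, FIELD, value) helper, which derives the field's mask and shift from the generated register headers. The sketch below illustrates that pattern only; the stub MMIO array, the register offset, and the exact macro bodies are assumptions for illustration, not the driver's verbatim definitions.

/*
 * Illustrative sketch of the WREG32_FIELD() pattern used throughout the
 * hunks above: a read-modify-write of one bit-field, with the mask and
 * shift taken from <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT style
 * constants.  Everything here (register offset, stub register file,
 * macro bodies) is an assumption for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[0x10000];              /* stand-in for MMIO space */
#define RREG32(offset)      (fake_regs[(offset)])
#define WREG32(offset, v)   (fake_regs[(offset)] = (v))

/* hypothetical register/field constants in the naming style seen above */
#define mmSRBM_SOFT_RESET                       0x0398
#define SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK    0x00040000
#define SRBM_SOFT_RESET__SOFT_RESET_UVD__SHIFT  0x12

#define REG_FIELD_MASK(reg, field)   reg##__##field##_MASK
#define REG_FIELD_SHIFT(reg, field)  reg##__##field##__SHIFT

/* field update: clear the field, then OR in the new value at its shift */
#define WREG32_FIELD(reg, field, val)                                     \
	WREG32(mm##reg,                                                   \
	       (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) |          \
	       ((uint32_t)(val) << REG_FIELD_SHIFT(reg, field)))

int main(void)
{
	/* old style: WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
	 *                     ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);  /* put UVD into reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);  /* take UVD out of reset */
	printf("SRBM_SOFT_RESET = 0x%08x\n", RREG32(mmSRBM_SOFT_RESET));
	return 0;
}

Written this way, a single call such as WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0) performs the same read-modify-write that the old code spelled out as WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK), which is why so many multi-line WREG32_P sequences in this patch collapse to one line.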