author     Linus Torvalds <torvalds@linux-foundation.org>   2017-09-04 03:02:26 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-09-04 03:02:26 +0300
commit     906dde0f355bd97c080c215811ae7db1137c4af8 (patch)
tree       1e1b4ba58a59c4027f06d86e7430566ee0dcbb15 /drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
parent     69c0067aa3f40d3e52ab78643aecb17d669d3389 (diff)
parent     7846b12fe0b5feab5446d892f41b5140c1419109 (diff)
download   linux-906dde0f355bd97c080c215811ae7db1137c4af8.tar.xz
Merge tag 'drm-for-v4.14' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"This is the main drm pull request for 4.14 merge window.
I'm sending this early, as my continuing journey into fatherhood is
occurring really soon now and I'm going to be mostly useless for the next
couple of weeks. Though I may be able to read email, I doubt I'll be
doing much patch application or git sending. If anything urgent pops
up I've asked Daniel/Jani/Alex/Sean to try and direct stuff towards
you.
Outside drm changes:
Some rcar-du updates that touch the V4L tree; all acks should be in
place. There is one new export in the radix tree code for a new i915
use case. There are some minor AGP cleanups (don't see those too
often), and changes to the vbox driver in staging to avoid breaking
compilation.
Summary:
core:
- Atomic helper fixes
- Atomic UAPI fixes
- Add YCBCR 4:2:0 support
- Drop set_busid hook
- Refactor fb_helper locking
- Remove a bunch of internal APIs
- Add a bunch of better default handlers
- Format modifier/blob plane property added
- More internal header refactoring
- Make more internal API names consistent
- Enhanced syncobj APIs (wait/signal/reset/create signalled)
bridge:
- Add Synopsys Designware MIPI DSI host bridge driver
tiny:
- Add Pervasive Displays RePaper displays
- Add support for LEGO MINDSTORMS EV3 LCD
i915:
- Lots of GEN10/CNL support patches
- drm syncobj support
- Skylake+ watermark refactoring
- GVT vGPU 48-bit ppgtt support
- GVT performance improvements
- NOA change ioctl
- CCS (color compression) scanout support
- GPU reset improvements
amdgpu:
- Initial hugepage support
- BO migration logic rework
- Vega10 improvements
- Powerplay fixes
- Stop reprogramming the MC
- Fixes for ACP audio on stoney
- SR-IOV fixes/improvements
- Command submission overhead improvements
amdkfd:
- Non-dGPU upstreaming patches
- Scratch VA ioctl
- Image tiling modes
- Update PM4 headers for new firmware
- Drop all BUG_ONs.
nouveau:
- GP108 modesetting support.
- Disable MSI on big endian.
vmwgfx:
- Add fence fd support.
msm:
- Runtime PM improvements
exynos:
- NV12MT support
- Refactor KMS drivers
imx-drm:
- Lock scanout channel to improve memory bw
- Cleanups
etnaviv:
- GEM object population fixes
tegra:
- Prep work for Tegra186 support
- PRIME mmap support
sunxi:
- HDMI support improvements
- HDMI CEC support
omapdrm:
- HDMI hotplug IRQ support
- Big driver cleanup
- OMAP5 DSI support
rcar-du:
- vblank fixes
- VSP1 updates
arcgpu:
- Minor fixes
stm:
- Add STM32 DSI controller driver
dw_hdmi:
- Add support for Rockchip RK3399
- HDMI CEC support
atmel-hlcdc:
- Add 8-bit color support
vc4:
- Atomic fixes
- New ioctl to attach a label to a buffer object
- HDMI CEC support
- Allow userspace to dictate rendering order on submit ioctl"
* tag 'drm-for-v4.14' of git://people.freedesktop.org/~airlied/linux: (1074 commits)
drm/syncobj: Add a signal ioctl (v3)
drm/syncobj: Add a reset ioctl (v3)
drm/syncobj: Add a syncobj_array_find helper
drm/syncobj: Allow wait for submit and signal behavior (v5)
drm/syncobj: Add a CREATE_SIGNALED flag
drm/syncobj: Add a callback mechanism for replace_fence (v3)
drm/syncobj: add sync obj wait interface. (v8)
i915: Use drm_syncobj_fence_get
drm/syncobj: Add a race-free drm_syncobj_fence_get helper (v2)
drm/syncobj: Rename fence_get to find_fence
drm: kirin: Add mode_valid logic to avoid mode clocks we can't generate
drm/vmwgfx: Bump the version for fence FD support
drm/vmwgfx: Add export fence to file descriptor support
drm/vmwgfx: Add support for imported Fence File Descriptor
drm/vmwgfx: Prepare to support fence fd
drm/vmwgfx: Fix incorrect command header offset at restart
drm/vmwgfx: Support the NOP_ERROR command
drm/vmwgfx: Restart command buffers after errors
drm/vmwgfx: Move irq bottom half processing to threads
drm/vmwgfx: Don't use drm_irq_[un]install
...
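
The drm/syncobj commits at the top of this list add four generic ioctls (create, wait, reset, signal). As a minimal, hedged userspace sketch of how they fit together — the render-node path, the header location (<drm/drm.h> vs. a libdrm install path), and the "returns immediately because the object starts signalled" outcome are assumptions for illustration, not taken from any in-tree test:

/* Sketch only: exercise the new syncobj ioctls on an already-signalled object. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>	/* may be <libdrm/drm.h> depending on the install */

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed render node */
	if (fd < 0)
		return 1;

	/* CREATE_SIGNALED: the syncobj starts out in the signalled state */
	struct drm_syncobj_create create = { .flags = DRM_SYNCOBJ_CREATE_SIGNALED };
	if (ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create))
		return 1;

	/* WAIT with an absolute timeout of 0 polls; it succeeds here because
	 * the object is already signalled */
	struct drm_syncobj_wait wait = {
		.handles = (__u64)(uintptr_t)&create.handle,
		.count_handles = 1,
		.timeout_nsec = 0,
		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
	};
	int r = ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
	printf("wait on handle %u returned %d\n", create.handle, r);

	/* RESET puts the object back to unsignalled, SIGNAL signals it again */
	struct drm_syncobj_array array = {
		.handles = (__u64)(uintptr_t)&create.handle,
		.count_handles = 1,
	};
	ioctl(fd, DRM_IOCTL_SYNCOBJ_RESET, &array);
	ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);

	close(fd);
	return 0;
}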
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 251
1 file changed, 106 insertions(+), 145 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4a8fc15467cf..1a459ac63df4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -53,6 +53,9 @@
 #include "bif/bif_4_1_d.h"
 #include <linux/pci.h>
 #include <linux/firmware.h>
+#include "amdgpu_vf_error.h"
+
+#include "amdgpu_amdkfd.h"
 
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -128,6 +131,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 {
 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+		adev->last_mm_index = v;
+	}
+
 	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
 		BUG_ON(in_interrupt());
 		return amdgpu_virt_kiq_wreg(adev, reg, v);
@@ -143,6 +150,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 	}
+
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+		udelay(500);
+	}
 }
 
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
@@ -157,6 +168,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
 
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+		adev->last_mm_index = v;
+	}
 	if ((reg * 4) < adev->rio_mem_size)
 		iowrite32(v, adev->rio_mem + (reg * 4));
@@ -164,6 +178,10 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
 		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
 	}
+
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+		udelay(500);
+	}
 }
 
 /**
@@ -318,51 +336,16 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 
 static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
 {
-	int r;
-
-	if (adev->vram_scratch.robj == NULL) {
-		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     NULL, NULL, &adev->vram_scratch.robj);
-		if (r) {
-			return r;
-		}
-	}
-
-	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
-	if (unlikely(r != 0))
-		return r;
-	r = amdgpu_bo_pin(adev->vram_scratch.robj,
-			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
-	if (r) {
-		amdgpu_bo_unreserve(adev->vram_scratch.robj);
-		return r;
-	}
-	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
-			   (void **)&adev->vram_scratch.ptr);
-	if (r)
-		amdgpu_bo_unpin(adev->vram_scratch.robj);
-	amdgpu_bo_unreserve(adev->vram_scratch.robj);
-
-	return r;
+	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
+				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+				       &adev->vram_scratch.robj,
+				       &adev->vram_scratch.gpu_addr,
+				       (void **)&adev->vram_scratch.ptr);
 }
 
 static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
 {
-	int r;
-
-	if (adev->vram_scratch.robj == NULL) {
-		return;
-	}
-	r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
-	if (likely(r == 0)) {
-		amdgpu_bo_kunmap(adev->vram_scratch.robj);
-		amdgpu_bo_unpin(adev->vram_scratch.robj);
-		amdgpu_bo_unreserve(adev->vram_scratch.robj);
-	}
-	amdgpu_bo_unref(&adev->vram_scratch.robj);
+	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
 }
 
 /**
@@ -521,7 +504,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->wb.wb_obj == NULL) {
-		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
+		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
+		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
 					    (void **)&adev->wb.wb);
@@ -552,32 +536,10 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
 int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
 {
 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
-	if (offset < adev->wb.num_wb) {
-		__set_bit(offset, adev->wb.used);
-		*wb = offset;
-		return 0;
-	} else {
-		return -EINVAL;
-	}
-}
-
-/**
- * amdgpu_wb_get_64bit - Allocate a wb entry
- *
- * @adev: amdgpu_device pointer
- * @wb: wb index
- *
- * Allocate a wb slot for use by the driver (all asics).
- * Returns 0 on success or -EINVAL on failure.
- */
-int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
-{
-	unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
-				adev->wb.num_wb, 0, 2, 7, 0);
-	if ((offset + 1) < adev->wb.num_wb) {
+
+	if (offset < adev->wb.num_wb) {
 		__set_bit(offset, adev->wb.used);
-		__set_bit(offset + 1, adev->wb.used);
-		*wb = offset;
+		*wb = offset * 8; /* convert to dw offset */
 		return 0;
 	} else {
 		return -EINVAL;
@@ -599,22 +561,6 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
 }
 
 /**
- * amdgpu_wb_free_64bit - Free a wb entry
- *
- * @adev: amdgpu_device pointer
- * @wb: wb index
- *
- * Free a wb slot allocated for use by the driver (all asics)
- */
-void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
-{
-	if ((wb + 1) < adev->wb.num_wb) {
-		__clear_bit(wb, adev->wb.used);
-		__clear_bit(wb + 1, adev->wb.used);
-	}
-}
-
-/**
  * amdgpu_vram_location - try to find VRAM location
  * @adev: amdgpu device structure holding all necessary informations
  * @mc: memory controller structure holding memory informations
@@ -665,7 +611,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
 }
 
 /**
- * amdgpu_gtt_location - try to find GTT location
+ * amdgpu_gart_location - try to find GTT location
  * @adev: amdgpu device structure holding all necessary informations
  * @mc: memory controller structure holding memory informations
 @@ -676,28 +622,28 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
  *
  * FIXME: when reducing GTT size align new size on power of 2.
  */
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
 {
 	u64 size_af, size_bf;
 
-	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
-	size_bf = mc->vram_start & ~mc->gtt_base_align;
+	size_af = adev->mc.mc_mask - mc->vram_end;
+	size_bf = mc->vram_start;
 	if (size_bf > size_af) {
-		if (mc->gtt_size > size_bf) {
+		if (mc->gart_size > size_bf) {
 			dev_warn(adev->dev, "limiting GTT\n");
-			mc->gtt_size = size_bf;
+			mc->gart_size = size_bf;
 		}
-		mc->gtt_start = 0;
+		mc->gart_start = 0;
 	} else {
-		if (mc->gtt_size > size_af) {
+		if (mc->gart_size > size_af) {
 			dev_warn(adev->dev, "limiting GTT\n");
-			mc->gtt_size = size_af;
+			mc->gart_size = size_af;
 		}
-		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+		mc->gart_start = mc->vram_end + 1;
 	}
-	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+	mc->gart_end = mc->gart_start + mc->gart_size - 1;
 	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
-			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
 }
 
 /*
@@ -720,7 +666,12 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
 		adev->has_hw_reset = false;
 		return true;
 	}
-	/* then check MEM_SIZE, in case the crtcs are off */
+
+	/* bios scratch used on CIK+ */
+	if (adev->asic_type >= CHIP_BONAIRE)
+		return amdgpu_atombios_scratch_need_asic_init(adev);
+
+	/* check MEM_SIZE for older asics */
 	reg = amdgpu_asic_get_config_memsize(adev);
 
 	if ((reg != 0) && (reg != 0xffffffff))
@@ -1031,19 +982,6 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
 
-/**
- * amdgpu_check_pot_argument - check that argument is a power of two
- *
- * @arg: value to check
- *
- * Validates that a certain argument is a power of two (all asics).
- * Returns true if argument is valid.
- */
-static bool amdgpu_check_pot_argument(int arg)
-{
-	return (arg & (arg - 1)) == 0;
-}
-
 static void amdgpu_check_block_size(struct amdgpu_device *adev)
 {
 	/* defines number of bits in page table versus page directory,
@@ -1077,7 +1015,7 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev)
 	if (amdgpu_vm_size == -1)
 		return;
 
-	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
+	if (!is_power_of_2(amdgpu_vm_size)) {
 		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
 			 amdgpu_vm_size);
 		goto def_value;
@@ -1118,19 +1056,31 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
 			 amdgpu_sched_jobs);
 		amdgpu_sched_jobs = 4;
-	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
+	} else if (!is_power_of_2(amdgpu_sched_jobs)){
 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
 			 amdgpu_sched_jobs);
 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
 	}
 
-	if (amdgpu_gart_size != -1) {
+	if (amdgpu_gart_size < 32) {
+		/* gart size must be greater or equal to 32M */
+		dev_warn(adev->dev, "gart size (%d) too small\n",
+			 amdgpu_gart_size);
+		amdgpu_gart_size = 32;
+	}
+
+	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
 		/* gtt size must be greater or equal to 32M */
-		if (amdgpu_gart_size < 32) {
-			dev_warn(adev->dev, "gart size (%d) too small\n",
-				 amdgpu_gart_size);
-			amdgpu_gart_size = -1;
-		}
+		dev_warn(adev->dev, "gtt size (%d) too small\n",
+			 amdgpu_gtt_size);
+		amdgpu_gtt_size = -1;
+	}
+
+	/* valid range is between 4 and 9 inclusive */
+	if (amdgpu_vm_fragment_size != -1 &&
+	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
+		dev_warn(adev->dev, "valid range is between 4 and 9\n");
+		amdgpu_vm_fragment_size = -1;
 	}
 
 	amdgpu_check_vm_size(adev);
@@ -1138,7 +1088,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
 	amdgpu_check_block_size(adev);
 
 	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
-	    !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
+	    !is_power_of_2(amdgpu_vram_page_split))) {
 		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
 			 amdgpu_vram_page_split);
 		amdgpu_vram_page_split = 1024;
@@ -1901,7 +1851,8 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
 		AMD_IP_BLOCK_TYPE_DCE,
 		AMD_IP_BLOCK_TYPE_GFX,
 		AMD_IP_BLOCK_TYPE_SDMA,
-		AMD_IP_BLOCK_TYPE_VCE,
+		AMD_IP_BLOCK_TYPE_UVD,
+		AMD_IP_BLOCK_TYPE_VCE
 	};
 
 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
@@ -2019,7 +1970,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->flags = flags;
 	adev->asic_type = flags & AMD_ASIC_MASK;
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
-	adev->mc.gtt_size = 512 * 1024 * 1024;
+	adev->mc.gart_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
 	adev->mman.buffer_funcs = NULL;
@@ -2068,6 +2019,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	spin_lock_init(&adev->uvd_ctx_idx_lock);
 	spin_lock_init(&adev->didt_idx_lock);
 	spin_lock_init(&adev->gc_cac_idx_lock);
+	spin_lock_init(&adev->se_cac_idx_lock);
 	spin_lock_init(&adev->audio_endpt_idx_lock);
 	spin_lock_init(&adev->mm_stats.lock);
 
@@ -2143,6 +2095,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_atombios_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
 		goto failed;
 	}
 
@@ -2153,6 +2106,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (amdgpu_vpost_needed(adev)) {
 		if (!adev->bios) {
 			dev_err(adev->dev, "no vBIOS found\n");
+			amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
 			r = -EINVAL;
 			goto failed;
 		}
@@ -2160,18 +2114,28 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
 		if (r) {
 			dev_err(adev->dev, "gpu post error!\n");
+			amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
 			goto failed;
 		}
 	} else {
 		DRM_INFO("GPU post is not needed\n");
 	}
 
-	if (!adev->is_atom_fw) {
+	if (adev->is_atom_fw) {
+		/* Initialize clocks */
+		r = amdgpu_atomfirmware_get_clock_info(adev);
+		if (r) {
+			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
+			amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+			goto failed;
+		}
+	} else {
 		/* Initialize clocks */
 		r = amdgpu_atombios_get_clock_info(adev);
 		if (r) {
 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
-			return r;
+			amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+			goto failed;
 		}
 		/* init i2c buses */
 		amdgpu_atombios_i2c_init(adev);
@@ -2181,6 +2145,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_fence_driver_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
 		goto failed;
 	}
 
@@ -2190,6 +2155,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_init failed\n");
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
 		amdgpu_fini(adev);
 		goto failed;
 	}
@@ -2209,6 +2175,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_ib_pool_init(adev);
 	if (r) {
 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
 		goto failed;
 	}
 
@@ -2253,12 +2220,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_late_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_late_init failed\n");
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
 		goto failed;
 	}
 
 	return 0;
 
 failed:
+	amdgpu_vf_error_trans_all(adev);
 	if (runtime)
 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
 	return r;
@@ -2351,6 +2320,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	}
 	drm_modeset_unlock_all(dev);
 
+	amdgpu_amdkfd_suspend(adev);
+
 	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
@@ -2392,10 +2363,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	 */
 	amdgpu_bo_evict_vram(adev);
 
-	if (adev->is_atom_fw)
-		amdgpu_atomfirmware_scratch_regs_save(adev);
-	else
-		amdgpu_atombios_scratch_regs_save(adev);
+	amdgpu_atombios_scratch_regs_save(adev);
 	pci_save_state(dev->pdev);
 	if (suspend) {
 		/* Shut down the device */
@@ -2444,10 +2412,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 		if (r)
 			goto unlock;
 	}
-	if (adev->is_atom_fw)
-		amdgpu_atomfirmware_scratch_regs_restore(adev);
-	else
-		amdgpu_atombios_scratch_regs_restore(adev);
+	amdgpu_atombios_scratch_regs_restore(adev);
 
 	/* post card */
 	if (amdgpu_need_post(adev)) {
@@ -2490,6 +2455,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 			}
 		}
 	}
+	r = amdgpu_amdkfd_resume(adev);
+	if (r)
+		return r;
 
 	/* blat the mode back in */
 	if (fbcon) {
@@ -2860,21 +2828,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 	r = amdgpu_suspend(adev);
 
 retry:
-	/* Disable fb access */
-	if (adev->mode_info.num_crtc) {
-		struct amdgpu_mode_mc_save save;
-		amdgpu_display_stop_mc_access(adev, &save);
-		amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
-	}
-	if (adev->is_atom_fw)
-		amdgpu_atomfirmware_scratch_regs_save(adev);
-	else
-		amdgpu_atombios_scratch_regs_save(adev);
+	amdgpu_atombios_scratch_regs_save(adev);
 
 	r = amdgpu_asic_reset(adev);
-	if (adev->is_atom_fw)
-		amdgpu_atomfirmware_scratch_regs_restore(adev);
-	else
-		amdgpu_atombios_scratch_regs_restore(adev);
+	amdgpu_atombios_scratch_regs_restore(adev);
 	/* post card */
 	amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -2952,6 +2908,7 @@ out:
 		}
 	} else {
 		dev_err(adev->dev, "asic resume failed (%d).\n", r);
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			if (adev->rings[i] && adev->rings[i]->sched.thread) {
 				kthread_unpark(adev->rings[i]->sched.thread);
@@ -2962,12 +2919,16 @@ out:
 	drm_helper_resume_force_mode(adev->ddev);
 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
-	if (r)
+	if (r) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(adev->dev, "GPU reset failed\n");
-	else
+		amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+	}
+	else {
 		dev_info(adev->dev, "GPU reset successed!\n");
+	}
+
+	amdgpu_vf_error_trans_all(adev);
 	return r;
 }
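
The writeback (wb) hunks above switch the allocator from mixed 32/64-bit entries to uniform 256-bit slots: the backing buffer is sized AMDGPU_MAX_WB * sizeof(uint32_t) * 8 bytes, and amdgpu_wb_get() converts the bitmap slot index into a dword offset by multiplying by 8. A standalone sketch of that arithmetic, for illustration only — the AMDGPU_MAX_WB value is assumed here, not taken from the header:

/* Illustrative sketch, not driver code: 256-bit wb slot accounting. */
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_MAX_WB 1024	/* assumed value for the sketch */

int main(void)
{
	size_t bo_size = AMDGPU_MAX_WB * sizeof(uint32_t) * 8;	/* bytes: one 256-bit slot per wb */
	unsigned slot = 3;			/* bitmap index, as returned by find_first_zero_bit() */
	unsigned dw_offset = slot * 8;		/* what amdgpu_wb_get() now hands back */

	printf("wb bo: %zu bytes; slot %u -> dword offset %u (byte offset %u)\n",
	       bo_size, slot, dw_offset, dw_offset * 4);
	return 0;
}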