author    Dave Airlie <airlied@redhat.com>  2020-09-08 09:40:13 +0300
committer Dave Airlie <airlied@redhat.com>  2020-09-08 09:40:13 +0300
commit    0c8d22fcae2f9590a07b000e1724f665820b77f7 (patch)
tree      5a2405fe298358d861a58dc933184ee6d3415eb4 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent    ce5c207c6b8dd9cdeaeeb2345b8a69335c0d98bf (diff)
parent    11bc98bd71fe2e0cb572988519e51bca9d58a18a (diff)
download  linux-0c8d22fcae2f9590a07b000e1724f665820b77f7.tar.xz
Merge tag 'amd-drm-next-5.10-2020-09-03' of git://people.freedesktop.org/~agd5f/linux into drm-next

amd-drm-next-5.10-2020-09-03:

amdgpu:
- RAS fixes
- Sienna Cichlid updates
- Navy Flounder updates
- DCE6 (SI) support in DC
- Enable plane rotation
- Rework pre-OS vram reservation handling during driver init
- Add standard interface to dump GPU metrics table from SMU
- Rework tiling and tmz state handling in atomic commits
- Pstate fixes
- Add voltage and power hwmon interfaces for renoir
- SW CTF fixes
- S/G display fix for Raven
- Print client strings for vmfaults for vega and newer
- Manual fan control fixes
- Display updates
- Reorg power management directory structure
- Misc bug fixes
- Misc code cleanups

amdkfd:
- Topology fixes
- Add SMI events for thermal throttling and GPU resets

radeon:
- switch from pci_* to dma_* for dma allocations
- PLL fix

Scheduler:
- Clean up priority levels

UAPI:
- amdgpu INFO IOCTL query update for TMZ state (a userspace sketch follows this message)
  https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6049
- amdkfd SMI event interface updates
  https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/therm_thrott

From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200903222921.4152-1-alexander.deucher@amd.com
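
The UAPI item above mentions the amdgpu INFO IOCTL query update for TMZ state. Below is a minimal userspace sketch of such a query, not part of this merge: it assumes libdrm's amdgpu wrapper (amdgpu_device_initialize/amdgpu_query_info) and the AMDGPU_IDS_FLAGS_TMZ bit in drm_amdgpu_info_device.ids_flags, which appears to be the interface the linked Mesa merge request consumes; treat the flag name and the render-node path as assumptions.

/*
 * Sketch only: report whether TMZ is enabled on the first render node.
 * Assumes libdrm_amdgpu and the AMDGPU_IDS_FLAGS_TMZ ids_flags bit.
 * Build with: gcc tmz_query.c $(pkg-config --cflags --libs libdrm_amdgpu)
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

int main(void)
{
	struct drm_amdgpu_info_device dev_info = {0};
	amdgpu_device_handle dev;
	uint32_t major, minor;
	int fd, r;

	fd = open("/dev/dri/renderD128", O_RDWR);	/* first render node; adjust as needed */
	if (fd < 0)
		return 1;

	r = amdgpu_device_initialize(fd, &major, &minor, &dev);
	if (r) {
		close(fd);
		return 1;
	}

	/* AMDGPU_INFO_DEV_INFO fills drm_amdgpu_info_device, including ids_flags. */
	r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(dev_info), &dev_info);
	if (!r)
		printf("TMZ %s\n",
		       (dev_info.ids_flags & AMDGPU_IDS_FLAGS_TMZ) ? "enabled" : "disabled");

	amdgpu_device_deinitialize(dev);
	close(fd);
	return r ? 1 : 0;
}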
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  29
1 file changed, 21 insertions, 8 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8bc2253939be..420931d36732 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -28,6 +28,7 @@
 #include <linux/dma-fence-array.h>
 #include <linux/interval_tree_generic.h>
 #include <linux/idr.h>
+#include <linux/dma-buf.h>
 
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -35,6 +36,7 @@
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_gmc.h"
 #include "amdgpu_xgmi.h"
+#include "amdgpu_dma_buf.h"
 
 /**
  * DOC: GPUVM
@@ -1691,13 +1693,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		uint64_t max_entries;
 		uint64_t addr, last;
 
+		max_entries = mapping->last - start + 1;
 		if (nodes) {
 			addr = nodes->start << PAGE_SHIFT;
-			max_entries = (nodes->size - pfn) *
-				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+			max_entries = min((nodes->size - pfn) *
+					  AMDGPU_GPU_PAGES_IN_CPU_PAGE, max_entries);
 		} else {
 			addr = 0;
-			max_entries = S64_MAX;
 		}
 
 		if (pages_addr) {
@@ -1727,7 +1729,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 			addr += pfn << PAGE_SHIFT;
 		}
 
-		last = min((uint64_t)mapping->last, start + max_entries - 1);
+		last = start + max_entries - 1;
 		r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
 						start, last, flags, addr,
 						dma_addr, fence);
@@ -1778,15 +1780,24 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		nodes = NULL;
 		resv = vm->root.base.bo->tbo.base.resv;
 	} else {
+		struct drm_gem_object *obj = &bo->tbo.base;
 		struct ttm_dma_tt *ttm;
 
+		resv = bo->tbo.base.resv;
+		if (obj->import_attach && bo_va->is_xgmi) {
+			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+			struct drm_gem_object *gobj = dma_buf->priv;
+			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+			if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+				bo = gem_to_amdgpu_bo(gobj);
+		}
 		mem = &bo->tbo.mem;
 		nodes = mem->mm_node;
 		if (mem->mem_type == TTM_PL_TT) {
 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		resv = bo->tbo.base.resv;
 	}
 
 	if (bo) {
@@ -2132,8 +2143,10 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
 
-	if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
-	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
+	if (!bo)
+		return bo_va;
+
+	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
 		bo_va->is_xgmi = true;
 		/* Power up XGMI if it can be potentially used */
 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
@@ -3209,7 +3222,7 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	union drm_amdgpu_vm *args = data;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	long timeout = msecs_to_jiffies(2000);
 	int r;
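
A note on the amdgpu_vm_bo_split_mapping hunks above: the rework computes the remaining length of the mapping up front and clamps it to the pages left in the current memory node, so the S64_MAX sentinel and the extra min() against mapping->last are no longer needed. The standalone sketch below mirrors that arithmetic with local, illustrative names only (the AMDGPU_GPU_PAGES_IN_CPU_PAGE scaling is omitted for brevity); it is not kernel code.

#include <stdint.h>

/*
 * Illustrative sketch of the reworked max_entries calculation in
 * amdgpu_vm_bo_split_mapping; names and types are local to this example.
 */
uint64_t split_max_entries(uint64_t start, uint64_t mapping_last,
			   const uint64_t *node_size, uint64_t pfn)
{
	/* New flow: start from the pages remaining in the mapping... */
	uint64_t max_entries = mapping_last - start + 1;

	/*
	 * ...and clamp to the pages left in the current node, if one backs
	 * the range.  The old flow used S64_MAX when there was no node and
	 * clamped the resulting `last` against mapping->last afterwards.
	 */
	if (node_size && *node_size - pfn < max_entries)
		max_entries = *node_size - pfn;

	return max_entries;	/* caller uses last = start + max_entries - 1 */
}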