path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef AMDGPU_USERQ_H_
#define AMDGPU_USERQ_H_
#include "amdgpu_eviction_fence.h"

#define AMDGPU_MAX_USERQ_COUNT 512

#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
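
/*
 * Illustrative sketch (not part of the original header): the helpers above
 * are thin container_of() wrappers. A delayed-work handler, for instance,
 * would typically recover its manager and file private data like this
 * (the handler name is hypothetical):
 *
 *	static void example_userq_resume_worker(struct work_struct *work)
 *	{
 *		struct amdgpu_userq_mgr *uq_mgr =
 *			work_to_uq_mgr(work, resume_work.work);
 *		struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
 *
 *		... remap the queues owned by this file ...
 *	}
 */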

enum amdgpu_userq_state {
	AMDGPU_USERQ_STATE_UNMAPPED = 0,
	AMDGPU_USERQ_STATE_MAPPED,
	AMDGPU_USERQ_STATE_PREEMPTED,
	AMDGPU_USERQ_STATE_HUNG,
};

struct amdgpu_mqd_prop;

struct amdgpu_userq_obj {
	void		 *cpu_ptr;
	uint64_t	 gpu_addr;
	struct amdgpu_bo *obj;
};

struct amdgpu_usermode_queue {
	int			queue_type;
	enum amdgpu_userq_state state;
	uint64_t		doorbell_handle;
	uint64_t		doorbell_index;
	uint64_t		flags;
	struct amdgpu_mqd_prop	*userq_prop;
	struct amdgpu_userq_mgr *userq_mgr;
	struct amdgpu_vm	*vm;
	struct amdgpu_userq_obj mqd;
	struct amdgpu_userq_obj	db_obj;
	struct amdgpu_userq_obj fw_obj;
	struct amdgpu_userq_obj wptr_obj;
	struct xarray		fence_drv_xa;
	struct amdgpu_userq_fence_driver *fence_drv;
	struct dma_fence	*last_fence;
	u32			xcp_id;
	int			priority;
};

struct amdgpu_userq_funcs {
	int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr,
			  struct drm_amdgpu_userq_in *args,
			  struct amdgpu_usermode_queue *queue);
	void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr,
			    struct amdgpu_usermode_queue *uq);
	int (*unmap)(struct amdgpu_userq_mgr *uq_mgr,
		     struct amdgpu_usermode_queue *queue);
	int (*map)(struct amdgpu_userq_mgr *uq_mgr,
		   struct amdgpu_usermode_queue *queue);
};
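
/*
 * Illustrative sketch (an assumption, not from this header): an IP-specific
 * backend is expected to provide an instance of the ops table above, which
 * the core user-queue code invokes when userspace creates, destroys, maps or
 * unmaps a queue. The symbol and callback names below are hypothetical:
 *
 *	const struct amdgpu_userq_funcs example_userq_funcs = {
 *		.mqd_create  = example_userq_mqd_create,
 *		.mqd_destroy = example_userq_mqd_destroy,
 *		.unmap       = example_userq_unmap,
 *		.map         = example_userq_map,
 *	};
 */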

/* Usermode queues for gfx */
struct amdgpu_userq_mgr {
	struct idr			userq_idr;
	struct mutex			userq_mutex;
	struct amdgpu_device		*adev;
	struct delayed_work		resume_work;
	struct list_head		list;
	struct drm_file			*file;
};

struct amdgpu_db_info {
	uint64_t doorbell_handle;
	uint32_t queue_type;
	uint32_t doorbell_offset;
	struct amdgpu_userq_obj	*db_obj;
};

int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);

int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
			  struct amdgpu_device *adev);

void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr);

int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
			       struct amdgpu_userq_obj *userq_obj,
			       int size);

void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
				 struct amdgpu_userq_obj *userq_obj);
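
/*
 * Illustrative sketch (an assumption, not from this header): a backend's
 * mqd_create callback would typically allocate its per-queue kernel objects
 * with the helpers above and release them again from mqd_destroy. Error
 * handling is abbreviated and MQD_SIZE is a hypothetical placeholder:
 *
 *	int r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, MQD_SIZE);
 *	if (r)
 *		return r;
 *	... fill queue->mqd.cpu_ptr, hand queue->mqd.gpu_addr to the firmware ...
 *
 *	amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
 */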

void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
			struct amdgpu_eviction_fence *ev_fence);

int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr);

void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
				  struct amdgpu_eviction_fence_mgr *evf_mgr);

uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
					 struct amdgpu_db_info *db_info,
					 struct drm_file *filp);

u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);

int amdgpu_userq_suspend(struct amdgpu_device *adev);
int amdgpu_userq_resume(struct amdgpu_device *adev);

int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
						  u32 idx);
int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
						   u32 idx);

#endif