/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
 */

#ifndef _XE_MMIO_H_
#define _XE_MMIO_H_

#include <linux/delay.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "xe_gt_types.h"

struct drm_device;
struct drm_file;
struct xe_device;

int xe_mmio_init(struct xe_device *xe);
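
/*
 * Raw MMIO accessors. Register offsets below gt->mmio.adj_limit are
 * remapped by gt->mmio.adj_offset before the access, for GTs whose
 * register range sits at an adjusted position within the BAR (e.g. a
 * media GT).
 */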
static inline u8 xe_mmio_read8(struct xe_gt *gt, u32 reg)
{
	if (reg < gt->mmio.adj_limit)
		reg += gt->mmio.adj_offset;

	return readb(gt->mmio.regs + reg);
}

static inline void xe_mmio_write32(struct xe_gt *gt, u32 reg, u32 val)
{
	if (reg < gt->mmio.adj_limit)
		reg += gt->mmio.adj_offset;

	writel(val, gt->mmio.regs + reg);
}

static inline u32 xe_mmio_read32(struct xe_gt *gt, u32 reg)
{
	if (reg < gt->mmio.adj_limit)
		reg += gt->mmio.adj_offset;

	return readl(gt->mmio.regs + reg);
}
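
/*
 * Read-modify-write: clear the bits in @mask, set the bits in @val and
 * write the result back to @reg. Returns the old register value.
 */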
static inline u32 xe_mmio_rmw32(struct xe_gt *gt, u32 reg, u32 mask,
				u32 val)
{
	u32 old, reg_val;

	old = xe_mmio_read32(gt, reg);
	/* Clear the masked bits before OR-ing in the new value */
	reg_val = (old & ~mask) | val;
	xe_mmio_write32(gt, reg, reg_val);

	return old;
}
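
/*
 * 64-bit accessors. With <linux/io-64-nonatomic-lo-hi.h>, readq()/writeq()
 * degrade to two 32-bit accesses (low dword first) on platforms without
 * native 64-bit MMIO, so these accesses are not guaranteed to be atomic.
 */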
static inline void xe_mmio_write64(struct xe_gt *gt, u32 reg, u64 val)
{
	if (reg < gt->mmio.adj_limit)
		reg += gt->mmio.adj_offset;

	writeq(val, gt->mmio.regs + reg);
}

static inline u64 xe_mmio_read64(struct xe_gt *gt, u32 reg)
{
	if (reg < gt->mmio.adj_limit)
		reg += gt->mmio.adj_offset;

	return readq(gt->mmio.regs + reg);
}
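
/*
 * Write @val to @reg, read it back, and check that the bits selected by
 * @mask match @eval. Returns 0 on match, -EINVAL otherwise.
 */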
static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
					     u32 reg, u32 val,
					     u32 mask, u32 eval)
{
	u32 reg_val;

	xe_mmio_write32(gt, reg, val);
	reg_val = xe_mmio_read32(gt, reg);

	return (reg_val & mask) != eval ? -EINVAL : 0;
}
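
/*
 * Poll @reg until (value & @mask) == @val or @timeout_us has elapsed,
 * doubling the wait between reads (udelay() if @atomic, usleep_range()
 * otherwise). The last value read is stored in @out_val when non-NULL.
 * Returns 0 on success, -ETIMEDOUT on timeout.
 */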
static inline int xe_mmio_wait32(struct xe_gt *gt, u32 reg, u32 val, u32 mask,
				 u32 timeout_us, u32 *out_val, bool atomic)
{
	ktime_t cur = ktime_get_raw();
	const ktime_t end = ktime_add_us(cur, timeout_us);
	int ret = -ETIMEDOUT;
	s64 wait = 10;
	u32 read;

	for (;;) {
		/*
		 * Read once per iteration so @read always holds the value
		 * reported back through @out_val, including on success.
		 */
		read = xe_mmio_read32(gt, reg);
		if ((read & mask) == val) {
			ret = 0;
			break;
		}

		cur = ktime_get_raw();
		if (!ktime_before(cur, end))
			break;

		/* Clamp the final wait so we never sleep past the deadline */
		if (ktime_after(ktime_add_us(cur, wait), end))
			wait = ktime_us_delta(end, cur);

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;
	}

	if (out_val)
		*out_val = read;

	return ret;
}

int xe_mmio_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file);
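
/*
 * Check whether @reg lies within @range (inclusive); a NULL range never
 * matches.
 */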
static inline bool xe_mmio_in_range(const struct xe_mmio_range *range, u32 reg)
{
	return range && reg >= range->start && reg <= range->end;
}

int xe_mmio_probe_vram(struct xe_device *xe);
int xe_mmio_total_vram_size(struct xe_device *xe, u64 *vram_size,
			    u64 *flat_ccs_base);

#endif