/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021-2023 Intel Corporation
 */

#ifndef _XE_MMIO_H_
#define _XE_MMIO_H_

#include <linux/delay.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "regs/xe_reg_defs.h"
#include "xe_device_types.h"
#include "xe_gt_printk.h"
#include "xe_gt_types.h"

struct drm_device;
struct drm_file;
struct xe_device;
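
/* PCI BAR through which device local memory (VRAM) is exposed on discrete GPUs */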
#define GEN12_LMEM_BAR		2

int xe_mmio_init(struct xe_device *xe);
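
/**
 * xe_mmio_read8() - Read an 8-bit register
 * @gt: GT the register belongs to
 * @reg: register to read
 *
 * Addresses below @gt->mmio.adj_limit are shifted by @gt->mmio.adj_offset
 * before the access, and registers with the ext flag set are read through
 * the tile's extension MMIO mapping rather than the primary one.
 *
 * Return: the 8-bit value read from @reg
 */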
static inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
{
	struct xe_tile *tile = gt_to_tile(gt);

	if (reg.addr < gt->mmio.adj_limit)
		reg.addr += gt->mmio.adj_offset;

	return readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
}
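
/**
 * xe_mmio_read16() - Read a 16-bit register
 * @gt: GT the register belongs to
 * @reg: register to read
 *
 * Applies the same address adjustment and mapping selection as
 * xe_mmio_read8().
 *
 * Return: the 16-bit value read from @reg
 */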
static inline u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
{
	struct xe_tile *tile = gt_to_tile(gt);

	if (reg.addr < gt->mmio.adj_limit)
		reg.addr += gt->mmio.adj_offset;

	return readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
}
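
/**
 * xe_mmio_write32() - Write a 32-bit register
 * @gt: GT the register belongs to
 * @reg: register to write
 * @val: value to write
 *
 * Applies the same address adjustment and mapping selection as the read
 * helpers above.
 */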
static inline void xe_mmio_write32(struct xe_gt *gt,
				   struct xe_reg reg, u32 val)
{
	struct xe_tile *tile = gt_to_tile(gt);

	if (reg.addr < gt->mmio.adj_limit)
		reg.addr += gt->mmio.adj_offset;

	writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
}
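
/**
 * xe_mmio_read32() - Read a 32-bit register
 * @gt: GT the register belongs to
 * @reg: register to read
 *
 * Applies the same address adjustment and mapping selection as the other
 * accessors above.
 *
 * Return: the 32-bit value read from @reg
 */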
static inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
{
	struct xe_tile *tile = gt_to_tile(gt);

	if (reg.addr < gt->mmio.adj_limit)
		reg.addr += gt->mmio.adj_offset;

	return readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
}
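
/**
 * xe_mmio_rmw32() - Read, modify, then write a 32-bit register
 * @gt: GT the register belongs to
 * @reg: register to update
 * @clr: bits to clear
 * @set: bits to set
 *
 * The read and the write are two separate MMIO accesses with no locking,
 * so callers that need atomicity against concurrent updates of the same
 * register must provide their own serialization.
 *
 * Return: the old register value, from before the modification
 */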
static inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
				u32 set)
{
	u32 old, reg_val;

	old = xe_mmio_read32(gt, reg);
	reg_val = (old & ~clr) | set;
	xe_mmio_write32(gt, reg, reg_val);

	return old;
}
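
/**
 * xe_mmio_write32_and_verify() - Write a register and verify the readback
 * @gt: GT the register belongs to
 * @reg: register to write
 * @val: value to write
 * @mask: bits of the readback to check
 * @eval: expected value of the masked readback
 *
 * Return: 0 if (readback & @mask) equals @eval, -EINVAL otherwise
 */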
static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
					     struct xe_reg reg, u32 val,
					     u32 mask, u32 eval)
{
	u32 reg_val;

	xe_mmio_write32(gt, reg, val);
	reg_val = xe_mmio_read32(gt, reg);

	return (reg_val & mask) != eval ? -EINVAL : 0;
}
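
/**
 * xe_mmio_wait32() - Poll a 32-bit register until it matches an expected value
 * @gt: GT the register belongs to
 * @reg: register to poll
 * @mask: bits of the register to compare
 * @val: expected value of the masked bits
 * @timeout_us: timeout, in microseconds
 * @out_val: if non-NULL, receives the last value read
 * @atomic: true to busy-wait with udelay() for atomic context, false to
 *	    sleep with usleep_range()
 *
 * The register is sampled at least once, so a match is still reported even
 * if @timeout_us has already expired on entry. The wait interval starts at
 * 10us, doubles on each iteration, and is clamped to the time remaining.
 *
 * Illustrative usage (SOME_STATUS and SOME_DONE_BIT are hypothetical names,
 * not registers defined by this driver):
 *
 *	u32 status;
 *
 *	if (xe_mmio_wait32(gt, SOME_STATUS, SOME_DONE_BIT, SOME_DONE_BIT,
 *			   1000, &status, false))
 *		xe_gt_err(gt, "wait timed out, status %#x\n", status);
 *
 * Return: 0 if the masked register value matched within @timeout_us,
 * -ETIMEDOUT otherwise
 */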
static inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask,
				 u32 val, u32 timeout_us, u32 *out_val,
				 bool atomic)
{
	ktime_t cur = ktime_get_raw();
	const ktime_t end = ktime_add_us(cur, timeout_us);
	int ret = -ETIMEDOUT;
	s64 wait = 10;
	u32 read;

	for (;;) {
		read = xe_mmio_read32(gt, reg);
		if ((read & mask) == val) {
			ret = 0;
			break;
		}

		cur = ktime_get_raw();
		if (!ktime_before(cur, end))
			break;

		if (ktime_after(ktime_add_us(cur, wait), end))
			wait = ktime_us_delta(end, cur);

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);

		wait <<= 1;
	}

	if (out_val)
		*out_val = read;

	return ret;
}
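
/**
 * xe_mmio_in_range() - Check whether a register lies within an MMIO range
 * @gt: GT the register belongs to
 * @range: range to test against, may be NULL
 * @reg: register to check
 *
 * The same address adjustment used by the accessors is applied before the
 * comparison; the bounds of @range are inclusive.
 *
 * Return: true if @range is non-NULL and contains the adjusted address
 */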
static inline bool xe_mmio_in_range(const struct xe_gt *gt,
				    const struct xe_mmio_range *range,
				    struct xe_reg reg)
{
	if (reg.addr < gt->mmio.adj_limit)
		reg.addr += gt->mmio.adj_offset;

	return range && reg.addr >= range->start && reg.addr <= range->end;
}

int xe_mmio_probe_vram(struct xe_device *xe);
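/*
 * Reads a 64-bit register as two 32-bit accesses (hence the name); the
 * out-of-line implementation lives in xe_mmio.c.
 */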
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);

#endif