/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_PCI_IO_H
#define _ASM_S390_PCI_IO_H

#ifdef CONFIG_PCI

#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/pci_insn.h>

/* I/O Map */
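/*
 * ioremap()ed PCI BAR space is represented by a synthetic address:
 * bit 63 is always set (ZPCI_IOMAP_ADDR_BASE), bits 62-48 select the
 * entry in zpci_iomap_start[] and bits 47-0 are the offset into the BAR.
 */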
#define ZPCI_IOMAP_SHIFT 48
#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL
#define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1)
#define ZPCI_IOMAP_MAX_ENTRIES \
	((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
#define ZPCI_IOMAP_ADDR_IDX_MASK \
	(~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)

struct zpci_iomap_entry {
	u32 fh;
	u8 bar;
	u16 count;
};

extern struct zpci_iomap_entry *zpci_iomap_start;
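
/*
 * ZPCI_ADDR() builds the synthetic I/O address for iomap slot @idx,
 * ZPCI_IDX() recovers the slot index from such an address and
 * ZPCI_OFFSET() extracts the offset into the BAR.
 */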
#define ZPCI_ADDR(idx) (ZPCI_IOMAP_ADDR_BASE | ((u64) idx << ZPCI_IOMAP_SHIFT))
#define ZPCI_IDX(addr) \
	(((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> ZPCI_IOMAP_SHIFT)
#define ZPCI_OFFSET(addr) \
	((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
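
/*
 * Build the request word passed to zpci_load()/zpci_store(): the PCI
 * function handle goes into the upper 32 bits, the address space (BAR
 * number) is shifted to bit 16 and the access length occupies the low bits.
 */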
#define ZPCI_CREATE_REQ(handle, space, len) \
	((u64) handle << 32 | space << 16 | len)

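/*
 * zpci_read()/zpci_write() generate the zpci_read_u8..u64 and
 * zpci_write_u8..u64 accessors.  Each accessor looks up the iomap entry
 * for the address and issues a single PCI load or store; a failed load
 * returns all ones, like a failed MMIO read.
 */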
#define zpci_read(LENGTH, RETTYPE) \
static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
{ \
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
	u64 data; \
	int rc; \
\
	rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \
	if (rc) \
		data = -1ULL; \
	return (RETTYPE) data; \
}

#define zpci_write(LENGTH, VALTYPE) \
static inline void zpci_write_##VALTYPE(VALTYPE val, \
					const volatile void __iomem *addr) \
{ \
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
	u64 data = (VALTYPE) val; \
\
	zpci_store(data, req, ZPCI_OFFSET(addr)); \
}

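/* instantiate the accessors for 1, 2, 4 and 8 byte accesses */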
zpci_read(8, u64)
zpci_read(4, u32)
zpci_read(2, u16)
zpci_read(1, u8)
zpci_write(8, u64)
zpci_write(4, u32)
zpci_write(2, u16)
zpci_write(1, u8)
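
/*
 * Write a single value of @len bytes taken from *@data with one PCI store.
 * An unsupported length is forwarded with a zero value so that the
 * firmware reports the error.
 */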
static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
{
	u64 val;

	switch (len) {
	case 1:
		val = (u64) *((u8 *) data);
		break;
	case 2:
		val = (u64) *((u16 *) data);
		break;
	case 4:
		val = (u64) *((u32 *) data);
		break;
	case 8:
		val = (u64) *((u64 *) data);
		break;
	default:
		val = 0; /* let FW report error */
		break;
	}
	return zpci_store(val, req, offset);
}
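
/*
 * Read a single value of @len bytes with one PCI load and store it into
 * *@dst with the requested width.
 */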
static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
{
	u64 data;
	int cc;

	cc = zpci_load(&data, req, offset);
	if (cc)
		goto out;
	switch (len) {
	case 1:
		*((u8 *) dst) = (u8) data;
		break;
	case 2:
		*((u16 *) dst) = (u16) data;
		break;
	case 4:
		*((u32 *) dst) = (u32) data;
		break;
	case 8:
		*((u64 *) dst) = (u64) data;
		break;
	}
out:
	return cc;
}
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
	return zpci_store_block(data, req, offset);
}
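
/*
 * Return the largest power-of-two access size, capped by @len and @max,
 * to which both @src and @dst are aligned.
 */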
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
{
	int count = len > max ? max : len, size = 1;

	while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
		dst = dst >> 1;
		src = src >> 1;
		size = size << 1;
	}
	return size;
}
static inline int zpci_memcpy_fromio(void *dst,
				     const volatile void __iomem *src,
				     unsigned long n)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
	u64 req, offset = ZPCI_OFFSET(src);
	int size, rc = 0;

	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) src,
					       (u64) dst, n, 8);
		req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
		rc = zpci_read_single(req, dst, offset, size);
		if (rc)
			break;
		offset += size;
		dst += size;
		n -= size;
	}
	return rc;
}
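
/*
 * Copy from system memory to a mapped PCI resource.  Chunks larger than
 * 8 bytes go through the block-store path, smaller ones through a single
 * PCI store.
 */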
static inline int zpci_memcpy_toio(volatile void __iomem *dst,
				   const void *src, unsigned long n)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
	u64 req, offset = ZPCI_OFFSET(dst);
	int size, rc = 0;

	if (!src)
		return -EINVAL;

	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) dst,
					       (u64) src, n, 128);
		req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
		if (size > 8) /* main path */
			rc = zpci_write_block(req, src, offset);
		else
			rc = zpci_write_single(req, src, offset, size);
		if (rc)
			break;
		offset += size;
		src += size;
		n -= size;
	}
	return rc;
}
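
/*
 * Fill a PCI resource with @val by staging the pattern in a temporary
 * kernel buffer and copying it out with zpci_memcpy_toio().
 */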
static inline int zpci_memset_io(volatile void __iomem *dst,
				 unsigned char val, size_t count)
{
	u8 *src = kmalloc(count, GFP_KERNEL);
	int rc;

	if (src == NULL)
		return -ENOMEM;
	memset(src, val, count);
	rc = zpci_memcpy_toio(dst, src, count);
	kfree(src);
	return rc;
}

#endif /* CONFIG_PCI */
#endif /* _ASM_S390_PCI_IO_H */