1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
|
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <arch/chip.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
/*
 * memset() - fill the first 'n' bytes of 's' with the byte value 'c',
 * optimized for the TILE architecture.
 *
 * Strategy visible in the code below:
 *  - fills shorter than BYTE_CUTOFF bytes use a simple byte loop;
 *  - longer fills align the pointer and the count to 4 bytes, replicate
 *    'c' into a full 32-bit word, and store a word at a time;
 *  - cache-line-sized runs are accelerated either by software prefetch
 *    (chips without wh64) or by the wh64 instruction (chips with it),
 *    which hints that a whole cache line will be written so its old
 *    contents need not be fetched from memory.
 *
 * Returns 's', per the standard memset() contract.
 */
void *memset(void *s, int c, size_t n)
{
uint32_t *out32;	/* word-aligned output cursor */
int n32;		/* count of 32-bit words remaining to store */
uint32_t v16, v32;	/* 'c' replicated to 16 and then 32 bits */
uint8_t *out8 = s;	/* byte output cursor (starts unaligned) */
#if !CHIP_HAS_WH64()
int ahead32;		/* prefetch distance, in words */
#else
int to_align32;		/* words needed to reach cache-line alignment */
#endif
/* Experimentation shows that a trivial tight loop is a win up until
 * around a size of 20, where writing a word at a time starts to win.
 */
#define BYTE_CUTOFF 20
#if BYTE_CUTOFF < 3
/* This must be at least at least this big, or some code later
 * on doesn't work.
 */
#error "BYTE_CUTOFF is too small"
#endif
if (n < BYTE_CUTOFF) {
/* Strangely, this turns out to be the tightest way to
 * write this loop.
 */
if (n != 0) {
do {
/* Strangely, combining these into one line
 * performs worse.
 */
*out8 = c;
out8++;
} while (--n != 0);
}
return s;
}
#if !CHIP_HAS_WH64()
/* Use a spare issue slot to start prefetching the first cache
 * line early. This instruction is free as the store can be buried
 * in otherwise idle issue slots doing ALU ops.
 */
__insn_prefetch(out8);
/* We prefetch the end so that a short memset that spans two cache
 * lines gets some prefetching benefit. Again we believe this is free
 * to issue.
 */
__insn_prefetch(&out8[n - 1]);
#endif /* !CHIP_HAS_WH64() */
/* Align 'out8'. We know n >= 3 so this won't write past the end.
 * (At most 3 leading bytes are stored here; n >= BYTE_CUTOFF >= 3
 * on this path, so 'n' cannot underflow.)
 */
while (((uintptr_t) out8 & 3) != 0) {
*out8++ = c;
--n;
}
/* Align 'n': store the 1-3 trailing bytes now (working backward from
 * the end) so that what remains is an exact multiple of 4 bytes.
 */
while (n & 3)
out8[--n] = c;
out32 = (uint32_t *) out8;	/* safe: out8 is now 4-byte aligned */
n32 = n >> 2;
/* Tile input byte out to 32 bits: intlb replicates the low byte of
 * 'c' into both bytes of v16, then intlh replicates v16 into both
 * halfwords of v32 (per the TILE interleave instructions).
 */
v16 = __insn_intlb(c, c);
v32 = __insn_intlh(v16, v16);
/* This must be at least 8 or the following loop doesn't work. */
#define CACHE_LINE_SIZE_IN_WORDS (CHIP_L2_LINE_SIZE() / 4)
#if !CHIP_HAS_WH64()
ahead32 = CACHE_LINE_SIZE_IN_WORDS;
/* We already prefetched the first and last cache lines, so
 * we only need to do more prefetching if we are storing
 * to more than two cache lines.
 */
if (n32 > CACHE_LINE_SIZE_IN_WORDS * 2) {
int i;
/* Prefetch the next several cache lines.
 * This is the setup code for the software-pipelined
 * loop below.
 */
#define MAX_PREFETCH 5
ahead32 = n32 & -CACHE_LINE_SIZE_IN_WORDS;
if (ahead32 > MAX_PREFETCH * CACHE_LINE_SIZE_IN_WORDS)
ahead32 = MAX_PREFETCH * CACHE_LINE_SIZE_IN_WORDS;
for (i = CACHE_LINE_SIZE_IN_WORDS;
i < ahead32; i += CACHE_LINE_SIZE_IN_WORDS)
__insn_prefetch(&out32[i]);
}
if (n32 > ahead32) {
while (1) {
int j;
/* Prefetch by reading one word several cache lines
 * ahead. Since loads are non-blocking this will
 * cause the full cache line to be read while we are
 * finishing earlier cache lines. Using a store
 * here causes microarchitectural performance
 * problems where a victimizing store miss goes to
 * the head of the retry FIFO and locks the pipe for
 * a few cycles. So a few subsequent stores in this
 * loop go into the retry FIFO, and then later
 * stores see other stores to the same cache line
 * are already in the retry FIFO and themselves go
 * into the retry FIFO, filling it up and grinding
 * to a halt waiting for the original miss to be
 * satisfied.
 */
__insn_prefetch(&out32[ahead32]);
#if 1
#if CACHE_LINE_SIZE_IN_WORDS % 4 != 0
#error "Unhandled CACHE_LINE_SIZE_IN_WORDS"
#endif
n32 -= CACHE_LINE_SIZE_IN_WORDS;
/* Save icache space by only partially unrolling
 * this loop.
 */
for (j = CACHE_LINE_SIZE_IN_WORDS / 4; j > 0; j--) {
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
}
#else
/* Unfortunately, due to a code generator flaw this
 * allocates a separate register for each of these
 * stores, which requires a large number of spills,
 * which makes this procedure enormously bigger
 * (something like 70%)
 */
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
n32 -= 16;
#endif
/* To save compiled code size, reuse this loop even
 * when we run out of prefetching to do by dropping
 * ahead32 down.
 */
if (n32 <= ahead32) {
/* Not even a full cache line left,
 * so stop now.
 */
if (n32 < CACHE_LINE_SIZE_IN_WORDS)
break;
/* Choose a small enough value that we don't
 * prefetch past the end. There's no sense
 * in touching cache lines we don't have to.
 */
ahead32 = CACHE_LINE_SIZE_IN_WORDS - 1;
}
}
}
#else /* CHIP_HAS_WH64() */
/* Determine how many words we need to emit before the 'out32'
 * pointer becomes aligned modulo the cache line size.
 */
to_align32 =
(-((uintptr_t)out32 >> 2)) & (CACHE_LINE_SIZE_IN_WORDS - 1);
/* Only bother aligning and using wh64 if there is at least
 * one full cache line to process. This check also prevents
 * overrunning the end of the buffer with alignment words.
 */
if (to_align32 <= n32 - CACHE_LINE_SIZE_IN_WORDS) {
int lines_left;
/* Align out32 mod the cache line size so we can use wh64. */
n32 -= to_align32;
for (; to_align32 != 0; to_align32--) {
*out32 = v32;
out32++;
}
/* Use unsigned divide to turn this into a right shift. */
lines_left = (unsigned)n32 / CACHE_LINE_SIZE_IN_WORDS;
do {
/* Only wh64 a few lines at a time, so we don't
 * exceed the maximum number of victim lines.
 * (CHIP_MAX_OUTSTANDING_VICTIMS() is a chip-config
 * limit; presumably from <arch/chip.h> — the
 * batching below keeps within it.)
 */
int x = ((lines_left < CHIP_MAX_OUTSTANDING_VICTIMS())
? lines_left
: CHIP_MAX_OUTSTANDING_VICTIMS());
uint32_t *wh = out32;	/* cursor for issuing wh64 hints */
int i = x;
int j;
lines_left -= x;
/* First pass: issue wh64 for each line in this batch,
 * then fill those same lines with stores below.
 */
do {
__insn_wh64(wh);
wh += CACHE_LINE_SIZE_IN_WORDS;
} while (--i);
/* 4-way unrolled store loop covering x full lines. */
for (j = x * (CACHE_LINE_SIZE_IN_WORDS / 4); j != 0; j--) {
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
*out32++ = v32;
}
} while (lines_left != 0);
/* We processed all full lines above, so only this many
 * words remain to be processed.
 */
n32 &= CACHE_LINE_SIZE_IN_WORDS - 1;
}
#endif /* CHIP_HAS_WH64() */
/* Now handle any leftover values (fewer than one cache line,
 * already word-aligned).
 */
if (n32 != 0) {
do {
*out32 = v32;
out32++;
} while (--n32 != 0);
}
return s;
}
/* Export so loadable kernel modules can link against this memset. */
EXPORT_SYMBOL(memset);
|