/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Low level PM code for TI EMIF
*
* Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
* Dave Gerlach
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include "emif.h"
#include "ti-emif-asm-offsets.h"
#define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES   0x00a0
#define EMIF_POWER_MGMT_SR_TIMER_MASK                    0x00f0
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE                0x0200
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK           0x0700

#define EMIF_SDCFG_TYPE_DDR2                             0x2 << SDRAM_TYPE_SHIFT
#define EMIF_SDCFG_TYPE_DDR3                             0x3 << SDRAM_TYPE_SHIFT
#define EMIF_STATUS_READY                                0x4

#define AM43XX_EMIF_PHY_CTRL_REG_COUNT                   0x120

#define EMIF_AM437X_REGISTERS                            0x1
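/*
 * The routines below are position independent (all data references go
 * through PC-relative adr and the ti_emif_pm_sram_data block at the end
 * of this file), so they can be copied to and executed from on-chip SRAM.
 * The C-side EMIF PM driver (typically ti-emif-pm.c) is expected to fill
 * ti_emif_pm_sram_data with the EMIF base addresses and the location of
 * the register save area before any of these functions are called.
 */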
.arm
.align 3
ENTRY(ti_emif_sram)
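/* ti_emif_sram marks the start of the block that is copied to SRAM */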
/*
* void ti_emif_save_context(void)
*
* Used during suspend to save the context of all required EMIF registers
* to local memory if the EMIF is going to lose context during the sleep
* transition. Operates on the VIRTUAL address of the EMIF.
*/
ENTRY(ti_emif_save_context)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]
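/* r0 = EMIF virtual base address, r2 = virtual address of the register save area */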
/* Save EMIF configuration */
ldr r1, [r0, #EMIF_SDRAM_CONFIG]
str r1, [r2, #EMIF_SDCFG_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
str r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_TIMING_1]
str r1, [r2, #EMIF_TIMING1_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_TIMING_2]
str r1, [r2, #EMIF_TIMING2_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_TIMING_3]
str r1, [r2, #EMIF_TIMING3_VAL_OFFSET]
ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
str r1, [r2, #EMIF_PMCR_VAL_OFFSET]
ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]
str r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]
ldr r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
str r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
ldr r1, [r0, #EMIF_DDR_PHY_CTRL_1]
str r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]
ldr r1, [r0, #EMIF_COS_CONFIG]
str r1, [r2, #EMIF_COS_CONFIG_OFFSET]
ldr r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]
str r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]
ldr r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]
str r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]
ldr r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]
str r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]
ldr r1, [r0, #EMIF_OCP_CONFIG]
str r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]
ldr r5, [r4, #EMIF_PM_CONFIG_OFFSET]
cmp r5, #EMIF_SRAM_AM43_REG_LAYOUT
bne emif_skip_save_extra_regs
ldr r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]
str r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]
ldr r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]
str r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]
ldr r1, [r0, #EMIF_LPDDR2_NVM_TIMING]
str r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]
ldr r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]
str r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]
ldr r1, [r0, #EMIF_DLL_CALIB_CTRL]
str r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]
ldr r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]
str r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]
/* Loop and save the entire block of EMIF PHY registers */
mov r5, #0x0
add r4, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET
add r3, r0, #EMIF_EXT_PHY_CTRL_1
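/*
 * r3 = first EMIF_EXT_PHY_CTRL register, r4 = PHY register save area,
 * r5 = running byte offset. Copy one 32-bit register per iteration
 * until the offset reaches AM43XX_EMIF_PHY_CTRL_REG_COUNT.
 */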
ddr_phy_ctrl_save:
ldr r1, [r3, r5]
str r1, [r4, r5]
add r5, r5, #0x4
cmp r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
bne ddr_phy_ctrl_save
emif_skip_save_extra_regs:
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(ti_emif_save_context)
/*
* void ti_emif_restore_context(void)
*
* Used during resume to restore the context of all required EMIF registers
* from local memory after the EMIF has lost context during a sleep transition.
* Operates on the PHYSICAL address of the EMIF.
*/
ENTRY(ti_emif_restore_context)
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
ldr r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]
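/* r0 = EMIF physical base address, r2 = physical address of the saved register values */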
/* Config EMIF Timings */
ldr r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]
str r1, [r0, #EMIF_DDR_PHY_CTRL_1]
str r1, [r0, #EMIF_DDR_PHY_CTRL_1_SHDW]
ldr r1, [r2, #EMIF_TIMING1_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_TIMING_1]
str r1, [r0, #EMIF_SDRAM_TIMING_1_SHDW]
ldr r1, [r2, #EMIF_TIMING2_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_TIMING_2]
str r1, [r0, #EMIF_SDRAM_TIMING_2_SHDW]
ldr r1, [r2, #EMIF_TIMING3_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_TIMING_3]
str r1, [r0, #EMIF_SDRAM_TIMING_3_SHDW]
ldr r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
str r1, [r0, #EMIF_SDRAM_REFRESH_CTRL_SHDW]
ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
ldr r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]
str r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]
ldr r1, [r2, #EMIF_COS_CONFIG_OFFSET]
str r1, [r0, #EMIF_COS_CONFIG]
ldr r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]
str r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]
ldr r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]
str r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]
ldr r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]
str r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]
ldr r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]
str r1, [r0, #EMIF_OCP_CONFIG]
ldr r5, [r4, #EMIF_PM_CONFIG_OFFSET]
cmp r5, #EMIF_SRAM_AM43_REG_LAYOUT
bne emif_skip_restore_extra_regs
ldr r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]
str r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]
ldr r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]
str r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]
ldr r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]
str r1, [r0, #EMIF_LPDDR2_NVM_TIMING]
ldr r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]
str r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]
ldr r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]
str r1, [r0, #EMIF_DLL_CALIB_CTRL]
ldr r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]
str r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]
ldr r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
/* Loop and restore the entire block of EMIF PHY registers */
mov r5, #0x0
/*
 * Load ti_emif_regs_amx3 + EMIF_EXT_PHY_CTRL_VALS_OFFSET to get the
 * address of the PHY register save space.
 */
add r3, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET
add r4, r0, #EMIF_EXT_PHY_CTRL_1
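/* r3 = saved PHY register values, r4 = first EMIF_EXT_PHY_CTRL register, r5 = byte offset */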
ddr_phy_ctrl_restore:
ldr r1, [r3, r5]
str r1, [r4, r5]
add r5, r5, #0x4
cmp r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT
bne ddr_phy_ctrl_restore
emif_skip_restore_extra_regs:
/*
 * Output impedance calibration is only needed for DDR3, but since
 * the saved value is already "disabled" for DDR2, there is no harm
 * in restoring the old configuration.
 */
ldr r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
str r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
/* Write SDRAM_CONFIG last, and only for DDR2 */
ldr r1, [r2, #EMIF_SDCFG_VAL_OFFSET]
and r2, r1, #SDRAM_TYPE_MASK
cmp r2, #EMIF_SDCFG_TYPE_DDR2
streq r1, [r0, #EMIF_SDRAM_CONFIG]
mov pc, lr
ENDPROC(ti_emif_restore_context)
/*
* void ti_emif_run_hw_leveling(void)
*
* Used during resume to run hardware leveling again and restore the
* configuration of the EMIF PHY, only for DDR3.
*/
ENTRY(ti_emif_run_hw_leveling)
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
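/* r0 = EMIF physical base address */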
ldr r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
orr r3, r3, #RDWRLVLFULL_START
ldr r2, [r0, #EMIF_SDRAM_CONFIG]
and r2, r2, #SDRAM_TYPE_MASK
cmp r2, #EMIF_SDCFG_TYPE_DDR3
bne skip_hwlvl
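/* Kick off a full read/write hardware leveling sequence */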
str r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
/*
 * If the EMIF registers are touched during the initial stage of the
 * HW leveling sequence, an L3 NOC timeout error is issued because the
 * EMIF does not respond. This is not fatal, but it is avoidable. This
 * small wait loop gives the condition enough time to clear, even in
 * the worst case of the CPU running at its maximum speed of 1 GHz.
 */
mov r2, #0x2000
1:
subs r2, r2, #0x1
bne 1b
/* The RDWRLVLFULL_START bit clears when the leveling operation is complete */
2: ldr r1, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
tst r1, #RDWRLVLFULL_START
bne 2b
skip_hwlvl:
mov pc, lr
ENDPROC(ti_emif_run_hw_leveling)
/*
* void ti_emif_enter_sr(void)
*
* Programs the EMIF to tell the SDRAM to enter into self-refresh
* mode during a sleep transition. Operates on the VIRTUAL address
* of the EMIF.
*/
ENTRY(ti_emif_enter_sr)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]
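/*
 * r0 = EMIF virtual base address.
 * Program self-refresh as the low-power mode in the power management
 * control register; the SDRAM enters self-refresh once the EMIF has
 * been idle for the programmed number of cycles.
 */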
ldr r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
orr r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(ti_emif_enter_sr)
/*
* void ti_emif_exit_sr(void)
*
* Programs the EMIF to tell the SDRAM to exit self-refresh mode
* after a sleep transition. Operates on the PHYSICAL address of
* the EMIF.
*/
ENTRY(ti_emif_exit_sr)
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
ldr r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]
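/* r0 = EMIF physical base address, r2 = physical address of the saved register values */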
/*
 * Toggle the EMIF to exit self-refresh mode:
 * If the EMIF lost context, PWR_MGT_CTRL currently reads 0, so writing
 * disable (0x0) alone would have no effect. Instead, toggle from
 * self-refresh (0x2) to disable (0x0) here.
 * If the EMIF did not lose context, nothing breaks, as we write the
 * same value (0x2) to the register before writing disable (0x0).
 */
ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
orr r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
/* Wait for EMIF to become ready */
1: ldr r1, [r0, #EMIF_STATUS]
tst r1, #EMIF_STATUS_READY
beq 1b
mov pc, lr
ENDPROC(ti_emif_exit_sr)
/*
* void ti_emif_abort_sr(void)
*
* Disables self-refresh after a failed transition to a low-power
* state so the kernel can jump back to DDR and follow abort path.
* Operates on the VIRTUAL address of the EMIF.
*/
ENTRY(ti_emif_abort_sr)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
adr r4, ti_emif_pm_sram_data
ldr r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]
ldr r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]
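/*
 * r0 = EMIF virtual base address, r2 = virtual address of the saved
 * register values. Write back the saved PWR_MGMT_CTRL value with the
 * self-refresh mode field cleared so the SDRAM leaves self-refresh.
 */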
ldr r1, [r2, #EMIF_PMCR_VAL_OFFSET]
bic r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
str r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
/* Wait for EMIF to become ready */
1: ldr r1, [r0, #EMIF_STATUS]
tst r1, #EMIF_STATUS_READY
beq 1b
ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(ti_emif_abort_sr)
.align 3
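/*
 * Data block shared with the C-side driver. It holds the virtual and
 * physical EMIF base addresses, the pointers to the register save area
 * and the configuration word tested against EMIF_SRAM_AM43_REG_LAYOUT
 * above; the layout is described by the EMIF_PM_*_OFFSET constants
 * provided by ti-emif-asm-offsets.h.
 */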
ENTRY(ti_emif_pm_sram_data)
.space EMIF_PM_DATA_SIZE
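/*
 * Total size of the code and data above, measured from
 * ti_emif_save_context, so the C-side driver knows how much to copy
 * into SRAM.
 */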
ENTRY(ti_emif_sram_sz)
.word . - ti_emif_save_context