/*
* Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
*
* Copyright (C) 1996-2000 Russell King
* Copyright (C) 2012 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
.macro save_and_disable_daif, flags
mrs \flags, daif
msr daifset, #0xf
.endm
.macro disable_daif
msr daifset, #0xf
.endm
.macro enable_daif
msr daifclr, #0xf
.endm
.macro restore_daif, flags:req
msr daif, \flags
.endm
/* Only valid for an AArch64 pstate value; PSR_D_BIT differs for AArch32 */
.macro inherit_daif, pstate:req, tmp:req
and \tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
msr daif, \tmp
.endm
/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
.macro enable_da_f
msr daifclr, #(8 | 4 | 1)
.endm
/*
* Enable and disable interrupts.
*/
.macro disable_irq
msr daifset, #2
.endm
.macro enable_irq
msr daifclr, #2
.endm
.macro save_and_disable_irq, flags
mrs \flags, daif
msr daifset, #2
.endm
.macro restore_irq, flags
msr daif, \flags
.endm
.macro enable_dbg
msr daifclr, #8
.endm
.macro disable_step_tsk, flgs, tmp
tbz \flgs, #TIF_SINGLESTEP, 9990f
mrs \tmp, mdscr_el1
bic \tmp, \tmp, #DBG_MDSCR_SS
msr mdscr_el1, \tmp
isb // Synchronise with enable_dbg
9990:
.endm
/* call with daif masked */
.macro enable_step_tsk, flgs, tmp
tbz \flgs, #TIF_SINGLESTEP, 9990f
mrs \tmp, mdscr_el1
orr \tmp, \tmp, #DBG_MDSCR_SS
msr mdscr_el1, \tmp
9990:
.endm
/*
* SMP data memory barrier
*/
.macro smp_dmb, opt
dmb \opt
.endm
/*
* RAS Error Synchronization barrier
*/
.macro esb
hint #16
.endm
/*
* Value prediction barrier
*/
.macro csdb
hint #20
.endm
/*
* Sanitise a 64-bit bounded index wrt speculation, returning zero if out
* of bounds.
*/
.macro mask_nospec64, idx, limit, tmp
sub \tmp, \idx, \limit
bic \tmp, \tmp, \idx
and \idx, \idx, \tmp, asr #63
csdb
.endm
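/*
* Illustrative use of mask_nospec64 (not part of the original header; the
* register choices, the 8-byte entry size and the symbol names are
* assumptions for this sketch only):
*
* adr_l x1, table // base of a hypothetical lookup table
* ldr_l x2, table_entries // number of valid entries
* mask_nospec64 x0, x2, x3 // x0 forced to 0 if x0 >= x2
* ldr x0, [x1, x0, lsl #3] // index stays bounded even under misspeculation
*/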
/*
* NOP sequence
*/
.macro nops, num
.rept \num
nop
.endr
.endm
/*
* Emit an entry into the exception table
*/
.macro _asm_extable, from, to
.pushsection __ex_table, "a"
.align 3
.long (\from - .), (\to - .)
.popsection
.endm
#define USER(l, x...) \
9999: x; \
_asm_extable 9999b, l
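/*
* Illustrative use of USER() (an assumption, not taken from this header):
* wrap a single user-space access so that a fault on it branches to a local
* fixup label instead of faulting the kernel:
*
* USER(9f, ldr x2, [x1]) // a fault in this ldr jumps to 9f
* ...
* 9: mov x0, #-14 // hypothetical fixup path returning -EFAULT
*/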
/*
* Register aliases.
*/
lr .req x30 // link register
/*
* Vector entry
*/
.macro ventry label
.align 7
b \label
.endm
/*
* Select code when configured for BE.
*/
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif
/*
* Select code when configured for LE.
*/
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif
/*
* Define a macro that constructs a 64-bit value by concatenating two
* 32-bit registers. Note that on big endian systems the order of the
* registers is swapped.
*/
#ifndef CONFIG_CPU_BIG_ENDIAN
.macro regs_to_64, rd, lbits, hbits
#else
.macro regs_to_64, rd, hbits, lbits
#endif
orr \rd, \lbits, \hbits, lsl #32
.endm
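/*
* Illustrative use (an assumption): combine a 64-bit value passed as two
* 32-bit halves, e.g. from a compat caller. The low register is assumed to
* already have its upper 32 bits clear:
*
* regs_to_64 x2, x0, x1 // little endian: x2 = (x1 << 32) | x0
*
* On a big endian kernel the second and third arguments swap roles, as
* described above.
*/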
/*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
* <symbol> is within the range +/- 4 GB of the PC.
*/
/*
* @dst: destination register (64 bit wide)
* @sym: name of the symbol
*/
.macro adr_l, dst, sym
adrp \dst, \sym
add \dst, \dst, :lo12:\sym
.endm
/*
* @dst: destination register (32 or 64 bit wide)
* @sym: name of the symbol
* @tmp: optional 64-bit scratch register to be used if <dst> is a
* 32-bit wide register, in which case it cannot be used to hold
* the address
*/
.macro ldr_l, dst, sym, tmp=
.ifb \tmp
adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym]
.else
adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym]
.endif
.endm
/*
* @src: source register (32 or 64 bit wide)
* @sym: name of the symbol
* @tmp: mandatory 64-bit scratch register to calculate the address
* while <src> needs to be preserved.
*/
.macro str_l, src, sym, tmp
adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym]
.endm
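/*
* Illustrative uses of the PC-relative pseudo-ops above (the symbol names
* are hypothetical, for the sketch only):
*
* adr_l x0, some_symbol // x0 = address of some_symbol
* ldr_l w1, some_word, x2 // w1 = some_word, x2 is clobbered
* str_l x3, some_qword, x4 // some_qword = x3, x4 is clobbered
*
* Each expands to an adrp plus a :lo12: fixup, so the result is position
* independent and the symbol must lie within +/- 4 GB of the PC.
*/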
/*
* @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
* @sym: The name of the per-cpu variable
* @tmp: scratch register
*/
.macro adr_this_cpu, dst, sym, tmp
adrp \tmp, \sym
add \dst, \tmp, #:lo12:\sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
alternative_else
mrs \tmp, tpidr_el2
alternative_endif
add \dst, \dst, \tmp
.endm
/*
* @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
* @sym: The name of the per-cpu variable
* @tmp: scratch register
*/
.macro ldr_this_cpu dst, sym, tmp
adr_l \dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
alternative_else
mrs \tmp, tpidr_el2
alternative_endif
ldr \dst, [\dst, \tmp]
.endm
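/*
* Illustrative uses (an assumption; the per-cpu symbol names are made up).
* Both macros add the per-CPU offset held in tpidr_el1 (or tpidr_el2 when
* running at EL2 with VHE) to the symbol address:
*
* adr_this_cpu x0, some_percpu_struct, x1 // x0 = this CPU's copy
* ldr_this_cpu x2, some_percpu_qword, x3 // x2 = this CPU's value
*/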
/*
* vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
*/
.macro vma_vm_mm, rd, rn
ldr \rd, [\rn, #VMA_VM_MM]
.endm
/*
* mmid - get context id from mm pointer (mm->context.id)
*/
.macro mmid, rd, rn
ldr \rd, [\rn, #MM_CONTEXT_ID]
.endm
/*
* read_ctr - read CTR_EL0. If the system has mismatched
* cache line sizes, provide the system wide safe value
* from arm64_ftr_reg_ctrel0.sys_val
*/
.macro read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
mrs \reg, ctr_el0 // read CTR
nop
alternative_else
ldr_l \reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
.endm
/*
* raw_dcache_line_size - get the minimum D-cache line size on this CPU
* from the CTR register.
*/
.macro raw_dcache_line_size, reg, tmp
mrs \tmp, ctr_el0 // read CTR
ubfm \tmp, \tmp, #16, #19 // cache line size encoding
mov \reg, #4 // bytes per word
lsl \reg, \reg, \tmp // actual cache line size
.endm
/*
* dcache_line_size - get the safe D-cache line size across all CPUs
*/
.macro dcache_line_size, reg, tmp
read_ctr \tmp
ubfm \tmp, \tmp, #16, #19 // cache line size encoding
mov \reg, #4 // bytes per word
lsl \reg, \reg, \tmp // actual cache line size
.endm
/*
* raw_icache_line_size - get the minimum I-cache line size on this CPU
* from the CTR register.
*/
.macro raw_icache_line_size, reg, tmp
mrs \tmp, ctr_el0 // read CTR
and \tmp, \tmp, #0xf // cache line size encoding
mov \reg, #4 // bytes per word
lsl \reg, \reg, \tmp // actual cache line size
.endm
/*
* icache_line_size - get the safe I-cache line size across all CPUs
*/
.macro icache_line_size, reg, tmp
read_ctr \tmp
and \tmp, \tmp, #0xf // cache line size encoding
mov \reg, #4 // bytes per word
lsl \reg, \reg, \tmp // actual cache line size
.endm
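/*
* Worked example (illustrative): CTR_EL0.DminLine/IminLine hold log2 of the
* line size in words, so the macros above compute 4 << field. A DminLine
* value of 4 therefore yields 4 << 4 = 64-byte cache lines.
*/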
/*
* tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
*/
.macro tcr_set_idmap_t0sz, valreg, tmpreg
ldr_l \tmpreg, idmap_t0sz
bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
.endm
/*
* tcr_compute_pa_size - set TCR.(I)PS to the highest supported
* ID_AA64MMFR0_EL1.PARange value
*
* tcr: register with the TCR_ELx value to be updated
* pos: IPS or PS bitfield position
* tmp{0,1}: temporary registers
*/
.macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1
mrs \tmp0, ID_AA64MMFR0_EL1
// Narrow PARange to fit the PS field in TCR_ELx
ubfx \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
mov \tmp1, #ID_AA64MMFR0_PARANGE_MAX
cmp \tmp0, \tmp1
csel \tmp0, \tmp1, \tmp0, hi
bfi \tcr, \tmp0, \pos, #3
.endm
/*
* Macro to perform a data cache maintenance for the interval
* [kaddr, kaddr + size)
*
* op: operation passed to dc instruction
* domain: domain used in dsb instruction
* kaddr: starting virtual address of the region
* size: size of the region
* Corrupts: kaddr, size, tmp1, tmp2
*/
.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
dcache_line_size \tmp1, \tmp2
add \size, \kaddr, \size
sub \tmp2, \tmp1, #1
bic \kaddr, \kaddr, \tmp2
9998:
.if (\op == cvau || \op == cvac)
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
dc \op, \kaddr
alternative_else
dc civac, \kaddr
alternative_endif
.elseif (\op == cvap)
alternative_if ARM64_HAS_DCPOP
sys 3, c7, c12, 1, \kaddr // dc cvap
alternative_else
dc cvac, \kaddr
alternative_endif
.else
dc \op, \kaddr
.endif
add \kaddr, \kaddr, \tmp1
cmp \kaddr, \size
b.lo 9998b
dsb \domain
.endm
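/*
* Illustrative invocation (register and operand choices are an assumption):
* clean [x0, x0 + x1) to the point of coherency, clobbering x0-x3:
*
* dcache_by_line_op cvac, sy, x0, x1, x2, x3
*/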
/*
* Macro to perform an instruction cache maintenance for the interval
* [start, end)
*
* start, end: virtual addresses describing the region
* label: A label to branch to on user fault.
* Corrupts: tmp1, tmp2
*/
.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
icache_line_size \tmp1, \tmp2
sub \tmp2, \tmp1, #1
bic \tmp2, \start, \tmp2
9997:
USER(\label, ic ivau, \tmp2) // invalidate I line PoU
add \tmp2, \tmp2, \tmp1
cmp \tmp2, \end
b.lo 9997b
dsb ish
isb
.endm
/*
* reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
*/
.macro reset_pmuserenr_el0, tmpreg
mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
sbfx \tmpreg, \tmpreg, #8, #4
cmp \tmpreg, #1 // Skip if no PMU present
b.lt 9000f
msr pmuserenr_el0, xzr // Disable PMU access from EL0
9000:
.endm
/*
* copy_page - copy src to dest using temp registers t1-t8
*/
.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998: ldp \t1, \t2, [\src]
ldp \t3, \t4, [\src, #16]
ldp \t5, \t6, [\src, #32]
ldp \t7, \t8, [\src, #48]
add \src, \src, #64
stnp \t1, \t2, [\dest]
stnp \t3, \t4, [\dest, #16]
stnp \t5, \t6, [\dest, #32]
stnp \t7, \t8, [\dest, #48]
add \dest, \dest, #64
tst \src, #(PAGE_SIZE - 1)
b.ne 9998b
.endm
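/*
* Illustrative invocation (an assumption): copy the page at x1 to x0. Both
* pointers are assumed page aligned, since the loop terminates when the
* source pointer crosses a page boundary; x0, x1 and the eight temporaries
* are clobbered:
*
* copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
*/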
/*
* Annotate a function as position independent, i.e., safe to be called before
* the kernel virtual mapping is activated.
*/
#define ENDPIPROC(x) \
.globl __pi_##x; \
.type __pi_##x, %function; \
.set __pi_##x, x; \
.size __pi_##x, . - x; \
ENDPROC(x)
/*
* Annotate a function as being unsuitable for kprobes.
*/
#ifdef CONFIG_KPROBES
#define NOKPROBE(x) \
.pushsection "_kprobe_blacklist", "aw"; \
.quad x; \
.popsection;
#else
#define NOKPROBE(x)
#endif
/*
* Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a
* PIE binary. This requires cooperation from the linker script, which
* must emit the lo32/hi32 halves individually.
*/
.macro le64sym, sym
.long \sym\()_lo32
.long \sym\()_hi32
.endm
/*
* mov_q - move an immediate constant into a 64-bit register using
* between 2 and 4 movz/movk instructions (depending on the
* magnitude and sign of the operand)
*/
.macro mov_q, reg, val
.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
movz \reg, :abs_g1_s:\val
.else
.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
movz \reg, :abs_g2_s:\val
.else
movz \reg, :abs_g3:\val
movk \reg, :abs_g2_nc:\val
.endif
movk \reg, :abs_g1_nc:\val
.endif
movk \reg, :abs_g0_nc:\val
.endm
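/*
* Illustrative expansions (the constants are examples only):
*
* mov_q x0, 0x0000ffff // 2 instructions: movz + movk
* mov_q x1, 0xffff0000ffff0000 // 4 instructions: movz + 3x movk
*/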
/*
* Return the current thread_info.
*/
.macro get_thread_info, rd
mrs \rd, sp_el0
.endm
/*
* Arrange a physical address in a TTBR register, taking care of 52-bit
* addresses.
*
* phys: physical address, preserved
* ttbr: returns the TTBR value
*/
.macro phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
orr \ttbr, \phys, \phys, lsr #46
and \ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
mov \ttbr, \phys
#endif
.endm
.macro phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
/*
* We assume \phys is 64K aligned and this is guaranteed by only
* supporting this configuration with 64K pages.
*/
orr \pte, \phys, \phys, lsr #36
and \pte, \pte, #PTE_ADDR_MASK
#else
mov \pte, \phys
#endif
.endm
.macro pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
ubfiz \phys, \pte, #(48 - 16 - 12), #16
bfxil \phys, \pte, #16, #32
lsl \phys, \phys, #16
#else
and \phys, \pte, #PTE_ADDR_MASK
#endif
.endm
/*
* Errata workaround prior to disabling the MMU. Insert an ISB immediately
* before executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
*/
.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
isb
#endif
.endm
/*
* frame_push - Push @regcount callee saved registers to the stack,
* starting at x19, as well as x29/x30, and set x29 to
* the new value of sp. Add @extra bytes of stack space
* for locals.
*/
.macro frame_push, regcount:req, extra
__frame st, \regcount, \extra
.endm
/*
* frame_pop - Pop the callee saved registers from the stack that were
* pushed in the most recent call to frame_push, as well
* as x29/x30 and any extra stack space that may have been
* allocated.
*/
.macro frame_pop
__frame ld
.endm
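/*
* Illustrative usage (an assumption about a typical caller): save x19-x22
* together with x29/x30 and reserve 16 bytes for locals, then unwind:
*
* ENTRY(some_asm_func) // hypothetical function
* frame_push 4, 16
* ... // x19..x22 are now safe to use
* frame_pop
* ret
* ENDPROC(some_asm_func)
*/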
.macro __frame_regs, reg1, reg2, op, num
.if .Lframe_regcount == \num
\op\()r \reg1, [sp, #(\num + 1) * 8]
.elseif .Lframe_regcount > \num
\op\()p \reg1, \reg2, [sp, #(\num + 1) * 8]
.endif
.endm
.macro __frame, op, regcount, extra=0
.ifc \op, st
.if (\regcount) < 0 || (\regcount) > 10
.error "regcount should be in the range [0 ... 10]"
.endif
.if ((\extra) % 16) != 0
.error "extra should be a multiple of 16 bytes"
.endif
.ifdef .Lframe_regcount
.if .Lframe_regcount != -1
.error "frame_push/frame_pop may not be nested"
.endif
.endif
.set .Lframe_regcount, \regcount
.set .Lframe_extra, \extra
.set .Lframe_local_offset, ((\regcount + 3) / 2) * 16
stp x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
mov x29, sp
.endif
__frame_regs x19, x20, \op, 1
__frame_regs x21, x22, \op, 3
__frame_regs x23, x24, \op, 5
__frame_regs x25, x26, \op, 7
__frame_regs x27, x28, \op, 9
.ifc \op, ld
.if .Lframe_regcount == -1
.error "frame_push/frame_pop may not be nested"
.endif
ldp x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
.set .Lframe_regcount, -1
.endif
.endm
/*
* Check whether to yield to another runnable task from kernel mode NEON code
* (which runs with preemption disabled).
*
* if_will_cond_yield_neon
* // pre-yield patchup code
* do_cond_yield_neon
* // post-yield patchup code
* endif_yield_neon <label>
*
* where <label> is optional, and marks the point where execution will resume
* after a yield has been performed. If omitted, execution resumes right after
* the endif_yield_neon invocation. Note that the entire sequence, including
* the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
* is not defined.
*
* As a convenience, in the case where no patchup code is required, the above
* sequence may be abbreviated to
*
* cond_yield_neon <label>
*
* Note that the patchup code does not support assembler directives that change
* the output section; any use of such directives is undefined.
*
* The yield itself consists of the following:
* - Check whether the preempt count is exactly 1, in which case re-enabling
* preemption once will make the task preemptible. If this is not the case,
* yielding is pointless.
* - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
* kernel mode NEON (which will trigger a reschedule), and branch to the
* yield fixup code.
*
* This macro sequence may clobber all CPU state that is not guaranteed by the
* AAPCS to be preserved across an ordinary function call.
*/
.macro cond_yield_neon, lbl
if_will_cond_yield_neon
do_cond_yield_neon
endif_yield_neon \lbl
.endm
.macro if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
get_thread_info x0
ldr w1, [x0, #TSK_TI_PREEMPT]
ldr x0, [x0, #TSK_TI_FLAGS]
cmp w1, #PREEMPT_DISABLE_OFFSET
csel x0, x0, xzr, eq
tbnz x0, #TIF_NEED_RESCHED, .Lyield_\@ // needs rescheduling?
/* fall through to endif_yield_neon */
.subsection 1
.Lyield_\@ :
#else
.section ".discard.cond_yield_neon", "ax"
#endif
.endm
.macro do_cond_yield_neon
bl kernel_neon_end
bl kernel_neon_begin
.endm
.macro endif_yield_neon, lbl
.ifnb \lbl
b \lbl
.else
b .Lyield_out_\@
.endif
.previous
.Lyield_out_\@ :
.endm
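/*
* Illustrative skeleton (an assumption, loosely modelled on how a NEON
* processing loop might use the hooks above; labels and the block count in
* x2 are hypothetical):
*
* 0: subs x2, x2, #1 // blocks remaining
* b.mi 8f
* ... // process one block with NEON
* if_will_cond_yield_neon
* // pre-yield: spill live state (including the block count) to memory
* do_cond_yield_neon
* // post-yield: reload the spilled state
* endif_yield_neon 0b
* b 0b
* 8: ret
*/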
#endif /* __ASM_ASSEMBLER_H */