src/runtime/asm_arm64.s
1 // Copyright 2015 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 #include "go_asm.h"
6 #include "go_tls.h"
7 #include "tls_arm64.h"
8 #include "funcdata.h"
9 #include "textflag.h"
10
11 #ifdef GOARM64_LSE
12 DATA no_lse_msg<>+0x00(SB)/64, $"This program can only run on ARM64 processors with LSE support.\n"
13 GLOBL no_lse_msg<>(SB), RODATA, $64
14 #endif
15
16 // We know for sure that Linux and FreeBSD allow reading the instruction set
17 // attribute registers (while some other OSes, like OpenBSD and Darwin, do
18 // not). Be conservative and emit code that reads such registers only when
19 // we are sure this won't lead to SIGILL.
20 #ifdef GOOS_linux
21 #define ISA_REGS_READABLE
22 #endif
23 #ifdef GOOS_freebsd
24 #define ISA_REGS_READABLE
25 #endif
26
27 #ifdef GOARM64_LSE
28 #ifdef ISA_REGS_READABLE
29 #define CHECK_GOARM64_LSE
30 #endif
31 #endif
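// Net effect of the macros above, as a rough Go-style sketch (illustrative
// only; compiledForLSE stands in for the GOARM64_LSE build setting, and the
// real decision is made by the preprocessor):
//
//     checkLSE := compiledForLSE && (GOOS == "linux" || GOOS == "freebsd")
//
// The startup check below is emitted only when the binary was compiled to
// assume LSE atomics and the OS lets us read ID_AA64ISAR0_EL1 without SIGILL.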
32
33 TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
34 // SP = stack; R0 = argc; R1 = argv
35
36 SUB $32, RSP
37 MOVW R0, 8(RSP) // argc
38 MOVD R1, 16(RSP) // argv
39
40 #ifdef TLS_darwin
41 // Initialize TLS.
42 MOVD ZR, g // clear g, make sure it's not junk.
43 SUB $32, RSP
44 MRS_TPIDR_R0
45 AND $~7, R0
46 MOVD R0, 16(RSP) // arg2: TLS base
47 MOVD $runtime·tls_g(SB), R2
48 MOVD R2, 8(RSP) // arg1: &tlsg
49 BL ·tlsinit(SB)
50 ADD $32, RSP
51 #endif
52
53 // create istack out of the given (operating system) stack.
54 // _cgo_init may update stackguard.
55 MOVD $runtime·g0(SB), g
56 MOVD RSP, R7
57 MOVD $(-64*1024)(R7), R0
58 MOVD R0, g_stackguard0(g)
59 MOVD R0, g_stackguard1(g)
60 MOVD R0, (g_stack+stack_lo)(g)
61 MOVD R7, (g_stack+stack_hi)(g)
62
63 // if there is a _cgo_init, call it using the gcc ABI.
64 MOVD _cgo_init(SB), R12
65 CBZ R12, nocgo
66
67 #ifdef GOOS_android
68 MRS_TPIDR_R0 // load TLS base pointer
69 MOVD R0, R3 // arg 3: TLS base pointer
70 MOVD $runtime·tls_g(SB), R2 // arg 2: &tls_g
71 #else
72 MOVD $0, R2 // arg 2: not used when using platform's TLS
73 #endif
74 MOVD $setg_gcc<>(SB), R1 // arg 1: setg
75 MOVD g, R0 // arg 0: G
76 SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved.
77 BL (R12)
78 ADD $16, RSP
79
80 nocgo:
81 BL runtime·save_g(SB)
82 // update stackguard after _cgo_init
83 MOVD (g_stack+stack_lo)(g), R0
84 ADD $const_stackGuard, R0
85 MOVD R0, g_stackguard0(g)
86 MOVD R0, g_stackguard1(g)
87
88 // set the per-goroutine and per-mach "registers"
89 MOVD $runtime·m0(SB), R0
90
91 // save m->g0 = g0
92 MOVD g, m_g0(R0)
93 // save m0 to g0->m
94 MOVD R0, g_m(g)
95
96 BL runtime·check(SB)
97
98 #ifdef GOOS_windows
99 BL runtime·wintls(SB)
100 #endif
101
102 // Check that the CPU we are running on supports the instructions targeted at compile time.
103 #ifdef CHECK_GOARM64_LSE
104 // Read the ID_AA64ISAR0_EL1 register
105 MRS ID_AA64ISAR0_EL1, R0
106
107 // Extract the LSE field (bits [23:20])
108 LSR $20, R0, R0
109 AND $0xf, R0, R0
110
111 // LSE support is indicated by a non-zero value
112 CBZ R0, no_lse
113 #endif
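// The check above is roughly equivalent to this sketch (illustrative only;
// the Atomic field of ID_AA64ISAR0_EL1 occupies bits [23:20]):
//
//     if (isar0>>20)&0xf == 0 {
//         // no LSE atomics: print no_lse_msg and exit (see no_lse below)
//     }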
114
115 MOVW 8(RSP), R0 // copy argc
116 MOVW R0, -8(RSP)
117 MOVD 16(RSP), R0 // copy argv
118 MOVD R0, 0(RSP)
119 BL runtime·args(SB)
120 BL runtime·osinit(SB)
121 BL runtime·schedinit(SB)
122
123 // create a new goroutine to start program
124 MOVD $runtime·mainPC(SB), R0 // entry
125 SUB $16, RSP
126 MOVD R0, 8(RSP) // arg
127 MOVD $0, 0(RSP) // dummy LR
128 BL runtime·newproc(SB)
129 ADD $16, RSP
130
131 // start this M
132 BL runtime·mstart(SB)
133 UNDEF
134
135 #ifdef CHECK_GOARM64_LSE
136 no_lse:
137 MOVD $1, R0 // stderr
138 MOVD R0, 8(RSP)
139 MOVD $no_lse_msg<>(SB), R1 // message address
140 MOVD R1, 16(RSP)
141 MOVD $64, R2 // message length
142 MOVD R2, 24(RSP)
143 CALL runtime·write(SB)
144 CALL runtime·exit(SB)
145 CALL runtime·abort(SB)
146 RET
147 #endif
148
149 // Prevent dead-code elimination of debugCallV2 and debugPinnerV1, which are
150 // intended to be called by debuggers.
151 MOVD $runtime·debugPinnerV1<ABIInternal>(SB), R0
152 MOVD $runtime·debugCallV2<ABIInternal>(SB), R0
153
154 MOVD $0, R0
155 MOVD R0, (R0) // boom
156 UNDEF
157
158 DATA runtime·mainPC+0(SB)/8,$runtime·main<ABIInternal>(SB)
159 GLOBL runtime·mainPC(SB),RODATA,$8
160
161 // Windows ARM64 needs an immediate 0xf000 argument.
162 // See go.dev/issues/53837.
163 #define BREAK \
164 #ifdef GOOS_windows \
165 BRK $0xf000 \
166 #else \
167 BRK \
168 #endif \
169
170
171 TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
172 BREAK
173 RET
174
175 TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
176 RET
177
178 TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
179 BL runtime·mstart0(SB)
180 RET // not reached
181
182 /*
183 * go-routine
184 */
185
186 // void gogo(Gobuf*)
187 // restore state from Gobuf; longjmp
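// Roughly (illustrative sketch): g is set to gobuf.g; SP, BP, LR and the
// closure context register (R26) are reloaded from the Gobuf; the consumed
// Gobuf fields are cleared; and control jumps to gobuf.pc.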
188 TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8
189 MOVD buf+0(FP), R5
190 MOVD gobuf_g(R5), R6
191 MOVD 0(R6), R4 // make sure g != nil
192 B gogo<>(SB)
193
194 TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
195 MOVD R6, g
196 BL runtime·save_g(SB)
197
198 MOVD gobuf_sp(R5), R0
199 MOVD R0, RSP
200 MOVD gobuf_bp(R5), R29
201 MOVD gobuf_lr(R5), LR
202 MOVD gobuf_ctxt(R5), R26
203 MOVD $0, gobuf_sp(R5)
204 MOVD $0, gobuf_bp(R5)
205 MOVD $0, gobuf_lr(R5)
206 MOVD $0, gobuf_ctxt(R5)
207 CMP ZR, ZR // set condition codes for == test, needed by stack split
208 MOVD gobuf_pc(R5), R6
209 B (R6)
210
211 // void mcall(fn func(*g))
212 // Switch to m->g0's stack, call fn(g).
213 // Fn must never return. It should gogo(&g->sched)
214 // to keep running g.
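// As a rough Go-level sketch (illustrative only):
//
//     func mcall(fn func(*g)) {
//         gp := getg()
//         // save gp's SP/BP/PC into gp.sched (done below)
//         // switch to gp.m.g0 and its stack
//         fn(gp) // must not return; falls into badmcall2 otherwise
//     }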
215 TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-8
216 MOVD R0, R26 // context
217
218 // Save caller state in g->sched
219 MOVD RSP, R0
220 MOVD R0, (g_sched+gobuf_sp)(g)
221 MOVD R29, (g_sched+gobuf_bp)(g)
222 MOVD LR, (g_sched+gobuf_pc)(g)
223 MOVD $0, (g_sched+gobuf_lr)(g)
224
225 // Switch to m->g0 & its stack, call fn.
226 MOVD g, R3
227 MOVD g_m(g), R8
228 MOVD m_g0(R8), g
229 BL runtime·save_g(SB)
230 CMP g, R3
231 BNE 2(PC)
232 B runtime·badmcall(SB)
233
234 MOVD (g_sched+gobuf_sp)(g), R0
235 MOVD R0, RSP // sp = m->g0->sched.sp
236 MOVD $0, R29 // clear frame pointer, as caller may execute on another M
237 MOVD R3, R0 // arg = g
238 MOVD $0, -16(RSP) // dummy LR
239 SUB $16, RSP
240 MOVD 0(R26), R4 // code pointer
241 BL (R4)
242 B runtime·badmcall2(SB)
243
244 // systemstack_switch is a dummy routine that systemstack leaves at the bottom
245 // of the G stack. We need to distinguish the routine that
246 // lives at the bottom of the G stack from the one that lives
247 // at the top of the system stack because the one at the top of
248 // the system stack terminates the stack walk (see topofstack()).
249 TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
250 UNDEF
251 BL (LR) // make sure this function is not leaf
252 RET
253
254 // func systemstack(fn func())
255 TEXT runtime·systemstack(SB), NOSPLIT, $0-8
256 MOVD fn+0(FP), R3 // R3 = fn
257 MOVD R3, R26 // context
258 MOVD g_m(g), R4 // R4 = m
259
260 MOVD m_gsignal(R4), R5 // R5 = gsignal
261 CMP g, R5
262 BEQ noswitch
263
264 MOVD m_g0(R4), R5 // R5 = g0
265 CMP g, R5
266 BEQ noswitch
267
268 MOVD m_curg(R4), R6
269 CMP g, R6
270 BEQ switch
271
272 // Bad: g is not gsignal, not g0, not curg. What is it?
273 // Hide call from linker nosplit analysis.
274 MOVD $runtime·badsystemstack(SB), R3
275 BL (R3)
276 B runtime·abort(SB)
277
278 switch:
279 // Switch stacks.
280 // The original frame pointer is stored in R29,
281 // which is useful for stack unwinding.
282 // Save our state in g->sched. Pretend to
283 // be systemstack_switch if the G stack is scanned.
284 BL gosave_systemstack_switch<>(SB)
285
286 // switch to g0
287 MOVD R5, g
288 BL runtime·save_g(SB)
289 MOVD (g_sched+gobuf_sp)(g), R3
290 MOVD R3, RSP
291
292 // call target function
293 MOVD 0(R26), R3 // code pointer
294 BL (R3)
295
296 // switch back to g
297 MOVD g_m(g), R3
298 MOVD m_curg(R3), g
299 BL runtime·save_g(SB)
300 MOVD (g_sched+gobuf_sp)(g), R0
301 MOVD R0, RSP
302 MOVD (g_sched+gobuf_bp)(g), R29
303 MOVD $0, (g_sched+gobuf_sp)(g)
304 MOVD $0, (g_sched+gobuf_bp)(g)
305 RET
306
307 noswitch:
308 // already on m stack, just call directly
309 // Using a tail call here cleans up tracebacks since we won't stop
310 // at an intermediate systemstack.
311 MOVD 0(R26), R3 // code pointer
312 MOVD.P 16(RSP), R30 // restore LR
313 SUB $8, RSP, R29 // restore FP
314 B (R3)
315
316 // func switchToCrashStack0(fn func())
317 TEXT runtime·switchToCrashStack0<ABIInternal>(SB), NOSPLIT, $0-8
318 MOVD R0, R26 // context register
319 MOVD g_m(g), R1 // curm
320
321 // set g to gcrash
322 MOVD $runtime·gcrash(SB), g // g = &gcrash
323 BL runtime·save_g(SB) // clobbers R0
324 MOVD R1, g_m(g) // g.m = curm
325 MOVD g, m_g0(R1) // curm.g0 = g
326
327 // switch to crashstack
328 MOVD (g_stack+stack_hi)(g), R1
329 SUB $(4*8), R1
330 MOVD R1, RSP
331
332 // call target function
333 MOVD 0(R26), R0
334 CALL (R0)
335
336 // should never return
337 CALL runtime·abort(SB)
338 UNDEF
339
340 /*
341 * support for morestack
342 */
343
344 // Called during function prolog when more stack is needed.
345 // Caller has already loaded:
346 // R3 prolog's LR (R30)
347 //
348 // The traceback routines see morestack on a g0 as being
349 // the top of a stack (for example, morestack calling newstack
350 // calling the scheduler calling newm calling gc), so we must
351 // record an argument size. For that purpose, it has no arguments.
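// In outline, the code below (illustrative summary):
//
//     g.sched   = f's resumption state (SP, BP, PC; LR from R3; ctxt from R26)
//     abort if g is m.g0 or m.gsignal (those stacks cannot grow)
//     m.morebuf = f's caller (PC from R3, current SP, g)
//     switch to m.g0's stack and call runtime.newstack (does not return)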
352 TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
353 // Cannot grow scheduler stack (m->g0).
354 MOVD g_m(g), R8
355 MOVD m_g0(R8), R4
356
357 // Called from f.
358 // Set g->sched to context in f
359 MOVD RSP, R0
360 MOVD R0, (g_sched+gobuf_sp)(g)
361 MOVD R29, (g_sched+gobuf_bp)(g)
362 MOVD LR, (g_sched+gobuf_pc)(g)
363 MOVD R3, (g_sched+gobuf_lr)(g)
364 MOVD R26, (g_sched+gobuf_ctxt)(g)
365
366 CMP g, R4
367 BNE 3(PC)
368 BL runtime·badmorestackg0(SB)
369 B runtime·abort(SB)
370
371 // Cannot grow signal stack (m->gsignal).
372 MOVD m_gsignal(R8), R4
373 CMP g, R4
374 BNE 3(PC)
375 BL runtime·badmorestackgsignal(SB)
376 B runtime·abort(SB)
377
378 // Called from f.
379 // Set m->morebuf to f's callers.
380 MOVD R3, (m_morebuf+gobuf_pc)(R8) // f's caller's PC
381 MOVD RSP, R0
382 MOVD R0, (m_morebuf+gobuf_sp)(R8) // f's caller's RSP
383 MOVD g, (m_morebuf+gobuf_g)(R8)
384
385 // Call newstack on m->g0's stack.
386 MOVD m_g0(R8), g
387 BL runtime·save_g(SB)
388 MOVD (g_sched+gobuf_sp)(g), R0
389 MOVD R0, RSP
390 MOVD $0, R29 // clear frame pointer, as caller may execute on another M
391 MOVD.W $0, -16(RSP) // create a call frame on g0 (saved LR; keep 16-aligned)
392 BL runtime·newstack(SB)
393
394 // Not reached, but make sure the return PC from the call to newstack
395 // is still in this function, and not the beginning of the next.
396 UNDEF
397
398 TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
399 // Force SPWRITE. This function doesn't actually write SP,
400 // but it is called with a special calling convention where
401 // the caller doesn't save LR on stack but passes it as a
402 // register (R3), which the unwinder currently doesn't understand.
403 // Make it SPWRITE to stop unwinding. (See issue 54332)
404 MOVD RSP, RSP
405
406 MOVW $0, R26
407 B runtime·morestack(SB)
408
409 // spillArgs stores return values from registers to a *internal/abi.RegArgs in R20.
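// Offsets below assume the internal/abi.RegArgs layout on arm64: 16 integer
// slots of 8 bytes each (R0-R15) followed by 16 floating-point slots of 8
// bytes each (F0-F15), so the floating-point area starts at offset 16*8.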
410 TEXT ·spillArgs(SB),NOSPLIT,$0-0
411 STP (R0, R1), (0*8)(R20)
412 STP (R2, R3), (2*8)(R20)
413 STP (R4, R5), (4*8)(R20)
414 STP (R6, R7), (6*8)(R20)
415 STP (R8, R9), (8*8)(R20)
416 STP (R10, R11), (10*8)(R20)
417 STP (R12, R13), (12*8)(R20)
418 STP (R14, R15), (14*8)(R20)
419 FSTPD (F0, F1), (16*8)(R20)
420 FSTPD (F2, F3), (18*8)(R20)
421 FSTPD (F4, F5), (20*8)(R20)
422 FSTPD (F6, F7), (22*8)(R20)
423 FSTPD (F8, F9), (24*8)(R20)
424 FSTPD (F10, F11), (26*8)(R20)
425 FSTPD (F12, F13), (28*8)(R20)
426 FSTPD (F14, F15), (30*8)(R20)
427 RET
428
429 // unspillArgs loads args into registers from a *internal/abi.RegArgs in R20.
430 TEXT ·unspillArgs(SB),NOSPLIT,$0-0
431 LDP (0*8)(R20), (R0, R1)
432 LDP (2*8)(R20), (R2, R3)
433 LDP (4*8)(R20), (R4, R5)
434 LDP (6*8)(R20), (R6, R7)
435 LDP (8*8)(R20), (R8, R9)
436 LDP (10*8)(R20), (R10, R11)
437 LDP (12*8)(R20), (R12, R13)
438 LDP (14*8)(R20), (R14, R15)
439 FLDPD (16*8)(R20), (F0, F1)
440 FLDPD (18*8)(R20), (F2, F3)
441 FLDPD (20*8)(R20), (F4, F5)
442 FLDPD (22*8)(R20), (F6, F7)
443 FLDPD (24*8)(R20), (F8, F9)
444 FLDPD (26*8)(R20), (F10, F11)
445 FLDPD (28*8)(R20), (F12, F13)
446 FLDPD (30*8)(R20), (F14, F15)
447 RET
448
449 // reflectcall: call a function with the given argument list
450 // func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
451 // we don't have variable-sized frames, so we use a small number
452 // of constant-sized-frame functions to encode a few bits of size in the pc.
453 // Caution: ugly multiline assembly macros in your future!
454
455 #define DISPATCH(NAME,MAXSIZE) \
456 MOVD $MAXSIZE, R27; \
457 CMP R27, R16; \
458 BGT 3(PC); \
459 MOVD $NAME(SB), R27; \
460 B (R27)
461 // Note: can't just "B NAME(SB)" - bad inlining results.
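// Each DISPATCH expands to, roughly (illustrative Go-style sketch):
//
//     if frameSize <= MAXSIZE {
//         goto NAME // tail-jump to the fixed-size stub
//     }
//
// so reflectcall falls through the ladder below until it reaches the first
// stub whose constant frame is large enough for the requested frame size.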
462
463 TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
464 MOVWU frameSize+32(FP), R16
465 DISPATCH(runtime·call16, 16)
466 DISPATCH(runtime·call32, 32)
467 DISPATCH(runtime·call64, 64)
468 DISPATCH(runtime·call128, 128)
469 DISPATCH(runtime·call256, 256)
470 DISPATCH(runtime·call512, 512)
471 DISPATCH(runtime·call1024, 1024)
472 DISPATCH(runtime·call2048, 2048)
473 DISPATCH(runtime·call4096, 4096)
474 DISPATCH(runtime·call8192, 8192)
475 DISPATCH(runtime·call16384, 16384)
476 DISPATCH(runtime·call32768, 32768)
477 DISPATCH(runtime·call65536, 65536)
478 DISPATCH(runtime·call131072, 131072)
479 DISPATCH(runtime·call262144, 262144)
480 DISPATCH(runtime·call524288, 524288)
481 DISPATCH(runtime·call1048576, 1048576)
482 DISPATCH(runtime·call2097152, 2097152)
483 DISPATCH(runtime·call4194304, 4194304)
484 DISPATCH(runtime·call8388608, 8388608)
485 DISPATCH(runtime·call16777216, 16777216)
486 DISPATCH(runtime·call33554432, 33554432)
487 DISPATCH(runtime·call67108864, 67108864)
488 DISPATCH(runtime·call134217728, 134217728)
489 DISPATCH(runtime·call268435456, 268435456)
490 DISPATCH(runtime·call536870912, 536870912)
491 DISPATCH(runtime·call1073741824, 1073741824)
492 MOVD $runtime·badreflectcall(SB), R0
493 B (R0)
494
495 #define CALLFN(NAME,MAXSIZE) \
496 TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
497 NO_LOCAL_POINTERS; \
498 /* copy arguments to stack */ \
499 MOVD stackArgs+16(FP), R3; \
500 MOVWU stackArgsSize+24(FP), R4; \
501 ADD $8, RSP, R5; \
502 BIC $0xf, R4, R6; \
503 CBZ R6, 6(PC); \
504 /* if R6=(argsize&~15) != 0 */ \
505 ADD R6, R5, R6; \
506 /* copy 16 bytes a time */ \
507 LDP.P 16(R3), (R7, R8); \
508 STP.P (R7, R8), 16(R5); \
509 CMP R5, R6; \
510 BNE -3(PC); \
511 AND $0xf, R4, R6; \
512 CBZ R6, 6(PC); \
513 /* if R6=(argsize&15) != 0 */ \
514 ADD R6, R5, R6; \
515 /* copy 1 byte a time for the rest */ \
516 MOVBU.P 1(R3), R7; \
517 MOVBU.P R7, 1(R5); \
518 CMP R5, R6; \
519 BNE -3(PC); \
520 /* set up argument registers */ \
521 MOVD regArgs+40(FP), R20; \
522 CALL ·unspillArgs(SB); \
523 /* call function */ \
524 MOVD f+8(FP), R26; \
525 MOVD (R26), R20; \
526 PCDATA $PCDATA_StackMapIndex, $0; \
527 BL (R20); \
528 /* copy return values back */ \
529 MOVD regArgs+40(FP), R20; \
530 CALL ·spillArgs(SB); \
531 MOVD stackArgsType+0(FP), R7; \
532 MOVD stackArgs+16(FP), R3; \
533 MOVWU stackArgsSize+24(FP), R4; \
534 MOVWU stackRetOffset+28(FP), R6; \
535 ADD $8, RSP, R5; \
536 ADD R6, R5; \
537 ADD R6, R3; \
538 SUB R6, R4; \
539 BL callRet<>(SB); \
540 RET
541
542 // callRet copies return values back at the end of call*. This is a
543 // separate function so it can allocate stack space for the arguments
544 // to reflectcallmove. It does not follow the Go ABI; it expects its
545 // arguments in registers.
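// Register inputs, as set up at the end of CALLFN above: R7 = stackArgsType,
// R3 = destination in the caller's stackArgs, R5 = source in the call frame
// just used, R4 = number of result bytes, R20 = regArgs. The stores below lay
// them out as the argument frame for runtime·reflectcallmove.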
546 TEXT callRet<>(SB), NOSPLIT, $48-0
547 NO_LOCAL_POINTERS
548 STP (R7, R3), 8(RSP)
549 STP (R5, R4), 24(RSP)
550 MOVD R20, 40(RSP)
551 BL runtime·reflectcallmove(SB)
552 RET
553
554 CALLFN(·call16, 16)
555 CALLFN(·call32, 32)
556 CALLFN(·call64, 64)
557 CALLFN(·call128, 128)
558 CALLFN(·call256, 256)
559 CALLFN(·call512, 512)
560 CALLFN(·call1024, 1024)
561 CALLFN(·call2048, 2048)
562 CALLFN(·call4096, 4096)
563 CALLFN(·call8192, 8192)
564 CALLFN(·call16384, 16384)
565 CALLFN(·call32768, 32768)
566 CALLFN(·call65536, 65536)
567 CALLFN(·call131072, 131072)
568 CALLFN(·call262144, 262144)
569 CALLFN(·call524288, 524288)
570 CALLFN(·call1048576, 1048576)
571 CALLFN(·call2097152, 2097152)
572 CALLFN(·call4194304, 4194304)
573 CALLFN(·call8388608, 8388608)
574 CALLFN(·call16777216, 16777216)
575 CALLFN(·call33554432, 33554432)
576 CALLFN(·call67108864, 67108864)
577 CALLFN(·call134217728, 134217728)
578 CALLFN(·call268435456, 268435456)
579 CALLFN(·call536870912, 536870912)
580 CALLFN(·call1073741824, 1073741824)
581
582 // func memhash32(p unsafe.Pointer, h uintptr) uintptr
583 TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
584 MOVB runtime·useAeshash(SB), R10
585 CBZ R10, noaes
586 MOVD $runtime·aeskeysched+0(SB), R3
587
588 VEOR V0.B16, V0.B16, V0.B16
589 VLD1 (R3), [V2.B16]
590 VLD1 (R0), V0.S[1]
591 VMOV R1, V0.S[0]
592
593 AESE V2.B16, V0.B16
594 AESMC V0.B16, V0.B16
595 AESE V2.B16, V0.B16
596 AESMC V0.B16, V0.B16
597 AESE V2.B16, V0.B16
598
599 VMOV V0.D[0], R0
600 RET
601 noaes:
602 B runtime·memhash32Fallback<ABIInternal>(SB)
603
604 // func memhash64(p unsafe.Pointer, h uintptr) uintptr
605 TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
606 MOVB runtime·useAeshash(SB), R10
607 CBZ R10, noaes
608 MOVD $runtime·aeskeysched+0(SB), R3
609
610 VEOR V0.B16, V0.B16, V0.B16
611 VLD1 (R3), [V2.B16]
612 VLD1 (R0), V0.D[1]
613 VMOV R1, V0.D[0]
614
615 AESE V2.B16, V0.B16
616 AESMC V0.B16, V0.B16
617 AESE V2.B16, V0.B16
618 AESMC V0.B16, V0.B16
619 AESE V2.B16, V0.B16
620
621 VMOV V0.D[0], R0
622 RET
623 noaes:
624 B runtime·memhash64Fallback<ABIInternal>(SB)
625
626 // func memhash(p unsafe.Pointer, h, size uintptr) uintptr
627 TEXT runtime·memhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-32
628 MOVB runtime·useAeshash(SB), R10
629 CBZ R10, noaes
630 B aeshashbody<>(SB)
631 noaes:
632 B runtime·memhashFallback<ABIInternal>(SB)
633
634 // func strhash(p unsafe.Pointer, h uintptr) uintptr
635 TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
636 MOVB runtime·useAeshash(SB), R10
637 CBZ R10, noaes
638 LDP (R0), (R0, R2) // string data / length
639 B aeshashbody<>(SB)
640 noaes:
641 B runtime·strhashFallback<ABIInternal>(SB)
642
643 // R0: data
644 // R1: seed data
645 // R2: length
646 // At return, R0 = return value
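// Strategy (illustrative summary of the code below): the seed and length are
// mixed into V30, round keys come from runtime·aeskeysched, and the input is
// dispatched on length (0-15, 16, 17-32, 33-64, 65-128, 129+ bytes). Each
// case runs a few AESE/AESMC rounds over the data and folds the lanes
// together with VEOR, returning the low 64 bits of the result in R0.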
647 TEXT aeshashbody<>(SB),NOSPLIT|NOFRAME,$0
648 VEOR V30.B16, V30.B16, V30.B16
649 VMOV R1, V30.D[0]
650 VMOV R2, V30.D[1] // load length into seed
651
652 MOVD $runtime·aeskeysched+0(SB), R4
653 VLD1.P 16(R4), [V0.B16]
654 AESE V30.B16, V0.B16
655 AESMC V0.B16, V0.B16
656 CMP $16, R2
657 BLO aes0to15
658 BEQ aes16
659 CMP $32, R2
660 BLS aes17to32
661 CMP $64, R2
662 BLS aes33to64
663 CMP $128, R2
664 BLS aes65to128
665 B aes129plus
666
667 aes0to15:
668 CBZ R2, aes0
669 VEOR V2.B16, V2.B16, V2.B16
670 TBZ $3, R2, less_than_8
671 VLD1.P 8(R0), V2.D[0]
672
673 less_than_8:
674 TBZ $2, R2, less_than_4
675 VLD1.P 4(R0), V2.S[2]
676
677 less_than_4:
678 TBZ $1, R2, less_than_2
679 VLD1.P 2(R0), V2.H[6]
680
681 less_than_2:
682 TBZ $0, R2, done
683 VLD1 (R0), V2.B[14]
684 done:
685 AESE V0.B16, V2.B16
686 AESMC V2.B16, V2.B16
687 AESE V0.B16, V2.B16
688 AESMC V2.B16, V2.B16
689 AESE V0.B16, V2.B16
690 AESMC V2.B16, V2.B16
691
692 VMOV V2.D[0], R0
693 RET
694
695 aes0:
696 VMOV V0.D[0], R0
697 RET
698
699 aes16:
700 VLD1 (R0), [V2.B16]
701 B done
702
703 aes17to32:
704 // make second seed
705 VLD1 (R4), [V1.B16]
706 AESE V30.B16, V1.B16
707 AESMC V1.B16, V1.B16
708 SUB $16, R2, R10
709 VLD1.P (R0)(R10), [V2.B16]
710 VLD1 (R0), [V3.B16]
711
712 AESE V0.B16, V2.B16
713 AESMC V2.B16, V2.B16
714 AESE V1.B16, V3.B16
715 AESMC V3.B16, V3.B16
716
717 AESE V0.B16, V2.B16
718 AESMC V2.B16, V2.B16
719 AESE V1.B16, V3.B16
720 AESMC V3.B16, V3.B16
721
722 AESE V0.B16, V2.B16
723 AESE V1.B16, V3.B16
724
725 VEOR V3.B16, V2.B16, V2.B16
726
727 VMOV V2.D[0], R0
728 RET
729
730 aes33to64:
731 VLD1 (R4), [V1.B16, V2.B16, V3.B16]
732 AESE V30.B16, V1.B16
733 AESMC V1.B16, V1.B16
734 AESE V30.B16, V2.B16
735 AESMC V2.B16, V2.B16
736 AESE V30.B16, V3.B16
737 AESMC V3.B16, V3.B16
738 SUB $32, R2, R10
739
740 VLD1.P (R0)(R10), [V4.B16, V5.B16]
741 VLD1 (R0), [V6.B16, V7.B16]
742
743 AESE V0.B16, V4.B16
744 AESMC V4.B16, V4.B16
745 AESE V1.B16, V5.B16
746 AESMC V5.B16, V5.B16
747 AESE V2.B16, V6.B16
748 AESMC V6.B16, V6.B16
749 AESE V3.B16, V7.B16
750 AESMC V7.B16, V7.B16
751
752 AESE V0.B16, V4.B16
753 AESMC V4.B16, V4.B16
754 AESE V1.B16, V5.B16
755 AESMC V5.B16, V5.B16
756 AESE V2.B16, V6.B16
757 AESMC V6.B16, V6.B16
758 AESE V3.B16, V7.B16
759 AESMC V7.B16, V7.B16
760
761 AESE V0.B16, V4.B16
762 AESE V1.B16, V5.B16
763 AESE V2.B16, V6.B16
764 AESE V3.B16, V7.B16
765
766 VEOR V6.B16, V4.B16, V4.B16
767 VEOR V7.B16, V5.B16, V5.B16
768 VEOR V5.B16, V4.B16, V4.B16
769
770 VMOV V4.D[0], R0
771 RET
772
773 aes65to128:
774 VLD1.P 64(R4), [V1.B16, V2.B16, V3.B16, V4.B16]
775 VLD1 (R4), [V5.B16, V6.B16, V7.B16]
776 AESE V30.B16, V1.B16
777 AESMC V1.B16, V1.B16
778 AESE V30.B16, V2.B16
779 AESMC V2.B16, V2.B16
780 AESE V30.B16, V3.B16
781 AESMC V3.B16, V3.B16
782 AESE V30.B16, V4.B16
783 AESMC V4.B16, V4.B16
784 AESE V30.B16, V5.B16
785 AESMC V5.B16, V5.B16
786 AESE V30.B16, V6.B16
787 AESMC V6.B16, V6.B16
788 AESE V30.B16, V7.B16
789 AESMC V7.B16, V7.B16
790
791 SUB $64, R2, R10
792 VLD1.P (R0)(R10), [V8.B16, V9.B16, V10.B16, V11.B16]
793 VLD1 (R0), [V12.B16, V13.B16, V14.B16, V15.B16]
794 AESE V0.B16, V8.B16
795 AESMC V8.B16, V8.B16
796 AESE V1.B16, V9.B16
797 AESMC V9.B16, V9.B16
798 AESE V2.B16, V10.B16
799 AESMC V10.B16, V10.B16
800 AESE V3.B16, V11.B16
801 AESMC V11.B16, V11.B16
802 AESE V4.B16, V12.B16
803 AESMC V12.B16, V12.B16
804 AESE V5.B16, V13.B16
805 AESMC V13.B16, V13.B16
806 AESE V6.B16, V14.B16
807 AESMC V14.B16, V14.B16
808 AESE V7.B16, V15.B16
809 AESMC V15.B16, V15.B16
810
811 AESE V0.B16, V8.B16
812 AESMC V8.B16, V8.B16
813 AESE V1.B16, V9.B16
814 AESMC V9.B16, V9.B16
815 AESE V2.B16, V10.B16
816 AESMC V10.B16, V10.B16
817 AESE V3.B16, V11.B16
818 AESMC V11.B16, V11.B16
819 AESE V4.B16, V12.B16
820 AESMC V12.B16, V12.B16
821 AESE V5.B16, V13.B16
822 AESMC V13.B16, V13.B16
823 AESE V6.B16, V14.B16
824 AESMC V14.B16, V14.B16
825 AESE V7.B16, V15.B16
826 AESMC V15.B16, V15.B16
827
828 AESE V0.B16, V8.B16
829 AESE V1.B16, V9.B16
830 AESE V2.B16, V10.B16
831 AESE V3.B16, V11.B16
832 AESE V4.B16, V12.B16
833 AESE V5.B16, V13.B16
834 AESE V6.B16, V14.B16
835 AESE V7.B16, V15.B16
836
837 VEOR V12.B16, V8.B16, V8.B16
838 VEOR V13.B16, V9.B16, V9.B16
839 VEOR V14.B16, V10.B16, V10.B16
840 VEOR V15.B16, V11.B16, V11.B16
841 VEOR V10.B16, V8.B16, V8.B16
842 VEOR V11.B16, V9.B16, V9.B16
843 VEOR V9.B16, V8.B16, V8.B16
844
845 VMOV V8.D[0], R0
846 RET
847
848 aes129plus:
849 PRFM (R0), PLDL1KEEP
850 VLD1.P 64(R4), [V1.B16, V2.B16, V3.B16, V4.B16]
851 VLD1 (R4), [V5.B16, V6.B16, V7.B16]
852 AESE V30.B16, V1.B16
853 AESMC V1.B16, V1.B16
854 AESE V30.B16, V2.B16
855 AESMC V2.B16, V2.B16
856 AESE V30.B16, V3.B16
857 AESMC V3.B16, V3.B16
858 AESE V30.B16, V4.B16
859 AESMC V4.B16, V4.B16
860 AESE V30.B16, V5.B16
861 AESMC V5.B16, V5.B16
862 AESE V30.B16, V6.B16
863 AESMC V6.B16, V6.B16
864 AESE V30.B16, V7.B16
865 AESMC V7.B16, V7.B16
866 ADD R0, R2, R10
867 SUB $128, R10, R10
868 VLD1.P 64(R10), [V8.B16, V9.B16, V10.B16, V11.B16]
869 VLD1 (R10), [V12.B16, V13.B16, V14.B16, V15.B16]
870 SUB $1, R2, R2
871 LSR $7, R2, R2
872
873 aesloop:
874 AESE V8.B16, V0.B16
875 AESMC V0.B16, V0.B16
876 AESE V9.B16, V1.B16
877 AESMC V1.B16, V1.B16
878 AESE V10.B16, V2.B16
879 AESMC V2.B16, V2.B16
880 AESE V11.B16, V3.B16
881 AESMC V3.B16, V3.B16
882 AESE V12.B16, V4.B16
883 AESMC V4.B16, V4.B16
884 AESE V13.B16, V5.B16
885 AESMC V5.B16, V5.B16
886 AESE V14.B16, V6.B16
887 AESMC V6.B16, V6.B16
888 AESE V15.B16, V7.B16
889 AESMC V7.B16, V7.B16
890
891 VLD1.P 64(R0), [V8.B16, V9.B16, V10.B16, V11.B16]
892 AESE V8.B16, V0.B16
893 AESMC V0.B16, V0.B16
894 AESE V9.B16, V1.B16
895 AESMC V1.B16, V1.B16
896 AESE V10.B16, V2.B16
897 AESMC V2.B16, V2.B16
898 AESE V11.B16, V3.B16
899 AESMC V3.B16, V3.B16
900
901 VLD1.P 64(R0), [V12.B16, V13.B16, V14.B16, V15.B16]
902 AESE V12.B16, V4.B16
903 AESMC V4.B16, V4.B16
904 AESE V13.B16, V5.B16
905 AESMC V5.B16, V5.B16
906 AESE V14.B16, V6.B16
907 AESMC V6.B16, V6.B16
908 AESE V15.B16, V7.B16
909 AESMC V7.B16, V7.B16
910 SUB $1, R2, R2
911 CBNZ R2, aesloop
912
913 AESE V8.B16, V0.B16
914 AESMC V0.B16, V0.B16
915 AESE V9.B16, V1.B16
916 AESMC V1.B16, V1.B16
917 AESE V10.B16, V2.B16
918 AESMC V2.B16, V2.B16
919 AESE V11.B16, V3.B16
920 AESMC V3.B16, V3.B16
921 AESE V12.B16, V4.B16
922 AESMC V4.B16, V4.B16
923 AESE V13.B16, V5.B16
924 AESMC V5.B16, V5.B16
925 AESE V14.B16, V6.B16
926 AESMC V6.B16, V6.B16
927 AESE V15.B16, V7.B16
928 AESMC V7.B16, V7.B16
929
930 AESE V8.B16, V0.B16
931 AESMC V0.B16, V0.B16
932 AESE V9.B16, V1.B16
933 AESMC V1.B16, V1.B16
934 AESE V10.B16, V2.B16
935 AESMC V2.B16, V2.B16
936 AESE V11.B16, V3.B16
937 AESMC V3.B16, V3.B16
938 AESE V12.B16, V4.B16
939 AESMC V4.B16, V4.B16
940 AESE V13.B16, V5.B16
941 AESMC V5.B16, V5.B16
942 AESE V14.B16, V6.B16
943 AESMC V6.B16, V6.B16
944 AESE V15.B16, V7.B16
945 AESMC V7.B16, V7.B16
946
947 AESE V8.B16, V0.B16
948 AESE V9.B16, V1.B16
949 AESE V10.B16, V2.B16
950 AESE V11.B16, V3.B16
951 AESE V12.B16, V4.B16
952 AESE V13.B16, V5.B16
953 AESE V14.B16, V6.B16
954 AESE V15.B16, V7.B16
955
956 VEOR V0.B16, V1.B16, V0.B16
957 VEOR V2.B16, V3.B16, V2.B16
958 VEOR V4.B16, V5.B16, V4.B16
959 VEOR V6.B16, V7.B16, V6.B16
960 VEOR V0.B16, V2.B16, V0.B16
961 VEOR V4.B16, V6.B16, V4.B16
962 VEOR V4.B16, V0.B16, V0.B16
963
964 VMOV V0.D[0], R0
965 RET
966
967 TEXT runtime·procyield(SB),NOSPLIT,$0-0
968 MOVWU cycles+0(FP), R0
969 again:
970 YIELD
971 SUBW $1, R0
972 CBNZ R0, again
973 RET
974
975 // Save state of caller into g->sched,
976 // but using fake PC from systemstack_switch.
977 // Must only be called from functions with no locals ($0)
978 // or else unwinding from systemstack_switch is incorrect.
979 // Smashes R0.
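// Rough sketch of the effect (illustrative only):
//
//     gp := getg()
//     gp.sched.pc = PC of systemstack_switch + 8 // just past its prologue
//     gp.sched.sp = current SP
//     gp.sched.bp = R29
//     gp.sched.lr = 0
//     if gp.sched.ctxt != 0 { abort() } // must already be zero; see func save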
980 TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
981 MOVD $runtime·systemstack_switch(SB), R0
982 ADD $8, R0 // get past prologue
983 MOVD R0, (g_sched+gobuf_pc)(g)
984 MOVD RSP, R0
985 MOVD R0, (g_sched+gobuf_sp)(g)
986 MOVD R29, (g_sched+gobuf_bp)(g)
987 MOVD $0, (g_sched+gobuf_lr)(g)
988 // Assert ctxt is zero. See func save.
989 MOVD (g_sched+gobuf_ctxt)(g), R0
990 CBZ R0, 2(PC)
991 CALL runtime·abort(SB)
992 RET
993
994 // func asmcgocall_no_g(fn, arg unsafe.Pointer)
995 // Call fn(arg) aligned appropriately for the gcc ABI.
996 // Called on a system stack, and there may be no g yet (during needm).
997 TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
998 MOVD fn+0(FP), R1
999 MOVD arg+8(FP), R0
1000 SUB $16, RSP // skip over saved frame pointer below RSP
1001 BL (R1)
1002 ADD $16, RSP // skip over saved frame pointer below RSP
1003 RET
1004
1005 // func asmcgocall(fn, arg unsafe.Pointer) int32
1006 // Call fn(arg) on the scheduler stack,
1007 // aligned appropriately for the gcc ABI.
1008 // See cgocall.go for more details.
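// Outline (illustrative summary of the code below): if there is no g, or g is
// already m.g0 or m.gsignal, take the nosave path and call fn on the current
// stack. Otherwise save state with gosave_systemstack_switch, switch to
// m.g0's stack, call fn(arg), then switch back. The old g and the depth below
// stack.hi (rather than the raw SP) are kept on the g0 stack, because the
// goroutine stack may be moved by a callback while fn runs.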
1009 TEXT ·asmcgocall(SB),NOSPLIT,$0-20
1010 MOVD fn+0(FP), R1
1011 MOVD arg+8(FP), R0
1012
1013 MOVD RSP, R2 // save original stack pointer
1014 CBZ g, nosave
1015 MOVD g, R4
1016
1017 // Figure out if we need to switch to m->g0 stack.
1018 // We get called to create new OS threads too, and those
1019 // come in on the m->g0 stack already. Or we might already
1020 // be on the m->gsignal stack.
1021 MOVD g_m(g), R8
1022 MOVD m_gsignal(R8), R3
1023 CMP R3, g
1024 BEQ nosave
1025 MOVD m_g0(R8), R3
1026 CMP R3, g
1027 BEQ nosave
1028
1029 // Switch to system stack.
1030 MOVD R0, R9 // gosave_systemstack_switch<> and save_g might clobber R0
1031 BL gosave_systemstack_switch<>(SB)
1032 MOVD R3, g
1033 BL runtime·save_g(SB)
1034 MOVD (g_sched+gobuf_sp)(g), R0
1035 MOVD R0, RSP
1036 MOVD (g_sched+gobuf_bp)(g), R29
1037 MOVD R9, R0
1038
1039 // Now on a scheduling stack (a pthread-created stack).
1040 // Save room for two of our pointers (16 bytes keeps RSP
1041 // 16-byte aligned).
1042 MOVD RSP, R13
1043 SUB $16, R13
1044 MOVD R13, RSP
1045 MOVD R4, 0(RSP) // save old g on stack
1046 MOVD (g_stack+stack_hi)(R4), R4
1047 SUB R2, R4
1048 MOVD R4, 8(RSP) // save depth in old g stack (can't just save SP, as stack might be copied during a callback)
1049 BL (R1)
1050 MOVD R0, R9
1051
1052 // Restore g, stack pointer. R0 is errno, so don't touch it
1053 MOVD 0(RSP), g
1054 BL runtime·save_g(SB)
1055 MOVD (g_stack+stack_hi)(g), R5
1056 MOVD 8(RSP), R6
1057 SUB R6, R5
1058 MOVD R9, R0
1059 MOVD R5, RSP
1060
1061 MOVW R0, ret+16(FP)
1062 RET
1063
1064 nosave:
1065 // Running on a system stack, perhaps even without a g.
1066 // Having no g can happen during thread creation or thread teardown
1067 // (see needm/dropm on Solaris, for example).
1068 // This code is like the above sequence but without saving/restoring g
1069 // and without worrying about the stack moving out from under us
1070 // (because we're on a system stack, not a goroutine stack).
1071 // The above code could be used directly if already on a system stack,
1072 // but then the only path through this code would be a rare case on Solaris.
1073 // Using this code for all "already on system stack" calls exercises it more,
1074 // which should help keep it correct.
1075 MOVD RSP, R13
1076 SUB $16, R13
1077 MOVD R13, RSP
1078 MOVD $0, R4
1079 MOVD R4, 0(RSP) // Where above code stores g, in case someone looks during debugging.
1080 MOVD R2, 8(RSP) // Save original stack pointer.
1081 BL (R1)
1082 // Restore stack pointer.
1083 MOVD 8(RSP), R2
1084 MOVD R2, RSP
1085 MOVD R0, ret+16(FP)
1086 RET
1087
1088 // cgocallback(fn, frame unsafe.Pointer, ctxt uintptr)
1089 // See cgocall.go for more details.
1090 TEXT ·cgocallback(SB),NOSPLIT,$24-24
1091 NO_LOCAL_POINTERS
1092
1093 // When fn is nil, skip cgocallbackg and just dropm; in that case frame is the saved g.
1094 // This path is used to drop the m while the thread is exiting.
1095 MOVD fn+0(FP), R1
1096 CBNZ R1, loadg
1097 // Restore the g from frame.
1098 MOVD frame+8(FP), g
1099 B dropm
1100
1101 loadg:
1102 // Load g from thread-local storage.
1103 BL runtime·load_g(SB)
1104
1105 // If g is nil, then either Go did not create the current thread,
1106 // or this thread never called into Go on pthread platforms.
1107 // Call needm to obtain one for temporary use.
1108 // In this case, we're running on the thread stack, so there's
1109 // lots of space, but the linker doesn't know. Hide the call from
1110 // the linker analysis by using an indirect call.
1111 CBZ g, needm
1112
1113 MOVD g_m(g), R8
1114 MOVD R8, savedm-8(SP)
1115 B havem
1116
1117 needm:
1118 MOVD g, savedm-8(SP) // g is zero, so is m.
1119 MOVD $runtime·needAndBindM(SB), R0
1120 BL (R0)
1121
1122 // Set m->g0->sched.sp = SP, so that if a panic happens
1123 // during the function we are about to execute, it will
1124 // have a valid SP to run on the g0 stack.
1125 // The next few lines (after the havem label)
1126 // will save this SP onto the stack and then write
1127 // the same SP back to m->sched.sp. That seems redundant,
1128 // but if an unrecovered panic happens, unwindm will
1129 // restore the g->sched.sp from the stack location
1130 // and then systemstack will try to use it. If we don't set it here,
1131 // that restored SP will be uninitialized (typically 0) and
1132 // will not be usable.
1133 MOVD g_m(g), R8
1134 MOVD m_g0(R8), R3
1135 MOVD RSP, R0
1136 MOVD R0, (g_sched+gobuf_sp)(R3)
1137 MOVD R29, (g_sched+gobuf_bp)(R3)
1138
1139 havem:
1140 // Now there's a valid m, and we're running on its m->g0.
1141 // Save current m->g0->sched.sp on stack and then set it to SP.
1142 // Save current sp in m->g0->sched.sp in preparation for
1143 // switch back to m->curg stack.
1144 // NOTE: unwindm knows that the saved g->sched.sp is at 16(RSP) aka savedsp-16(SP).
1145 // Beware that the frame size is actually 32+16.
1146 MOVD m_g0(R8), R3
1147 MOVD (g_sched+gobuf_sp)(R3), R4
1148 MOVD R4, savedsp-16(SP)
1149 MOVD RSP, R0
1150 MOVD R0, (g_sched+gobuf_sp)(R3)
1151
1152 // Switch to m->curg stack and call runtime.cgocallbackg.
1153 // Because we are taking over the execution of m->curg
1154 // but *not* resuming what had been running, we need to
1155 // save that information (m->curg->sched) so we can restore it.
1156 // We can restore m->curg->sched.sp easily, because calling
1157 // runtime.cgocallbackg leaves SP unchanged upon return.
1158 // To save m->curg->sched.pc, we push it onto the curg stack and
1159 // open a frame the same size as cgocallback's g0 frame.
1160 // Once we switch to the curg stack, the pushed PC will appear
1161 // to be the return PC of cgocallback, so that the traceback
1162 // will seamlessly trace back into the earlier calls.
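// Frame being built on the curg stack (illustrative summary): the old
// sched.pc goes to -48(R4) and the old sched.bp to -56(R4); the new SP is
// R4-48, so the pushed pc sits at 0(RSP), and fn, frame and ctxt are stored
// at 8, 16 and 24(RSP) for the cgocallbackg call.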
1163 MOVD m_curg(R8), g
1164 BL runtime·save_g(SB)
1165 MOVD (g_sched+gobuf_sp)(g), R4 // prepare stack as R4
1166 MOVD (g_sched+gobuf_pc)(g), R5
1167 MOVD R5, -48(R4)
1168 MOVD (g_sched+gobuf_bp)(g), R5
1169 MOVD R5, -56(R4)
1170 // Gather our arguments into registers.
1171 MOVD fn+0(FP), R1
1172 MOVD frame+8(FP), R2
1173 MOVD ctxt+16(FP), R3
1174 MOVD $-48(R4), R0 // maintain 16-byte SP alignment
1175 MOVD R0, RSP // switch stack
1176 MOVD R1, 8(RSP)
1177 MOVD R2, 16(RSP)
1178 MOVD R3, 24(RSP)
1179 MOVD $runtime·cgocallbackg(SB), R0
1180 CALL (R0) // indirect call to bypass nosplit check. We're on a different stack now.
1181
1182 // Restore g->sched (== m->curg->sched) from saved values.
1183 MOVD 0(RSP), R5
1184 MOVD R5, (g_sched+gobuf_pc)(g)
1185 MOVD RSP, R4
1186 ADD $48, R4, R4
1187 MOVD R4, (g_sched+gobuf_sp)(g)
1188
1189 // Switch back to m->g0's stack and restore m->g0->sched.sp.
1190 // (Unlike m->curg, the g0 goroutine never uses sched.pc,
1191 // so we do not have to restore it.)
1192 MOVD g_m(g), R8
1193 MOVD m_g0(R8), g
1194 BL runtime·save_g(SB)
1195 MOVD (g_sched+gobuf_sp)(g), R0
1196 MOVD R0, RSP
1197 MOVD savedsp-16(SP), R4
1198 MOVD R4, (g_sched+gobuf_sp)(g)
1199
1200 // If the m on entry was nil, we called needm above to borrow an m,
1201 // 1. for the duration of the call on non-pthread platforms,
1202 // 2. or the duration of the C thread alive on pthread platforms.
1203 // If the m on entry wasn't nil,
1204 // 1. the thread might be a Go thread,
1205 // 2. or it wasn't the first call from a C thread on pthread platforms,
1206 // since then we skip dropm to reuse the m in the first call.
1207 MOVD savedm-8(SP), R6
1208 CBNZ R6, droppedm
1209
1210 // Skip dropm to reuse it in the next call, when a pthread key has been created.
1211 MOVD _cgo_pthread_key_created(SB), R6
1212 // It means cgo is disabled when _cgo_pthread_key_created is a nil pointer, need dropm.
1213 CBZ R6, dropm
1214 MOVD (R6), R6
1215 CBNZ R6, droppedm
1216
1217 dropm:
1218 MOVD $runtime·dropm(SB), R0
1219 BL (R0)
1220 droppedm:
1221
1222 // Done!
1223 RET
1224
1225 // Called from cgo wrappers, this function returns g->m->curg.stack.hi.
1226 // Must obey the gcc calling convention.
1227 TEXT _cgo_topofstack(SB),NOSPLIT,$24
1228 // g (R28) and REGTMP (R27) might be clobbered by load_g. They
1229 // are callee-save in the gcc calling convention, so save them.
1230 MOVD R27, savedR27-8(SP)
1231 MOVD g, saveG-16(SP)
1232
1233 BL runtime·load_g(SB)
1234 MOVD g_m(g), R0
1235 MOVD m_curg(R0), R0
1236 MOVD (g_stack+stack_hi)(R0), R0
1237
1238 MOVD saveG-16(SP), g
1239 MOVD savedR27-8(SP), R27
1240 RET
1241
1242 // void setg(G*); set g. for use by needm.
1243 TEXT runtime·setg(SB), NOSPLIT, $0-8
1244 MOVD gg+0(FP), g
1245 // This only happens if iscgo, so jump straight to save_g
1246 BL runtime·save_g(SB)
1247 RET
1248
1249 // void setg_gcc(G*); set g called from gcc
1250 TEXT setg_gcc<>(SB),NOSPLIT,$8
1251 MOVD R0, g
1252 MOVD R27, savedR27-8(SP)
1253 BL runtime·save_g(SB)
1254 MOVD savedR27-8(SP), R27
1255 RET
1256
1257 TEXT runtime·emptyfunc(SB),0,$0-0
1258 RET
1259
1260 TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
1261 MOVD ZR, R0
1262 MOVD (R0), R0
1263 UNDEF
1264
1265 // The top-most function running on a goroutine
1266 // returns to goexit+PCQuantum.
1267 TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0
1268 MOVD R0, R0 // NOP
1269 BL runtime·goexit1(SB) // does not return
1270
1271 // This is called from .init_array and follows the platform, not Go, ABI.
1272 TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
1273 SUB $0x10, RSP
1274 MOVD R27, 8(RSP) // The access to global variables below implicitly uses R27, which is callee-save
1275 MOVD runtime·lastmoduledatap(SB), R1
1276 MOVD R0, moduledata_next(R1)
1277 MOVD R0, runtime·lastmoduledatap(SB)
1278 MOVD 8(RSP), R27
1279 ADD $0x10, RSP
1280 RET
1281
1282 TEXT ·checkASM(SB),NOSPLIT,$0-1
1283 MOVW $1, R3
1284 MOVB R3, ret+0(FP)
1285 RET
1286
1287 // gcWriteBarrier informs the GC about heap pointer writes.
1288 //
1289 // gcWriteBarrier does NOT follow the Go ABI. It accepts the
1290 // number of bytes of buffer needed in R25, and returns a pointer
1291 // to the buffer space in R25.
1292 // It clobbers condition codes.
1293 // It does not clobber any general-purpose registers except R27,
1294 // but may clobber others (e.g., floating point registers).
1295 // The act of CALLing gcWriteBarrier will clobber R30 (LR).
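// The fast path below is roughly this Go sketch (illustrative only; compare
// the wbBuf handling in mwbbuf.go):
//
//     buf := &getg().m.p.ptr().wbBuf
//     for {
//         next := buf.next + uintptr(bytes) // requested size arrives in R25
//         if next <= buf.end {
//             buf.next = next
//             return next - uintptr(bytes) // reserved space, returned in R25
//         }
//         wbBufFlush() // slow path: spill all GPRs, flush the buffer, retry
//     }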
1296 TEXT gcWriteBarrier<>(SB),NOSPLIT,$200
1297 // Save the registers clobbered by the fast path.
1298 STP (R0, R1), 184(RSP)
1299 retry:
1300 MOVD g_m(g), R0
1301 MOVD m_p(R0), R0
1302 MOVD (p_wbBuf+wbBuf_next)(R0), R1
1303 MOVD (p_wbBuf+wbBuf_end)(R0), R27
1304 // Increment wbBuf.next position.
1305 ADD R25, R1
1306 // Is the buffer full?
1307 CMP R27, R1
1308 BHI flush
1309 // Commit to the larger buffer.
1310 MOVD R1, (p_wbBuf+wbBuf_next)(R0)
1311 // Make return value (the original next position)
1312 SUB R25, R1, R25
1313 // Restore registers.
1314 LDP 184(RSP), (R0, R1)
1315 RET
1316
1317 flush:
1318 // Save all general purpose registers since these could be
1319 // clobbered by wbBufFlush and were not saved by the caller.
1320 // R0 and R1 already saved
1321 STP (R2, R3), 1*8(RSP)
1322 STP (R4, R5), 3*8(RSP)
1323 STP (R6, R7), 5*8(RSP)
1324 STP (R8, R9), 7*8(RSP)
1325 STP (R10, R11), 9*8(RSP)
1326 STP (R12, R13), 11*8(RSP)
1327 STP (R14, R15), 13*8(RSP)
1328 // R16, R17 may be clobbered by linker trampoline
1329 // R18 is unused.
1330 STP (R19, R20), 15*8(RSP)
1331 STP (R21, R22), 17*8(RSP)
1332 STP (R23, R24), 19*8(RSP)
1333 STP (R25, R26), 21*8(RSP)
1334 // R27 is temp register.
1335 // R28 is g.
1336 // R29 is frame pointer (unused).
1337 // R30 is LR, which was saved by the prologue.
1338 // R31 is SP.
1339
1340 CALL runtime·wbBufFlush(SB)
1341 LDP 1*8(RSP), (R2, R3)
1342 LDP 3*8(RSP), (R4, R5)
1343 LDP 5*8(RSP), (R6, R7)
1344 LDP 7*8(RSP), (R8, R9)
1345 LDP 9*8(RSP), (R10, R11)
1346 LDP 11*8(RSP), (R12, R13)
1347 LDP 13*8(RSP), (R14, R15)
1348 LDP 15*8(RSP), (R19, R20)
1349 LDP 17*8(RSP), (R21, R22)
1350 LDP 19*8(RSP), (R23, R24)
1351 LDP 21*8(RSP), (R25, R26)
1352 JMP retry
1353
1354 TEXT runtime·gcWriteBarrier1<ABIInternal>(SB),NOSPLIT,$0
1355 MOVD $8, R25
1356 JMP gcWriteBarrier<>(SB)
1357 TEXT runtime·gcWriteBarrier2<ABIInternal>(SB),NOSPLIT,$0
1358 MOVD $16, R25
1359 JMP gcWriteBarrier<>(SB)
1360 TEXT runtime·gcWriteBarrier3<ABIInternal>(SB),NOSPLIT,$0
1361 MOVD $24, R25
1362 JMP gcWriteBarrier<>(SB)
1363 TEXT runtime·gcWriteBarrier4<ABIInternal>(SB),NOSPLIT,$0
1364 MOVD $32, R25
1365 JMP gcWriteBarrier<>(SB)
1366 TEXT runtime·gcWriteBarrier5<ABIInternal>(SB),NOSPLIT,$0
1367 MOVD $40, R25
1368 JMP gcWriteBarrier<>(SB)
1369 TEXT runtime·gcWriteBarrier6<ABIInternal>(SB),NOSPLIT,$0
1370 MOVD $48, R25
1371 JMP gcWriteBarrier<>(SB)
1372 TEXT runtime·gcWriteBarrier7<ABIInternal>(SB),NOSPLIT,$0
1373 MOVD $56, R25
1374 JMP gcWriteBarrier<>(SB)
1375 TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
1376 MOVD $64, R25
1377 JMP gcWriteBarrier<>(SB)
1378
1379 DATA debugCallFrameTooLarge<>+0x00(SB)/20, $"call frame too large"
1380 GLOBL debugCallFrameTooLarge<>(SB), RODATA, $20 // Size duplicated below
1381
1382 // debugCallV2 is the entry point for debugger-injected function
1383 // calls on running goroutines. It informs the runtime that a
1384 // debug call has been injected and creates a call frame for the
1385 // debugger to fill in.
1386 //
1387 // To inject a function call, a debugger should:
1388 // 1. Check that the goroutine is in state _Grunning and that
1389 // there are at least 288 bytes free on the stack.
1390 // 2. Set SP as SP-16.
1391 // 3. Store the current LR in (SP) (using the SP after step 2).
1392 // 4. Store the current PC in the LR register.
1393 // 5. Write the desired argument frame size at SP-16
1394 // 6. Save all machine registers (including flags and fpsimd registers)
1395 // so they can be restored later by the debugger.
1396 // 7. Set the PC to debugCallV2 and resume execution.
1397 //
1398 // If the goroutine is in state _Grunnable, then it's not generally
1399 // safe to inject a call because it may return out via other runtime
1400 // operations. Instead, the debugger should unwind the stack to find
1401 // the return to non-runtime code, add a temporary breakpoint there,
1402 // and inject the call once that breakpoint is hit.
1403 //
1404 // If the goroutine is in any other state, it's not safe to inject a call.
1405 //
1406 // This function communicates back to the debugger by setting R20 and
1407 // invoking BRK to raise a breakpoint signal. Note that the signal PC of
1408 // the signal triggered by the BRK instruction is the PC where the signal
1409 // is trapped, not the next PC, so to resume execution, the debugger needs
1410 // to set the signal PC to PC+4. See the comments in the implementation for
1411 // the protocol the debugger is expected to follow. InjectDebugCall in the
1412 // runtime tests demonstrates this protocol.
1413 //
1414 // The debugger must ensure that any pointers passed to the function
1415 // obey escape analysis requirements. Specifically, it must not pass
1416 // a stack pointer to an escaping argument. debugCallV2 cannot check
1417 // this invariant.
1418 //
1419 // This is ABIInternal because Go code injects its PC directly into new
1420 // goroutine stacks.
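// Summary of the R20 values used with BRK in the code below:
//     0  - a call frame is open; write the argument frame at SP+8, set up
//          argument registers, set LR and PC, and resume to make the call
//     1  - the injected call returned; results are at SP+8 and in registers
//     2  - the injected call panicked; the panic value is at SP+8
//     8  - the call cannot be injected; an error string is described at SP+8
//     16 - restore all registers except PC and RSP and resume execution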
1421 TEXT runtime·debugCallV2<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
1422 STP (R29, R30), -280(RSP)
1423 SUB $272, RSP, RSP
1424 SUB $8, RSP, R29
1425 // Save all registers that may contain pointers so they can be
1426 // conservatively scanned.
1427 //
1428 // We can't do anything that might clobber any of these
1429 // registers before this.
1430 STP (R27, g), (30*8)(RSP)
1431 STP (R25, R26), (28*8)(RSP)
1432 STP (R23, R24), (26*8)(RSP)
1433 STP (R21, R22), (24*8)(RSP)
1434 STP (R19, R20), (22*8)(RSP)
1435 STP (R16, R17), (20*8)(RSP)
1436 STP (R14, R15), (18*8)(RSP)
1437 STP (R12, R13), (16*8)(RSP)
1438 STP (R10, R11), (14*8)(RSP)
1439 STP (R8, R9), (12*8)(RSP)
1440 STP (R6, R7), (10*8)(RSP)
1441 STP (R4, R5), (8*8)(RSP)
1442 STP (R2, R3), (6*8)(RSP)
1443 STP (R0, R1), (4*8)(RSP)
1444
1445 // Perform a safe-point check.
1446 MOVD R30, 8(RSP) // Caller's PC
1447 CALL runtime·debugCallCheck(SB)
1448 MOVD 16(RSP), R0
1449 CBZ R0, good
1450
1451 // The safety check failed. Put the reason string at the top
1452 // of the stack.
1453 MOVD R0, 8(RSP)
1454 MOVD 24(RSP), R0
1455 MOVD R0, 16(RSP)
1456
1457 // Set R20 to 8 and invoke BRK. The debugger should get the
1458 // reason a call can't be injected from SP+8 and resume execution.
1459 MOVD $8, R20
1460 BREAK
1461 JMP restore
1462
1463 good:
1464 // Registers are saved and it's safe to make a call.
1465 // Open up a call frame, moving the stack if necessary.
1466 //
1467 // Once the frame is allocated, this will set R20 to 0 and
1468 // invoke BRK. The debugger should write the argument
1469 // frame for the call at SP+8, set up argument registers,
1470 // set the LR as the signal PC + 4, set the PC to the function
1471 // to call, set R26 to point to the closure (if a closure call),
1472 // and resume execution.
1473 //
1474 // If the function returns, this will set R20 to 1 and invoke
1475 // BRK. The debugger can then inspect any return value saved
1476 // on the stack at SP+8 and in registers. To resume execution,
1477 // the debugger should restore the LR from (SP).
1478 //
1479 // If the function panics, this will set R20 to 2 and invoke BRK.
1480 // The interface{} value of the panic will be at SP+8. The debugger
1481 // can inspect the panic value and resume execution again.
1482 #define DEBUG_CALL_DISPATCH(NAME,MAXSIZE) \
1483 CMP $MAXSIZE, R0; \
1484 BGT 5(PC); \
1485 MOVD $NAME(SB), R0; \
1486 MOVD R0, 8(RSP); \
1487 CALL runtime·debugCallWrap(SB); \
1488 JMP restore
1489
1490 MOVD 256(RSP), R0 // the argument frame size
1491 DEBUG_CALL_DISPATCH(debugCall32<>, 32)
1492 DEBUG_CALL_DISPATCH(debugCall64<>, 64)
1493 DEBUG_CALL_DISPATCH(debugCall128<>, 128)
1494 DEBUG_CALL_DISPATCH(debugCall256<>, 256)
1495 DEBUG_CALL_DISPATCH(debugCall512<>, 512)
1496 DEBUG_CALL_DISPATCH(debugCall1024<>, 1024)
1497 DEBUG_CALL_DISPATCH(debugCall2048<>, 2048)
1498 DEBUG_CALL_DISPATCH(debugCall4096<>, 4096)
1499 DEBUG_CALL_DISPATCH(debugCall8192<>, 8192)
1500 DEBUG_CALL_DISPATCH(debugCall16384<>, 16384)
1501 DEBUG_CALL_DISPATCH(debugCall32768<>, 32768)
1502 DEBUG_CALL_DISPATCH(debugCall65536<>, 65536)
1503 // The frame size is too large. Report the error.
1504 MOVD $debugCallFrameTooLarge<>(SB), R0
1505 MOVD R0, 8(RSP)
1506 MOVD $20, R0
1507 MOVD R0, 16(RSP) // length of debugCallFrameTooLarge string
1508 MOVD $8, R20
1509 BREAK
1510 JMP restore
1511
1512 restore:
1513 // Calls and failures resume here.
1514 //
1515 // Set R20 to 16 and invoke BRK. The debugger should restore
1516 // all registers except for PC and RSP and resume execution.
1517 MOVD $16, R20
1518 BREAK
1519 // We must not modify flags after this point.
1520
1521 // Restore pointer-containing registers, which may have been
1522 // modified from the debugger's copy by stack copying.
1523 LDP (30*8)(RSP), (R27, g)
1524 LDP (28*8)(RSP), (R25, R26)
1525 LDP (26*8)(RSP), (R23, R24)
1526 LDP (24*8)(RSP), (R21, R22)
1527 LDP (22*8)(RSP), (R19, R20)
1528 LDP (20*8)(RSP), (R16, R17)
1529 LDP (18*8)(RSP), (R14, R15)
1530 LDP (16*8)(RSP), (R12, R13)
1531 LDP (14*8)(RSP), (R10, R11)
1532 LDP (12*8)(RSP), (R8, R9)
1533 LDP (10*8)(RSP), (R6, R7)
1534 LDP (8*8)(RSP), (R4, R5)
1535 LDP (6*8)(RSP), (R2, R3)
1536 LDP (4*8)(RSP), (R0, R1)
1537
1538 LDP -8(RSP), (R29, R27)
1539 ADD $288, RSP, RSP // Add 16 more bytes, see saveSigContext
1540 MOVD -16(RSP), R30 // restore old lr
1541 JMP (R27)
1542
1543 // runtime.debugCallCheck assumes that functions defined with the
1544 // DEBUG_CALL_FN macro are safe points to inject calls.
1545 #define DEBUG_CALL_FN(NAME,MAXSIZE) \
1546 TEXT NAME(SB),WRAPPER,$MAXSIZE-0; \
1547 NO_LOCAL_POINTERS; \
1548 MOVD $0, R20; \
1549 BREAK; \
1550 MOVD $1, R20; \
1551 BREAK; \
1552 RET
1553 DEBUG_CALL_FN(debugCall32<>, 32)
1554 DEBUG_CALL_FN(debugCall64<>, 64)
1555 DEBUG_CALL_FN(debugCall128<>, 128)
1556 DEBUG_CALL_FN(debugCall256<>, 256)
1557 DEBUG_CALL_FN(debugCall512<>, 512)
1558 DEBUG_CALL_FN(debugCall1024<>, 1024)
1559 DEBUG_CALL_FN(debugCall2048<>, 2048)
1560 DEBUG_CALL_FN(debugCall4096<>, 4096)
1561 DEBUG_CALL_FN(debugCall8192<>, 8192)
1562 DEBUG_CALL_FN(debugCall16384<>, 16384)
1563 DEBUG_CALL_FN(debugCall32768<>, 32768)
1564 DEBUG_CALL_FN(debugCall65536<>, 65536)
1565
1566 // func debugCallPanicked(val interface{})
1567 TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
1568 // Copy the panic value to the top of stack at SP+8.
1569 MOVD val_type+0(FP), R0
1570 MOVD R0, 8(RSP)
1571 MOVD val_data+8(FP), R0
1572 MOVD R0, 16(RSP)
1573 MOVD $2, R20
1574 BREAK
1575 RET
1576
1577 // Note: these functions use a special calling convention to save generated code space.
1578 // Arguments are passed in registers, but the space for those arguments are allocated
1579 // in the caller's stack frame. These stubs write the args into that stack space and
1580 // then tail call to the corresponding runtime handler.
1581 // The tail call makes these stubs disappear in backtraces.
1582 //
1583 // Defined as ABIInternal since the compiler generates ABIInternal
1584 // calls to it directly and it does not use the stack-based Go ABI.
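// For example (illustrative): for panicSliceAlen the compiler leaves the two
// operands in R1 and R2, so the stub below moves them into R0 and R1, which
// is where goPanicSliceAlen expects its two int arguments under ABIInternal,
// and then tail-jumps to the Go handler.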
1585 TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
1586 JMP runtime·goPanicIndex<ABIInternal>(SB)
1587 TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
1588 JMP runtime·goPanicIndexU<ABIInternal>(SB)
1589 TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
1590 MOVD R1, R0
1591 MOVD R2, R1
1592 JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
1593 TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
1594 MOVD R1, R0
1595 MOVD R2, R1
1596 JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
1597 TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
1598 MOVD R1, R0
1599 MOVD R2, R1
1600 JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
1601 TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
1602 MOVD R1, R0
1603 MOVD R2, R1
1604 JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
1605 TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
1606 JMP runtime·goPanicSliceB<ABIInternal>(SB)
1607 TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
1608 JMP runtime·goPanicSliceBU<ABIInternal>(SB)
1609 TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
1610 MOVD R2, R0
1611 MOVD R3, R1
1612 JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
1613 TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
1614 MOVD R2, R0
1615 MOVD R3, R1
1616 JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
1617 TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
1618 MOVD R2, R0
1619 MOVD R3, R1
1620 JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
1621 TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
1622 MOVD R2, R0
1623 MOVD R3, R1
1624 JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
1625 TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
1626 MOVD R1, R0
1627 MOVD R2, R1
1628 JMP runtime·goPanicSlice3B<ABIInternal>(SB)
1629 TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
1630 MOVD R1, R0
1631 MOVD R2, R1
1632 JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
1633 TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
1634 JMP runtime·goPanicSlice3C<ABIInternal>(SB)
1635 TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
1636 JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
1637 TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
1638 MOVD R2, R0
1639 MOVD R3, R1
1640 JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
1641
1642 TEXT ·getfp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
1643 MOVD R29, R0
1644 RET
1645