// src/runtime/race_s390x.s
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build race

#include "go_asm.h"
#include "funcdata.h"
#include "textflag.h"

// The following thunks allow calling the gcc-compiled race runtime directly
// from Go code without going all the way through cgo.
// First, it's much faster (up to 50% speedup for real Go programs).
// Second, it eliminates race-related special cases from cgocall and scheduler.
// Third, in the long term it will allow removal of the cyclic runtime/race
// dependency on cmd/go.

// A brief recap of the s390x C calling convention.
// Arguments are passed in R2...R6, the rest is on stack.
// Callee-saved registers are: R6...R13, R15.
// Temporary registers are: R0...R5, R14.

// When calling racecalladdr, R1 is the call target address.

// The race ctx, ThreadState *thr below, is passed in R2 and loaded in racecalladdr.
// func runtime·raceread(addr uintptr)
// Called from instrumented code.
// Reports a read of addr to TSan; the caller's return address (R14) is
// used as the PC of the access.
TEXT	runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
	// void __tsan_read(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_read(SB), R1
#ifndef GOEXPERIMENT_regabiargs
	MOVD	addr+0(FP), R3	// addr is passed on the stack.
#else
	MOVD	R2, R3		// addr arrives in R2 under the register ABI.
#endif
	MOVD	R14, R4		// Caller PC.
	JMP	racecalladdr<>(SB)

// func runtime·RaceRead(addr uintptr)
TEXT	runtime·RaceRead(SB), NOSPLIT, $0-8
	// This needs to be a tail call, because raceread reads caller pc.
	JMP	runtime·raceread(SB)

// func runtime·racereadpc(void *addr, void *callpc, void *pc)
// Like raceread, but the PCs are supplied explicitly by the caller.
TEXT	runtime·racereadpc(SB), NOSPLIT, $0-24
	// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_read_pc(SB), R1
	LMG	addr+0(FP), R3, R5	// Load addr, callpc, pc into R3-R5 with one instruction.
	JMP	racecalladdr<>(SB)

// func runtime·racewrite(addr uintptr)
// Called from instrumented code.
// Reports a write of addr to TSan; the caller's return address (R14) is
// used as the PC of the access.
TEXT	runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
	// void __tsan_write(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_write(SB), R1
#ifndef GOEXPERIMENT_regabiargs
	MOVD	addr+0(FP), R3	// addr is passed on the stack.
#else
	MOVD	R2, R3		// addr arrives in R2 under the register ABI.
#endif
	MOVD	R14, R4		// Caller PC.
	JMP	racecalladdr<>(SB)

// func runtime·RaceWrite(addr uintptr)
TEXT	runtime·RaceWrite(SB), NOSPLIT, $0-8
	// This needs to be a tail call, because racewrite reads caller pc.
	JMP	runtime·racewrite(SB)

// func runtime·racewritepc(void *addr, void *callpc, void *pc)
// Like racewrite, but the PCs are supplied explicitly by the caller.
TEXT	runtime·racewritepc(SB), NOSPLIT, $0-24
	// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_write_pc(SB), R1
	LMG	addr+0(FP), R3, R5	// Load addr, callpc, pc into R3-R5 with one instruction.
	JMP	racecalladdr<>(SB)

// func runtime·racereadrange(addr, size uintptr)
// Called from instrumented code.
// Reports a read of the size-byte range starting at addr.
TEXT	runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R1
#ifndef GOEXPERIMENT_regabiargs
	LMG	addr+0(FP), R3, R4	// Load addr and size from the stack.
#else
	// Shuffle the register args into C argument order; move size out of
	// R3 before addr overwrites it.
	MOVD	R3, R4			// size.
	MOVD	R2, R3			// addr.
#endif
	MOVD	R14, R5			// Caller PC.
	JMP	racecalladdr<>(SB)

// func runtime·RaceReadRange(addr, size uintptr)
TEXT	runtime·RaceReadRange(SB), NOSPLIT, $0-16
	// This needs to be a tail call, because racereadrange reads caller pc.
	JMP	runtime·racereadrange(SB)

// func runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
// Range read with an explicitly supplied PC.
TEXT	runtime·racereadrangepc1(SB), NOSPLIT, $0-24
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R1
	LMG	addr+0(FP), R3, R5	// Load addr, sz, pc into R3-R5.
	// pc is an interceptor address, but TSan expects it to point to the
	// middle of an interceptor (see LLVM's SCOPED_INTERCEPTOR_RAW).
	ADD	$2, R5
	JMP	racecalladdr<>(SB)

// func runtime·racewriterange(addr, size uintptr)
// Called from instrumented code.
// Reports a write of the size-byte range starting at addr.
TEXT	runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R1
#ifndef GOEXPERIMENT_regabiargs
	LMG	addr+0(FP), R3, R4	// Load addr and size from the stack.
#else
	// Shuffle the register args into C argument order; move size out of
	// R3 before addr overwrites it.
	MOVD	R3, R4			// size.
	MOVD	R2, R3			// addr.
#endif
	MOVD	R14, R5			// Caller PC.
	JMP	racecalladdr<>(SB)

// func runtime·RaceWriteRange(addr, size uintptr)
TEXT	runtime·RaceWriteRange(SB), NOSPLIT, $0-16
	// This needs to be a tail call, because racewriterange reads caller pc.
	JMP	runtime·racewriterange(SB)

// func runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
// Range write with an explicitly supplied PC.
TEXT	runtime·racewriterangepc1(SB), NOSPLIT, $0-24
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R1
	LMG	addr+0(FP), R3, R5	// Load addr, sz, pc into R3-R5.
	// pc is an interceptor address, but TSan expects it to point to the
	// middle of an interceptor (see LLVM's SCOPED_INTERCEPTOR_RAW).
	ADD	$2, R5
	JMP	racecalladdr<>(SB)

// If R3 (the accessed address) is out of range — i.e. not inside either
// the race shadow arena [racearenastart, racearenaend) or the data range
// [racedatastart, racedataend) — do nothing. Otherwise, set up goroutine
// context and invoke racecall. Other arguments (R1 = target function,
// R4/R5) are already set by the caller.
TEXT	racecalladdr<>(SB), NOSPLIT, $0-0
	MOVD	runtime·racearenastart(SB), R0
	CMPUBLT	R3, R0, data	// Before racearena start?
	MOVD	runtime·racearenaend(SB), R0
	CMPUBLT	R3, R0, call	// Before racearena end?
data:
	MOVD	runtime·racedatastart(SB), R0
	CMPUBLT	R3, R0, ret	// Before racedata start?
	MOVD	runtime·racedataend(SB), R0
	CMPUBGE	R3, R0, ret	// At or after racedata end?
call:
	MOVD	g_racectx(g), R2	// ThreadState * for the current goroutine.
	JMP	racecall<>(SB)
ret:
	RET

// func runtime·racefuncenter(pc uintptr)
// Called from instrumented code.
TEXT	runtime·racefuncenter(SB), NOSPLIT, $0-8
	MOVD	callpc+0(FP), R3
	JMP	racefuncenter<>(SB)

// Common code for racefuncenter
// R3 = caller's return address
TEXT	racefuncenter<>(SB), NOSPLIT, $0-0
	// void __tsan_func_enter(ThreadState *thr, void *pc);
	MOVD	$__tsan_func_enter(SB), R1
	MOVD	g_racectx(g), R2	// ThreadState * for the current goroutine.
	BL	racecall<>(SB)
	RET

// func runtime·racefuncexit()
// Called from instrumented code.
TEXT	runtime·racefuncexit(SB), NOSPLIT, $0-0
	// void __tsan_func_exit(ThreadState *thr);
	MOVD	$__tsan_func_exit(SB), R1
	MOVD	g_racectx(g), R2	// ThreadState * for the current goroutine.
	JMP	racecall<>(SB)

// Atomic operations for sync/atomic package.
//
// Each thunk loads the matching __tsan_go_atomic* entry point into R1 and
// calls racecallatomic, which picks the Go arguments up from the caller's
// stack frame. Unsigned/uintptr/pointer variants tail-call the same-width
// signed variant, which performs the identical memory operation.

// Load

TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	MOVD	$__tsan_go_atomic32_load(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	MOVD	$__tsan_go_atomic64_load(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	JMP	sync∕atomic·LoadInt32(SB)

TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·LoadInt64(SB)

// Store

TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	MOVD	$__tsan_go_atomic32_store(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	MOVD	$__tsan_go_atomic64_store(SB), R1
	BL	racecallatomic<>(SB)
	RET

// Unsigned and uintptr variants tail-call the same-width signed variant.
TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	JMP	sync∕atomic·StoreInt32(SB)

TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·StoreInt64(SB)

TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·StoreInt64(SB)

// Swap

TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	MOVD	$__tsan_go_atomic32_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	MOVD	$__tsan_go_atomic64_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

// Unsigned and uintptr variants tail-call the same-width signed variant.
TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	JMP	sync∕atomic·SwapInt32(SB)

TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·SwapInt64(SB)

TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·SwapInt64(SB)

// Add

TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	MOVD	$__tsan_go_atomic32_fetch_add(SB), R1
	BL	racecallatomic<>(SB)
	// TSan performed fetch_add, but Go needs add_fetch.
	// Add the delta to the returned old value to produce the new value.
	MOVW	add+8(FP), R0
	MOVW	ret+16(FP), R1
	ADD	R0, R1, R0
	MOVW	R0, ret+16(FP)
	RET

TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	MOVD	$__tsan_go_atomic64_fetch_add(SB), R1
	BL	racecallatomic<>(SB)
	// TSan performed fetch_add, but Go needs add_fetch.
	// Add the delta to the returned old value to produce the new value.
	MOVD	add+8(FP), R0
	MOVD	ret+16(FP), R1
	ADD	R0, R1, R0
	MOVD	R0, ret+16(FP)
	RET

// Unsigned and uintptr variants tail-call the same-width signed variant.
TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	JMP	sync∕atomic·AddInt32(SB)

TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·AddInt64(SB)

TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·AddInt64(SB)

// And
TEXT	sync∕atomic·AndInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	MOVD	$__tsan_go_atomic32_fetch_and(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·AndInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	MOVD	$__tsan_go_atomic64_fetch_and(SB), R1
	BL	racecallatomic<>(SB)
	RET

// Unsigned and uintptr variants tail-call the same-width signed variant.
TEXT	sync∕atomic·AndUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	JMP	sync∕atomic·AndInt32(SB)

TEXT	sync∕atomic·AndUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·AndInt64(SB)

TEXT	sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·AndInt64(SB)

// Or
TEXT	sync∕atomic·OrInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	MOVD	$__tsan_go_atomic32_fetch_or(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·OrInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	MOVD	$__tsan_go_atomic64_fetch_or(SB), R1
	BL	racecallatomic<>(SB)
	RET

// Unsigned and uintptr variants tail-call the same-width signed variant.
TEXT	sync∕atomic·OrUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	JMP	sync∕atomic·OrInt32(SB)

TEXT	sync∕atomic·OrUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·OrInt64(SB)

TEXT	sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·OrInt64(SB)

// CompareAndSwap

TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
	GO_ARGS
	MOVD	$__tsan_go_atomic32_compare_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
	GO_ARGS
	MOVD	$__tsan_go_atomic64_compare_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

// Unsigned and uintptr variants tail-call the same-width signed variant.
TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
	GO_ARGS
	JMP	sync∕atomic·CompareAndSwapInt32(SB)

TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
	GO_ARGS
	JMP	sync∕atomic·CompareAndSwapInt64(SB)

TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
	GO_ARGS
	JMP	sync∕atomic·CompareAndSwapInt64(SB)

// Common code for atomic operations. Calls R1.
// R1 = __tsan_go_atomic* function to call.
// The Go arguments of the sync/atomic function sit at 24(R15): past the
// two return addresses pushed by the two BLs (sync/atomic thunk -> here).
TEXT	racecallatomic<>(SB), NOSPLIT, $0
	MOVD	24(R15), R5	// Address (arg1, after 2xBL).
	// If we pass an invalid pointer to the TSan runtime, it will cause a
	// "fatal error: unknown caller pc". So trigger a SEGV here instead.
	MOVB	(R5), R0
	MOVD	runtime·racearenastart(SB), R0
	CMPUBLT	R5, R0, racecallatomic_data	// Before racearena start?
	MOVD	runtime·racearenaend(SB), R0
	CMPUBLT	R5, R0, racecallatomic_ok	// Before racearena end?
racecallatomic_data:
	MOVD	runtime·racedatastart(SB), R0
	CMPUBLT	R5, R0, racecallatomic_ignore	// Before racedata start?
	MOVD	runtime·racedataend(SB), R0
	CMPUBGE	R5, R0, racecallatomic_ignore	// At or after racedata end?
racecallatomic_ok:
	// Address is inside an instrumented range: call the atomic function.
	MOVD	g_racectx(g), R2	// ThreadState *.
	MOVD	8(R15), R3		// Caller PC.
	MOVD	R14, R4			// PC.
	ADD	$24, R15, R5		// Arguments.
	// Tail call fails to restore R15, so use a normal one.
	BL	racecall<>(SB)
	RET
racecallatomic_ignore:
	// Address is outside the instrumented ranges.
	// Call __tsan_go_ignore_sync_begin to ignore synchronization during
	// the atomic op. An attempt to synchronize on the address would cause
	// a crash.
	MOVD	R1, R6	// Save target function.
	MOVD	R14, R7	// Save PC.
	MOVD	$__tsan_go_ignore_sync_begin(SB), R1
	MOVD	g_racectx(g), R2	// ThreadState *.
	BL	racecall<>(SB)
	// Perform the atomic op with synchronization ignored.
	MOVD	R6, R1	// Restore target function.
	MOVD	g_racectx(g), R2	// ThreadState *.
	MOVD	8(R15), R3		// Caller PC.
	MOVD	R7, R4			// PC.
	ADD	$24, R15, R5		// Arguments.
	BL	racecall<>(SB)
	// Re-enable synchronization tracking.
	MOVD	$__tsan_go_ignore_sync_end(SB), R1
	MOVD	g_racectx(g), R2	// ThreadState *.
	BL	racecall<>(SB)
	RET

// func runtime·racecall(void(*f)(...), ...)
// Calls C function f from race runtime and passes up to 4 arguments to it.
// The arguments are never heap-object-preserving pointers, so we pretend there
// are no arguments.
TEXT	runtime·racecall(SB), NOSPLIT, $0-0
	MOVD	fn+0(FP), R1	// C function to call.
	MOVD	arg0+8(FP), R2	// Arguments go in R2-R5 per the C calling convention.
	MOVD	arg1+16(FP), R3
	MOVD	arg2+24(FP), R4
	MOVD	arg3+32(FP), R5
	JMP	racecall<>(SB)

// Switches SP to g0 stack and calls R1. Arguments are already set.
TEXT	racecall<>(SB), NOSPLIT, $0-0
	BL	runtime·save_g(SB)	// Save g for callbacks.
	MOVD	R15, R7			// Save SP.
	MOVD	g_m(g), R8		// R8 = thread.

	// Switch to g0 stack if we aren't already on g0 or gsignal.
	MOVD	m_gsignal(R8), R9
	CMPBEQ	R9, g, call

	MOVD	m_g0(R8), R9
	CMPBEQ	R9, g, call

	MOVD	(g_sched+gobuf_sp)(R9), R15	// Switch SP to g0.

call:	SUB	$160, R15	// Allocate C frame.
	BL	R1		// Call C code.
	MOVD	R7, R15		// Restore SP.
	RET			// Return to Go.

// C->Go callback thunk that allows to call runtime·racesymbolize from C
// code. racecall has only switched SP, finish g->g0 switch by setting correct
// g. R2 contains command code, R3 contains command-specific context. See
// racecallback for command codes.
TEXT	runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0
	STMG	R6, R15, 48(R15)	// Save non-volatile regs.
	BL	runtime·load_g(SB)	// Saved by racecall.
	CMPBNE	R2, $0, rest		// raceGetProcCmd?
	// Fast path for raceGetProcCmd (command 0): no g0 switch needed.
	MOVD	g_m(g), R2		// R2 = thread.
	MOVD	m_p(R2), R2		// R2 = processor.
	MVC	$8, p_raceprocctx(R2), (R3)	// *R3 = ThreadState * (copy 8 bytes).
	LMG	48(R15), R6, R15	// Restore non-volatile regs.
	BR	R14			// Return to C.
rest:	MOVD	g_m(g), R4		// R4 = current thread.
	MOVD	m_g0(R4), g		// Switch to g0.
	SUB	$24, R15		// Allocate Go argument slots.
	STMG	R2, R3, 8(R15)		// Fill Go frame.
	BL	runtime·racecallback(SB)	// Call Go code.
	LMG	72(R15), R6, R15	// Restore non-volatile regs (saved at 48, plus the 24-byte Go frame).
	BR	R14			// Return to C.
