Source file
src/runtime/tracestack.go
1
2
3
4
5
6
7 package runtime
8
9 import (
10 "internal/abi"
11 "internal/goarch"
12 "unsafe"
13 )
14
const (
	// traceStackSize is the maximum number of PCs recorded for a single
	// stack trace. Trace events reference stacks by id rather than by
	// value, so a generous per-stack limit is affordable.
	traceStackSize = 128

	// logicalStackSentinel, when stored at pcBuf[0], marks pcBuf[1:] as an
	// already-logical stack that fpunwindExpand must return unmodified.
	// Any other value at pcBuf[0] is interpreted as the number of logical
	// frames to skip after inline expansion of a frame-pointer stack.
	logicalStackSentinel = ^uintptr(0)
)
27
28
29
30
31
32
33
34
35
36
37
// traceStack captures a stack trace for the execution tracer and registers
// it in the stack table for generation gen, returning the stack's unique id.
//
// skip is the number of leading frames to omit. If gp is nil, the current
// M's running goroutine (getg().m.curg) is traced instead. gen selects
// which of the two per-generation stack tables receives the stack.
func traceStack(skip int, gp *g, gen uintptr) uint64 {
	var pcBuf [traceStackSize]uintptr

	// Resolve gp (and remember our M) when the caller didn't supply a goroutine.
	var mp *m
	if gp == nil {
		mp = getg().m
		gp = mp.curg
	}

	// Optional debug check: verify this thread may read gp's stack.
	if debug.traceCheckStackOwnership != 0 && gp != nil {
		status := readgstatus(gp)
		// If the scan bit is set, the goroutine is already stopped for us.
		if status&_Gscan == 0 {
			// A running or syscalling goroutine may only be traced from its
			// own thread; anything else is a tracer bug.
			switch goStatusToTraceGoStatus(status, gp.waitreason) {
			case traceGoRunning, traceGoSyscall:
				if getg() == gp || mp.curg == gp {
					break
				}
				fallthrough
			default:
				print("runtime: gp=", unsafe.Pointer(gp), " gp.goid=", gp.goid, " status=", gStatusStrings[status], "\n")
				throw("attempted to trace stack of a goroutine this thread does not own")
			}
		}
	}

	if gp != nil && mp == nil {
		// The caller passed gp directly, so we have no M yet; use gp's
		// locked M (if any) for the cgo-on-stack check below.
		mp = gp.lockedm.ptr()
	}
	nstk := 1
	if tracefpunwindoff() || (mp != nil && mp.hasCgoOnStack()) {
		// Slow path: frame-pointer unwinding is off, or cgo frames make it
		// unreliable. Take a logical (symbolized) traceback and mark the
		// buffer with the sentinel so fpunwindExpand leaves it as-is.
		pcBuf[0] = logicalStackSentinel
		if getg() == gp {
			nstk += callers(skip+1, pcBuf[1:])
		} else if gp != nil {
			nstk += gcallers(gp, skip, pcBuf[1:])
		}
	} else {
		// Fast path: frame-pointer unwinding. Record skip (not the
		// sentinel) in pcBuf[0] so fpunwindExpand knows how many logical
		// frames to drop after inline expansion.
		pcBuf[0] = uintptr(skip)
		if getg() == gp {
			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf[1:])
		} else if gp != nil {
			// gp is not running on this thread. If it's in a syscall,
			// unwind from the PC/BP saved at syscall entry; otherwise use
			// the scheduler-saved PC/BP.
			if gp.syscallsp != 0 {
				pcBuf[1] = gp.syscallpc
				nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.syscallbp), pcBuf[2:])
			} else {
				pcBuf[1] = gp.sched.pc
				nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.sched.bp), pcBuf[2:])
			}
		}
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // goid 1 is presumably the main goroutine; skip runtime.main — TODO confirm
	}
	id := trace.stackTab[gen%2].put(pcBuf[:nstk])
	return id
}
129
130
131
// traceStackTable maps stack traces (arrays of PCs) to unique ids for one
// trace generation. It is a thin wrapper around a traceMap keyed by the raw
// bytes of the PC slice.
type traceStackTable struct {
	tab traceMap
}
135
136
137
138 func (t *traceStackTable) put(pcs []uintptr) uint64 {
139 if len(pcs) == 0 {
140 return 0
141 }
142 id, _ := t.tab.put(noescape(unsafe.Pointer(&pcs[0])), uintptr(len(pcs))*unsafe.Sizeof(uintptr(0)))
143 return id
144 }
145
146
147
148
149 func (t *traceStackTable) dump(gen uintptr) {
150 w := unsafeTraceWriter(gen, nil)
151 if root := (*traceMapNode)(t.tab.root.Load()); root != nil {
152 w = dumpStacksRec(root, w)
153 }
154 w.flush().end()
155 t.tab.reset()
156 }
157
// dumpStacksRec writes the stack-table subtree rooted at node into the trace
// as stack events, recursing into node's children. It returns the (possibly
// flushed-and-replaced) trace writer for the caller to continue with.
func dumpStacksRec(node *traceMapNode, w traceWriter) traceWriter {
	// Reinterpret the node's raw key bytes as the []uintptr PC slice that
	// traceStackTable.put originally stored.
	stack := unsafe.Slice((*uintptr)(unsafe.Pointer(&node.data[0])), uintptr(len(node.data))/unsafe.Sizeof(uintptr(0)))

	// Expand frame-pointer PCs into logical frames (including inlined
	// frames) and intern the function/file strings for this generation.
	frames := makeTraceFrames(w.gen, fpunwindExpand(stack))

	// Upper bound on the encoded size: one event byte plus varints for the
	// stack id, the frame count, and four fields per frame.
	maxBytes := 1 + (2+4*len(frames))*traceBytesPerNumber

	// Make sure the writer has room for the whole event; if ensure flushed
	// to a fresh buffer, the new batch needs its event-type header again.
	var flushed bool
	w, flushed = w.ensure(1 + maxBytes)
	if flushed {
		w.byte(byte(traceEvStacks))
	}

	// Emit the stack event: id, frame count, then PC/func/file/line per frame.
	w.byte(byte(traceEvStack))
	w.varint(uint64(node.id))
	w.varint(uint64(len(frames)))
	for _, frame := range frames {
		w.varint(uint64(frame.PC))
		w.varint(frame.funcID)
		w.varint(frame.fileID)
		w.varint(frame.line)
	}

	// Recurse into each non-nil child of this node.
	for i := range node.children {
		child := node.children[i].Load()
		if child == nil {
			continue
		}
		w = dumpStacksRec((*traceMapNode)(child), w)
	}
	return w
}
201
202
203
204 func makeTraceFrames(gen uintptr, pcs []uintptr) []traceFrame {
205 frames := make([]traceFrame, 0, len(pcs))
206 ci := CallersFrames(pcs)
207 for {
208 f, more := ci.Next()
209 frames = append(frames, makeTraceFrame(gen, f))
210 if !more {
211 return frames
212 }
213 }
214 }
215
// traceFrame is a single stack frame in the form it is encoded into the
// trace: the PC plus string-table ids for the function and file names.
type traceFrame struct {
	PC     uintptr
	funcID uint64 // string-table id of the function name
	fileID uint64 // string-table id of the file name
	line   uint64 // source line number
}
222
223
224 func makeTraceFrame(gen uintptr, f Frame) traceFrame {
225 var frame traceFrame
226 frame.PC = f.PC
227
228 fn := f.Function
229 const maxLen = 1 << 10
230 if len(fn) > maxLen {
231 fn = fn[len(fn)-maxLen:]
232 }
233 frame.funcID = trace.stringTab[gen%2].put(gen, fn)
234 frame.line = uint64(f.Line)
235 file := f.File
236 if len(file) > maxLen {
237 file = file[len(file)-maxLen:]
238 }
239 frame.fileID = trace.stringTab[gen%2].put(gen, file)
240 return frame
241 }
242
243
244
245 func tracefpunwindoff() bool {
246 return debug.tracefpunwindoff != 0 || (goarch.ArchFamily != goarch.AMD64 && goarch.ArchFamily != goarch.ARM64)
247 }
248
249
250
251
252
253 func fpTracebackPCs(fp unsafe.Pointer, pcBuf []uintptr) (i int) {
254 for i = 0; i < len(pcBuf) && fp != nil; i++ {
255
256 pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
257
258 fp = unsafe.Pointer(*(*uintptr)(fp))
259 }
260 return i
261 }
262
263
264
265
266
267
// fpunwindExpand expands a stack trace produced by frame-pointer unwinding
// into a logical stack, resolving inlined frames and eliding wrapper
// functions.
//
// pcBuf[0] is a header: logicalStackSentinel means pcBuf[1:] is already a
// logical stack and is returned unchanged; any other value is the number of
// logical frames to skip from the front after inline expansion.
func fpunwindExpand(pcBuf []uintptr) []uintptr {
	if len(pcBuf) > 0 && pcBuf[0] == logicalStackSentinel {
		// pcBuf already contains a logical stack; strip the sentinel.
		return pcBuf[1:]
	}

	var (
		lastFuncID = abi.FuncIDNormal
		newPCBuf   = make([]uintptr, 0, traceStackSize)
		skip       = pcBuf[0] // header word: remaining logical frames to drop
		// skipOrAdd consumes one unit of skip, or else appends retPC to
		// newPCBuf; it reports whether there is room for more PCs.
		skipOrAdd = func(retPC uintptr) bool {
			if skip > 0 {
				skip--
			} else {
				newPCBuf = append(newPCBuf, retPC)
			}
			return len(newPCBuf) < cap(newPCBuf)
		}
	)

outer:
	for _, retPC := range pcBuf[1:] {
		// Return addresses point just past the call; step back one byte so
		// symbolization lands inside the calling instruction.
		callPC := retPC - 1
		fi := findfunc(callPC)
		if !fi.valid() {
			// No funcInfo — presumably a non-Go (e.g. C) frame; keep the PC
			// as-is but don't attempt inline expansion. TODO confirm.
			if more := skipOrAdd(retPC); !more {
				break outer
			}
			continue
		}

		// Expand any frames inlined at this call site, innermost first.
		u, uf := newInlineUnwinder(fi, callPC)
		for ; uf.valid(); uf = u.next(uf) {
			sf := u.srcFunc(uf)
			if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
				// Skip wrapper frames, matching conventional tracebacks.
			} else if more := skipOrAdd(uf.pc + 1); !more {
				// +1 restores the return-address form consumers expect.
				break outer
			}
			lastFuncID = sf.funcID
		}
	}
	return newPCBuf
}
317
318
319
320
321 func startPCForTrace(pc uintptr) uintptr {
322 f := findfunc(pc)
323 if !f.valid() {
324 return pc
325 }
326 w := funcdata(f, abi.FUNCDATA_WrapInfo)
327 if w == nil {
328 return pc
329 }
330 return f.datap.textAddr(*(*uint32)(w))
331 }
332
View as plain text