package runtime
import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine    = 35 // not currently used; previously denoted the timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, stack, name string]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCount             = 49
)
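// Note (added for this listing, not part of the original source): as
// traceEventLocked below shows, every event begins with one byte that packs
// the event type into the low six bits and the argument count into the top
// two bits (traceArgCountShift). Counts 0-2 are stored inline; an event with
// three or more arguments stores 3 and follows the first byte with an
// explicit payload-length byte. The timestamp diff and all arguments are
// varint-encoded.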
const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine was scheduled, so the first goroutine has to block again.
	traceFutileWakeup byte = 128
)
// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock locks
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled = false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}
// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.pos
}

// traceBufPtr is a *traceBuf that is not traced by the garbage collector and
// doesn't have write barriers. traceBufs are not allocated from the GC'd heap,
// so this is safe, and they are often manipulated in contexts where write
// barriers are not allowed.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}
// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	// Do not stop the world during GC, otherwise the world can be stuck
	// in a permanent STW phase: GC waits for the world to stop and that
	// never happens as we keep the world stopped here.
	stopTheWorldGC("start tracing")

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace
	// concurrently. Such syscalls use the global trace.buf, which is why
	// trace.bufLock is held for the rest of the setup.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall
	// could already emit a delayed event, which would lead to an inconsistent
	// trace. Instead, mark this m with startingtrace so that traceEvent does
	// not ignore the preliminary events emitted below; trace.enabled is set
	// afterwards.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// Otherwise exitsyscall could query sysexitticks after ticksStart but
	// before the traceEvGoInSyscall timestamp, leading to a false conclusion
	// that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// String ID 0 is reserved for the empty string.
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)
	unlock(&sched.sysmonlock)

	startTheWorldGC()
	return nil
}
// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's
	// below, and also to avoid races with traceEvent.
	stopTheWorldGC("stop tracing")

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()

	// The world is started but we've set trace.shutdown, so new tracing can't
	// start. Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they
	// do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}
// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on the writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so we can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}
// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}
// traceFullQueue queues buf onto queue of full chunks.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full chunks.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}
// traceEvent writes a single event to trace buffer, flushing the buffer if
// necessary. ev is the event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}
func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two add params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf)
	} else if gp != nil {
		gp = mp.curg
		nstk = gcallers(gp, skip, buf)
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}
// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}
// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// Initialize the buffer for writing: every buffer starts with a batch header.
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}
// traceString adds a string to the trace.strings dictionary and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// Memory allocation above may trigger tracing and cause *bufp changes.
	// The following code now works with *bufp, so there must be no memory
	// allocation or calls that might do tracing here.
	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// Double-check the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}
// traceAppend appends v to buf in little-endian-base-128 format.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}

// varint appends v to buf in little-endian-base-128 format.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
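// For illustration only (traceVarintDecodeExample is not part of the original
// source): the two encoders above emit little-endian base-128 varints, where
// each byte carries seven payload bits and the 0x80 bit marks continuation.
// For example, 300 (0b1_0010_1100) encodes as 0xac 0x02. A matching decoder,
// assuming the input was produced by traceAppend/varint, looks like this:
func traceVarintDecodeExample(buf []byte) (v uint64, n int) {
	for shift := uint(0); n < len(buf); shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b&0x80 == 0 { // continuation bit clear: this was the last byte
			return v, n
		}
	}
	return 0, 0 // truncated input
}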
// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}
// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}
// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
}
// allFrames returns all of the Frames corresponding to pcs.
func allFrames(pcs []uintptr) []Frame {
	frames := make([]Frame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, f)
		if !more {
			return frames
		}
	}
}
// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&((*tab).lock), lockRankTraceStackTab)
}
type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}
// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}
// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span sweep.
	// If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}
func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P and is therefore not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort": if they are not valid for this trace, assign a
		// fresh timestamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	if nextGC := atomic.Load64(&memstats.next_gc); nextGC == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, nextGC)
	}
}
// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for the message and its length
	// in buf, so buf now has room for the following.
	buf := bufp.ptr()

	// Double-check the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}
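// For illustration only (not part of the original source): user programs
// normally drive StartTrace/StopTrace/ReadTrace through the public
// runtime/trace package rather than calling them directly. A minimal sketch:
//
//	package main
//
//	import (
//		"os"
//		"runtime/trace"
//	)
//
//	func main() {
//		f, err := os.Create("trace.out") // inspect later with: go tool trace trace.out
//		if err != nil {
//			panic(err)
//		}
//		defer f.Close()
//		if err := trace.Start(f); err != nil { // trace.Start calls runtime.StartTrace
//			panic(err)
//		}
//		defer trace.Stop() // trace.Stop calls runtime.StopTrace
//		// ... workload to be traced ...
//	}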