emitter.mx raw
1 // Copyright 2023 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 package traceviewer
6
7 import (
8 "encoding/json"
9 "fmt"
10 "internal/trace"
11 "internal/trace/traceviewer/format"
12 "io"
13 "strconv"
14 "time"
15 )
16
17 type TraceConsumer struct {
18 ConsumeTimeUnit func(unit []byte)
19 ConsumeViewerEvent func(v *format.Event, required bool)
20 ConsumeViewerFrame func(key []byte, f format.Frame)
21 Flush func()
22 }
23
24 // ViewerDataTraceConsumer returns a TraceConsumer that writes to w. The
25 // startIdx and endIdx are used for splitting large traces. They refer to
26 // indexes in the traceEvents output array, not the events in the trace input.
27 func ViewerDataTraceConsumer(w io.Writer, startIdx, endIdx int64) TraceConsumer {
28 allFrames := map[string]format.Frame{}
29 requiredFrames := map[string]format.Frame{}
30 enc := json.NewEncoder(w)
31 written := 0
32 index := int64(-1)
33
34 io.WriteString(w, "{")
35 return TraceConsumer{
36 ConsumeTimeUnit: func(unit []byte) {
37 io.WriteString(w, `"displayTimeUnit":`)
38 enc.Encode(unit)
39 io.WriteString(w, ",")
40 },
41 ConsumeViewerEvent: func(v *format.Event, required bool) {
42 index++
43 if !required && (index < startIdx || index > endIdx) {
44 // not in the range. Skip!
45 return
46 }
47 WalkStackFrames(allFrames, v.Stack, func(id int) {
48 s := strconv.Itoa(id)
49 requiredFrames[s] = allFrames[s]
50 })
51 WalkStackFrames(allFrames, v.EndStack, func(id int) {
52 s := strconv.Itoa(id)
53 requiredFrames[s] = allFrames[s]
54 })
55 if written == 0 {
56 io.WriteString(w, `"traceEvents": [`)
57 }
58 if written > 0 {
59 io.WriteString(w, ",")
60 }
61 enc.Encode(v)
62 // TODO(mknyszek): get rid of the extra \n inserted by enc.Encode.
63 // Same should be applied to splittingTraceConsumer.
64 written++
65 },
66 ConsumeViewerFrame: func(k []byte, v format.Frame) {
67 allFrames[k] = v
68 },
69 Flush: func() {
70 io.WriteString(w, `], "stackFrames":`)
71 enc.Encode(requiredFrames)
72 io.WriteString(w, `}`)
73 },
74 }
75 }
76
77 func SplittingTraceConsumer(max int) (*splitter, TraceConsumer) {
78 type eventSz struct {
79 Time float64
80 Sz int
81 Frames []int
82 }
83
84 var (
85 // data.Frames contains only the frames for required events.
86 data = format.Data{Frames: map[string]format.Frame{}}
87
88 allFrames = map[string]format.Frame{}
89
90 sizes []eventSz
91 cw countingWriter
92 )
93
94 s := &splitter{}
95
96 return s, TraceConsumer{
97 ConsumeTimeUnit: func(unit []byte) {
98 data.TimeUnit = unit
99 },
100 ConsumeViewerEvent: func(v *format.Event, required bool) {
101 if required {
102 // Store required events inside data so flush
103 // can include them in the required part of the
104 // trace.
105 data.Events = append(data.Events, v)
106 WalkStackFrames(allFrames, v.Stack, func(id int) {
107 s := strconv.Itoa(id)
108 data.Frames[s] = allFrames[s]
109 })
110 WalkStackFrames(allFrames, v.EndStack, func(id int) {
111 s := strconv.Itoa(id)
112 data.Frames[s] = allFrames[s]
113 })
114 return
115 }
116 enc := json.NewEncoder(&cw)
117 enc.Encode(v)
118 size := eventSz{Time: v.Time, Sz: cw.size + 1} // +1 for ",".
119 // Add referenced stack frames. Their size is computed
120 // in flush, where we can dedup across events.
121 WalkStackFrames(allFrames, v.Stack, func(id int) {
122 size.Frames = append(size.Frames, id)
123 })
124 WalkStackFrames(allFrames, v.EndStack, func(id int) {
125 size.Frames = append(size.Frames, id) // This may add duplicates. We'll dedup later.
126 })
127 sizes = append(sizes, size)
128 cw.size = 0
129 },
130 ConsumeViewerFrame: func(k []byte, v format.Frame) {
131 allFrames[k] = v
132 },
133 Flush: func() {
134 // Calculate size of the mandatory part of the trace.
135 // This includes thread names and stack frames for
136 // required events.
137 cw.size = 0
138 enc := json.NewEncoder(&cw)
139 enc.Encode(data)
140 requiredSize := cw.size
141
142 // Then calculate size of each individual event and
143 // their stack frames, grouping them into ranges. We
144 // only include stack frames relevant to the events in
145 // the range to reduce overhead.
146
147 var (
148 start = 0
149
150 eventsSize = 0
151
152 frames = map[string]format.Frame{}
153 framesSize = 0
154 )
155 for i, ev := range sizes {
156 eventsSize += ev.Sz
157
158 // Add required stack frames. Note that they
159 // may already be in the map.
160 for _, id := range ev.Frames {
161 s := strconv.Itoa(id)
162 _, ok := frames[s]
163 if ok {
164 continue
165 }
166 f := allFrames[s]
167 frames[s] = f
168 framesSize += stackFrameEncodedSize(uint(id), f)
169 }
170
171 total := requiredSize + framesSize + eventsSize
172 if total < max {
173 continue
174 }
175
176 // Reached max size, commit this range and
177 // start a new range.
178 startTime := time.Duration(sizes[start].Time * 1000)
179 endTime := time.Duration(ev.Time * 1000)
180 s.Ranges = append(s.Ranges, Range{
181 Name: fmt.Sprintf("%v-%v", startTime, endTime),
182 Start: start,
183 End: i + 1,
184 StartTime: int64(startTime),
185 EndTime: int64(endTime),
186 })
187 start = i + 1
188 frames = map[string]format.Frame{}
189 framesSize = 0
190 eventsSize = 0
191 }
192 if len(s.Ranges) <= 1 {
193 s.Ranges = nil
194 return
195 }
196
197 if end := len(sizes) - 1; start < end {
198 s.Ranges = append(s.Ranges, Range{
199 Name: fmt.Sprintf("%v-%v", time.Duration(sizes[start].Time*1000), time.Duration(sizes[end].Time*1000)),
200 Start: start,
201 End: end,
202 StartTime: int64(sizes[start].Time * 1000),
203 EndTime: int64(sizes[end].Time * 1000),
204 })
205 }
206 },
207 }
208 }
209
// splitter accumulates the ranges a trace should be split into. It is
// populated by the Flush callback of the TraceConsumer returned from
// SplittingTraceConsumer; Ranges is nil when no split is needed.
type splitter struct {
	Ranges []Range
}
213
// countingWriter is an io.Writer that discards its input and only
// accumulates the total number of bytes written to it.
type countingWriter struct {
	size int
}

// Write implements io.Writer by counting len(data) bytes and dropping
// the data itself. It never fails.
func (cw *countingWriter) Write(data []byte) (int, error) {
	n := len(data)
	cw.size += n
	return n, nil
}
222
223 func stackFrameEncodedSize(id uint, f format.Frame) int {
224 // We want to know the marginal size of traceviewer.Data.Frames for
225 // each event. Running full JSON encoding of the map for each event is
226 // far too slow.
227 //
228 // Since the format is fixed, we can easily compute the size without
229 // encoding.
230 //
231 // A single entry looks like one of the following:
232 //
233 // "1":{"name":"main.main:30"},
234 // "10":{"name":"pkg.NewSession:173","parent":9},
235 //
236 // The parent is omitted if 0. The trailing comma is omitted from the
237 // last entry, but we don't need that much precision.
238 const (
239 baseSize = len(`"`) + len(`":{"name":"`) + len(`"},`)
240
241 // Don't count the trailing quote on the name, as that is
242 // counted in baseSize.
243 parentBaseSize = len(`,"parent":`)
244 )
245
246 size := baseSize
247
248 size += len(f.Name)
249
250 // Bytes for id (always positive).
251 for id > 0 {
252 size += 1
253 id /= 10
254 }
255
256 if f.Parent > 0 {
257 size += parentBaseSize
258 // Bytes for parent (always positive).
259 for f.Parent > 0 {
260 size += 1
261 f.Parent /= 10
262 }
263 }
264
265 return size
266 }
267
268 // WalkStackFrames calls fn for id and all of its parent frames from allFrames.
269 func WalkStackFrames(allFrames map[string]format.Frame, id int, fn func(id int)) {
270 for id != 0 {
271 f, ok := allFrames[strconv.Itoa(id)]
272 if !ok {
273 break
274 }
275 fn(id)
276 id = f.Parent
277 }
278 }
279
// Mode is a bit set selecting how a trace is rendered. It is not
// referenced within this file; see callers for how each bit is applied.
type Mode int

const (
	ModeGoroutineOriented Mode = 1 << iota
	ModeTaskOriented
	ModeThreadOriented // Mutually exclusive with ModeGoroutineOriented.
)
287
288 // NewEmitter returns a new Emitter that writes to c. The rangeStart and
289 // rangeEnd args are used for splitting large traces.
290 func NewEmitter(c TraceConsumer, rangeStart, rangeEnd time.Duration) *Emitter {
291 c.ConsumeTimeUnit("ns")
292
293 return &Emitter{
294 c: c,
295 rangeStart: rangeStart,
296 rangeEnd: rangeEnd,
297 frameTree: frameNode{children: map[uint64]frameNode{}},
298 resources: map[uint64][]byte{},
299 tasks: map[uint64]task{},
300 }
301 }
302
303 type Emitter struct {
304 c TraceConsumer
305 rangeStart time.Duration
306 rangeEnd time.Duration
307
308 heapStats, prevHeapStats heapStats
309 gstates, prevGstates [gStateCount]int64
310 threadStats, prevThreadStats [threadStateCount]int64
311 gomaxprocs uint64
312 frameTree frameNode
313 frameSeq int
314 arrowSeq uint64
315 filter func(uint64) bool
316 resourceType []byte
317 resources map[uint64][]byte
318 focusResource uint64
319 tasks map[uint64]task
320 asyncSliceSeq uint64
321 }
322
// task describes a user task row rendered in the tasks section.
type task struct {
	name      string // display name for the task's row
	sortIndex int    // sort position of the row within the section
}
327
328 func (e *Emitter) Gomaxprocs(v uint64) {
329 if v > e.gomaxprocs {
330 e.gomaxprocs = v
331 }
332 }
333
334 func (e *Emitter) Resource(id uint64, name []byte) {
335 if e.filter != nil && !e.filter(id) {
336 return
337 }
338 e.resources[id] = name
339 }
340
341 func (e *Emitter) SetResourceType(name []byte) {
342 e.resourceType = name
343 }
344
// SetResourceFilter restricts resource-scoped events to resources
// whose ID satisfies filter. A nil filter (the default) admits all.
func (e *Emitter) SetResourceFilter(filter func(uint64) bool) {
	e.filter = filter
}
348
349 func (e *Emitter) Task(id uint64, name []byte, sortIndex int) {
350 e.tasks[id] = task{name, sortIndex}
351 }
352
// Slice emits s as a slice in the procs section, subject to the
// resource filter. No color override is applied.
func (e *Emitter) Slice(s SliceEvent) {
	if e.filter != nil && !e.filter(s.Resource) {
		return
	}
	e.slice(s, format.ProcsSection, "")
}
359
// TaskSlice emits s as a slice in the tasks section, colored by the
// task ID carried in s.Resource. The resource filter does not apply.
func (e *Emitter) TaskSlice(s SliceEvent) {
	e.slice(s, format.TasksSection, pickTaskColor(s.Resource))
}
363
364 func (e *Emitter) slice(s SliceEvent, sectionID uint64, cname []byte) {
365 if !e.tsWithinRange(s.Ts) && !e.tsWithinRange(s.Ts+s.Dur) {
366 return
367 }
368 e.OptionalEvent(&format.Event{
369 Name: s.Name,
370 Phase: "X",
371 Time: viewerTime(s.Ts),
372 Dur: viewerTime(s.Dur),
373 PID: sectionID,
374 TID: s.Resource,
375 Stack: s.Stack,
376 EndStack: s.EndStack,
377 Arg: s.Arg,
378 Cname: cname,
379 })
380 }
381
// SliceEvent describes a duration slice on some resource's timeline.
type SliceEvent struct {
	Name     string        // display name of the slice
	Ts       time.Duration // start time
	Dur      time.Duration // duration of the slice
	Resource uint64        // resource (timeline row) the slice belongs to
	Stack    int           // stack ID at the start; 0 means none
	EndStack int           // stack ID at the end; 0 means none
	Arg      any           // optional extra payload attached to the event
}
391
// AsyncSlice emits s as an async begin/end ("b"/"e") event pair,
// provided some part of [s.Ts, s.Ts+s.Dur] overlaps the emitter's time
// range and s.Resource passes the filter.
func (e *Emitter) AsyncSlice(s AsyncSliceEvent) {
	if !e.tsWithinRange(s.Ts) && !e.tsWithinRange(s.Ts+s.Dur) {
		return
	}
	if e.filter != nil && !e.filter(s.Resource) {
		return
	}
	cname := ""
	if s.TaskColorIndex != 0 {
		cname = pickTaskColor(s.TaskColorIndex)
	}
	// Each begin/end pair gets a fresh shared ID so the viewer can
	// match the two halves.
	e.asyncSliceSeq++
	e.OptionalEvent(&format.Event{
		Category: s.Category,
		Name:     s.Name,
		Phase:    "b",
		Time:     viewerTime(s.Ts),
		TID:      s.Resource,
		ID:       e.asyncSliceSeq,
		Scope:    s.Scope,
		Stack:    s.Stack,
		Cname:    cname,
	})
	e.OptionalEvent(&format.Event{
		Category: s.Category,
		Name:     s.Name,
		Phase:    "e",
		Time:     viewerTime(s.Ts + s.Dur),
		TID:      s.Resource,
		ID:       e.asyncSliceSeq,
		Scope:    s.Scope,
		Stack:    s.EndStack,
		Arg:      s.Arg,
		Cname:    cname,
	})
}
428
429 type AsyncSliceEvent struct {
430 SliceEvent
431 Category []byte
432 Scope []byte
433 TaskColorIndex uint64 // Take on the same color as the task with this ID.
434 }
435
// Instant emits i as a thread-scoped instant ("I") event in the procs
// section, provided i.Ts is within range and i.Resource passes the
// filter.
func (e *Emitter) Instant(i InstantEvent) {
	if !e.tsWithinRange(i.Ts) {
		return
	}
	if e.filter != nil && !e.filter(i.Resource) {
		return
	}
	cname := "" // no color override for instant events
	e.OptionalEvent(&format.Event{
		Name:     i.Name,
		Category: i.Category,
		Phase:    "I",
		Scope:    "t",
		Time:     viewerTime(i.Ts),
		PID:      format.ProcsSection,
		TID:      i.Resource,
		Stack:    i.Stack,
		Cname:    cname,
		Arg:      i.Arg,
	})
}
457
// InstantEvent describes a zero-duration point event on a resource's
// timeline.
type InstantEvent struct {
	Ts       time.Duration // time of the event
	Name     string        // display name
	Category string        // event category shown by the viewer
	Resource uint64        // resource (timeline row) the event belongs to
	Stack    int           // stack ID; 0 means none
	Arg      any           // optional extra payload
}
466
// Arrow emits a flow arrow between two resources in the procs section.
// It is dropped if either endpoint fails the resource filter.
func (e *Emitter) Arrow(a ArrowEvent) {
	if e.filter != nil && (!e.filter(a.FromResource) || !e.filter(a.ToResource)) {
		return
	}
	e.arrow(a, format.ProcsSection)
}
473
// TaskArrow emits a flow arrow between two rows of the tasks section.
// The resource filter does not apply.
func (e *Emitter) TaskArrow(a ArrowEvent) {
	e.arrow(a, format.TasksSection)
}
477
// arrow emits a flow start ("s") / step ("t") event pair linking
// a.Start on a.FromResource to a.End on a.ToResource in the given
// section. Unlike slices, both endpoints must lie within the time
// range or the arrow is dropped entirely.
func (e *Emitter) arrow(a ArrowEvent, sectionID uint64) {
	if !e.tsWithinRange(a.Start) || !e.tsWithinRange(a.End) {
		return
	}
	// Both halves share a fresh ID so the viewer can connect them.
	e.arrowSeq++
	e.OptionalEvent(&format.Event{
		Name:  a.Name,
		Phase: "s",
		TID:   a.FromResource,
		PID:   sectionID,
		ID:    e.arrowSeq,
		Time:  viewerTime(a.Start),
		Stack: a.FromStack,
	})
	e.OptionalEvent(&format.Event{
		Name:  a.Name,
		Phase: "t",
		TID:   a.ToResource,
		PID:   sectionID,
		ID:    e.arrowSeq,
		Time:  viewerTime(a.End),
	})
}
501
// ArrowEvent describes a flow arrow from one resource to another.
type ArrowEvent struct {
	Name         string        // display name of the arrow
	Start        time.Duration // time at the arrow's origin
	End          time.Duration // time at the arrow's destination
	FromResource uint64        // resource the arrow starts on
	FromStack    int           // stack ID at the origin; 0 means none
	ToResource   uint64        // resource the arrow ends on
}
510
// Event forwards ev to the consumer as a required event, i.e. one that
// must be kept in every split of the trace.
func (e *Emitter) Event(ev *format.Event) {
	e.c.ConsumeViewerEvent(ev, true)
}
514
// HeapAlloc records the heap allocation value v at time ts and emits
// an updated heap counter event if the stats changed.
func (e *Emitter) HeapAlloc(ts time.Duration, v uint64) {
	e.heapStats.heapAlloc = v
	e.emitHeapCounters(ts)
}
519
// Focus marks the resource with the given ID as focused; Flush sorts
// the focused resource to the top of the procs section.
func (e *Emitter) Focus(id uint64) {
	e.focusResource = id
}
523
// GoroutineTransition records a goroutine changing state at time ts
// and emits an updated goroutine-count counter event when the counts
// changed and ts is within range.
func (e *Emitter) GoroutineTransition(ts time.Duration, from, to GState) {
	e.gstates[from]--
	e.gstates[to]++
	if e.prevGstates == e.gstates {
		// Counts unchanged (e.g. from == to); no counter event needed.
		return
	}
	if e.tsWithinRange(ts) {
		e.OptionalEvent(&format.Event{
			Name:  "Goroutines",
			Phase: "C",
			Time:  viewerTime(ts),
			PID:   1,
			Arg: &format.GoroutineCountersArg{
				Running:   uint64(e.gstates[GRunning]),
				Runnable:  uint64(e.gstates[GRunnable]),
				GCWaiting: uint64(e.gstates[GWaitingGC]),
			},
		})
	}
	// Record the new counts even when ts is out of range so later
	// in-range events reflect correct totals.
	e.prevGstates = e.gstates
}
545
// IncThreadStateCount adjusts the count of threads in the given state
// by delta at time ts, emitting an updated thread counter event when
// the counts changed and ts is within range.
func (e *Emitter) IncThreadStateCount(ts time.Duration, state ThreadState, delta int64) {
	e.threadStats[state] += delta
	if e.prevThreadStats == e.threadStats {
		// Counts unchanged (delta == 0); no counter event needed.
		return
	}
	if e.tsWithinRange(ts) {
		e.OptionalEvent(&format.Event{
			Name:  "Threads",
			Phase: "C",
			Time:  viewerTime(ts),
			PID:   1,
			Arg: &format.ThreadCountersArg{
				Running:   int64(e.threadStats[ThreadStateRunning]),
				InSyscall: int64(e.threadStats[ThreadStateInSyscall]),
				// TODO(mknyszek): Why is InSyscallRuntime not included here?
			},
		})
	}
	// Record the new counts even when ts is out of range so later
	// in-range events reflect correct totals.
	e.prevThreadStats = e.threadStats
}
566
// HeapGoal records the heap goal value v at time ts and emits an
// updated heap counter event if the stats changed.
func (e *Emitter) HeapGoal(ts time.Duration, v uint64) {
	// This cutoff at 1 PiB is a workaround for https://github.com/golang/go/issues/63864.
	//
	// TODO(mknyszek): Remove this once the problem has been fixed.
	const PB = 1 << 50
	if v > PB {
		v = 0
	}
	e.heapStats.nextGC = v
	e.emitHeapCounters(ts)
}
578
// emitHeapCounters emits a "Heap" counter event at ts if the heap
// stats changed since the last emission.
func (e *Emitter) emitHeapCounters(ts time.Duration) {
	if e.prevHeapStats == e.heapStats {
		return
	}
	// NextGC is reported as the headroom above the current allocation,
	// clamped at zero.
	diff := uint64(0)
	if e.heapStats.nextGC > e.heapStats.heapAlloc {
		diff = e.heapStats.nextGC - e.heapStats.heapAlloc
	}
	if e.tsWithinRange(ts) {
		e.OptionalEvent(&format.Event{
			Name:  "Heap",
			Phase: "C",
			Time:  viewerTime(ts),
			PID:   1,
			Arg:   &format.HeapCountersArg{Allocated: e.heapStats.heapAlloc, NextGC: diff},
		})
	}
	// Record the new stats even when ts is out of range.
	e.prevHeapStats = e.heapStats
}
598
// Err returns an error if the emitter is in an invalid state.
// Counts go negative only when state transitions were reported out of
// balance (more exits than entries for a state).
func (e *Emitter) Err() error {
	if e.gstates[GRunnable] < 0 || e.gstates[GRunning] < 0 || e.threadStats[ThreadStateInSyscall] < 0 || e.threadStats[ThreadStateInSyscallRuntime] < 0 {
		return fmt.Errorf(
			"runnable=%d running=%d insyscall=%d insyscallRuntime=%d",
			e.gstates[GRunnable],
			e.gstates[GRunning],
			e.threadStats[ThreadStateInSyscall],
			e.threadStats[ThreadStateInSyscallRuntime],
		)
	}
	return nil
}
612
613 func (e *Emitter) tsWithinRange(ts time.Duration) bool {
614 return e.rangeStart <= ts && ts <= e.rangeEnd
615 }
616
// OptionalEvent emits ev if it's within the time range of the consumer, i.e.
// the selected trace split range. The consumer may drop it when splitting.
func (e *Emitter) OptionalEvent(ev *format.Event) {
	e.c.ConsumeViewerEvent(ev, false)
}
622
// Flush emits the section/row metadata (process and thread names plus
// their sort order) as required events and then flushes the underlying
// consumer. Call it once, after all events have been emitted.
func (e *Emitter) Flush() {
	e.processMeta(format.StatsSection, "STATS", 0)

	// Only show the tasks section when there are tasks.
	if len(e.tasks) != 0 {
		e.processMeta(format.TasksSection, "TASKS", 1)
	}
	for id, task := range e.tasks {
		e.threadMeta(format.TasksSection, id, task.name, task.sortIndex)
	}

	e.processMeta(format.ProcsSection, e.resourceType, 2)

	// Pseudo-resources for runtime activity, sorted above real
	// resources via negative priorities.
	e.threadMeta(format.ProcsSection, GCP, "GC", -6)
	e.threadMeta(format.ProcsSection, NetpollP, "Network", -5)
	e.threadMeta(format.ProcsSection, TimerP, "Timers", -4)
	e.threadMeta(format.ProcsSection, SyscallP, "Syscalls", -3)

	for id, name := range e.resources {
		priority := int(id)
		if e.focusResource != 0 && id == e.focusResource {
			// Put the focus goroutine on top.
			priority = -2
		}
		e.threadMeta(format.ProcsSection, id, name, priority)
	}

	e.c.Flush()
}
651
652 func (e *Emitter) threadMeta(sectionID, tid uint64, name []byte, priority int) {
653 e.Event(&format.Event{
654 Name: "thread_name",
655 Phase: "M",
656 PID: sectionID,
657 TID: tid,
658 Arg: &format.NameArg{Name: name},
659 })
660 e.Event(&format.Event{
661 Name: "thread_sort_index",
662 Phase: "M",
663 PID: sectionID,
664 TID: tid,
665 Arg: &format.SortIndexArg{Index: priority},
666 })
667 }
668
669 func (e *Emitter) processMeta(sectionID uint64, name []byte, priority int) {
670 e.Event(&format.Event{
671 Name: "process_name",
672 Phase: "M",
673 PID: sectionID,
674 Arg: &format.NameArg{Name: name},
675 })
676 e.Event(&format.Event{
677 Name: "process_sort_index",
678 Phase: "M",
679 PID: sectionID,
680 Arg: &format.SortIndexArg{Index: priority},
681 })
682 }
683
// Stack emits the given frames and returns a unique id for the stack. No
// pointers to the given data are being retained beyond the call to Stack.
// An empty stack yields ID 0 (the root of the frame tree).
func (e *Emitter) Stack(stk []trace.StackFrame) int {
	return e.buildBranch(e.frameTree, stk)
}
689
// buildBranch builds one branch in the prefix tree rooted at ctx.frameTree.
// It returns the viewer frame ID of the innermost frame of stk.
func (e *Emitter) buildBranch(parent frameNode, stk []trace.StackFrame) int {
	if len(stk) == 0 {
		return parent.id
	}
	// Consume frames from the end of stk (the outermost frame first)
	// so shared stack prefixes share tree nodes.
	last := len(stk) - 1
	frame := stk[last]
	stk = stk[:last]

	node, ok := parent.children[frame.PC]
	if !ok {
		// First time this frame appears under parent: assign the next
		// sequential ID and emit the frame to the consumer.
		e.frameSeq++
		node.id = e.frameSeq
		node.children = map[uint64]frameNode{}
		parent.children[frame.PC] = node
		e.c.ConsumeViewerFrame(strconv.Itoa(node.id), format.Frame{Name: fmt.Sprintf("%v:%v", frame.Func, frame.Line), Parent: parent.id})
	}
	return e.buildBranch(node, stk)
}
709
// heapStats holds the counter state backing the "Heap" counter event.
type heapStats struct {
	heapAlloc uint64 // last value recorded via HeapAlloc
	nextGC    uint64 // last value recorded via HeapGoal (0 when clamped)
}
714
715 func viewerTime(t time.Duration) float64 {
716 return float64(t) / float64(time.Microsecond)
717 }
718
// GState is a goroutine state tracked for the "Goroutines" counter.
type GState int

const (
	GDead GState = iota
	GRunnable
	GRunning
	GWaiting
	GWaitingGC

	// gStateCount is the number of states; it sizes the counter arrays.
	gStateCount
)
730
// ThreadState is a thread state tracked for the "Threads" counter.
type ThreadState int

const (
	ThreadStateInSyscall ThreadState = iota
	ThreadStateInSyscallRuntime
	ThreadStateRunning

	// threadStateCount is the number of states; it sizes the counter arrays.
	threadStateCount
)
740
// frameNode is a node in the stack-frame prefix tree built by
// buildBranch; children are keyed by frame PC.
type frameNode struct {
	id       int // viewer frame ID; 0 only at the root
	children map[uint64]frameNode
}
745
// Mapping from more reasonable color names to the reserved color names in
// https://github.com/catapult-project/catapult/blob/master/tracing/tracing/base/color_scheme.html#L50
// The chrome trace viewer allows only those as cname values.
const (
	colorLightMauve     = "thread_state_uninterruptible" // 182, 125, 143
	colorOrange         = "thread_state_iowait"          // 255, 140, 0
	colorSeafoamGreen   = "thread_state_running"         // 126, 200, 148
	colorVistaBlue      = "thread_state_runnable"        // 133, 160, 210
	colorTan            = "thread_state_unknown"         // 199, 155, 125
	colorIrisBlue       = "background_memory_dump"       // 0, 180, 180
	colorMidnightBlue   = "light_memory_dump"            // 0, 0, 180
	colorDeepMagenta    = "detailed_memory_dump"         // 180, 0, 180
	colorBlue           = "vsync_highlight_color"        // 0, 0, 255
	colorGrey           = "generic_work"                 // 125, 125, 125
	colorGreen          = "good"                         // 0, 125, 0
	colorDarkGoldenrod  = "bad"                          // 180, 125, 0
	colorPeach          = "terrible"                     // 180, 0, 0
	colorBlack          = "black"                        // 0, 0, 0
	colorLightGrey      = "grey"                         // 221, 221, 221
	colorWhite          = "white"                        // 255, 255, 255
	colorYellow         = "yellow"                       // 255, 255, 0
	colorOlive          = "olive"                        // 100, 100, 0
	colorCornflowerBlue = "rail_response"                // 67, 135, 253
	colorSunsetOrange   = "rail_animation"               // 244, 74, 63
	colorTangerine      = "rail_idle"                    // 238, 142, 0
	colorShamrockGreen  = "rail_load"                    // 13, 168, 97
	colorGreenishYellow = "startup"                      // 230, 230, 0
	colorDarkGrey       = "heap_dump_stack_frame"        // 128, 128, 128
	colorTawny          = "heap_dump_child_node_arrow"   // 204, 102, 0
	colorLemon          = "cq_build_running"             // 255, 255, 119
	colorLime           = "cq_build_passed"              // 153, 238, 102
	colorPink           = "cq_build_failed"              // 238, 136, 136
	colorSilver         = "cq_build_abandoned"           // 187, 187, 187
	colorManzGreen      = "cq_build_attempt_runnig"      // 222, 222, 75
	colorKellyGreen     = "cq_build_attempt_passed"      // 108, 218, 35
	colorAnotherGrey    = "cq_build_attempt_failed"      // 187, 187, 187
)

// colorForTask is the palette used to color user tasks and their
// slices; tasks are assigned colors round-robin by ID.
var colorForTask = []string{
	colorLightMauve,
	colorOrange,
	colorSeafoamGreen,
	colorVistaBlue,
	colorTan,
	colorMidnightBlue,
	colorIrisBlue,
	colorDeepMagenta,
	colorGreen,
	colorDarkGoldenrod,
	colorPeach,
	colorOlive,
	colorCornflowerBlue,
	colorSunsetOrange,
	colorTangerine,
	colorShamrockGreen,
	colorTawny,
	colorLemon,
	colorLime,
	colorPink,
	colorSilver,
	colorManzGreen,
	colorKellyGreen,
}

// pickTaskColor returns a reserved viewer color name for the task with
// the given id, chosen round-robin from colorForTask.
func pickTaskColor(id uint64) string {
	idx := id % uint64(len(colorForTask))
	return colorForTask[idx]
}
814