queue.go

   1  package task
   2  
   3  import "runtime/interrupt"
   4  
// asserts enables extra invariant checks (e.g. that a pushed task is not
// already linked into another list). Disabled in normal builds.
const asserts = false
   6  
// Queue is a FIFO container of tasks.
// The zero value is an empty queue.
type Queue struct {
	// head is the next task to be popped; tail is the most recently pushed.
	// Both are nil when the queue is empty; tasks are linked via Task.Next.
	head, tail *Task
}
  12  
  13  // Push a task onto the queue.
  14  func (q *Queue) Push(t *Task) {
  15  	mask := lockAtomics()
  16  	if asserts && t.Next != nil {
  17  		unlockAtomics(mask)
  18  		panic("runtime: pushing a task to a queue with a non-nil Next pointer")
  19  	}
  20  	if q.tail != nil {
  21  		q.tail.Next = t
  22  	}
  23  	q.tail = t
  24  	t.Next = nil
  25  	if q.head == nil {
  26  		q.head = t
  27  	}
  28  	unlockAtomics(mask)
  29  }
  30  
  31  // Pop a task off of the queue.
  32  func (q *Queue) Pop() *Task {
  33  	mask := lockAtomics()
  34  	t := q.head
  35  	if t == nil {
  36  		unlockAtomics(mask)
  37  		return nil
  38  	}
  39  	q.head = t.Next
  40  	if q.tail == t {
  41  		q.tail = nil
  42  	}
  43  	t.Next = nil
  44  	unlockAtomics(mask)
  45  	return t
  46  }
  47  
  48  // Append pops the contents of another queue and pushes them onto the end of this queue.
  49  func (q *Queue) Append(other *Queue) {
  50  	mask := lockAtomics()
  51  	if q.head == nil {
  52  		q.head = other.head
  53  	} else {
  54  		q.tail.Next = other.head
  55  	}
  56  	q.tail = other.tail
  57  	other.head, other.tail = nil, nil
  58  	unlockAtomics(mask)
  59  }
  60  
  61  // Empty checks if the queue is empty.
  62  func (q *Queue) Empty() bool {
  63  	mask := lockAtomics()
  64  	empty := q.head == nil
  65  	unlockAtomics(mask)
  66  	return empty
  67  }
  68  
// Stack is a LIFO container of tasks.
// The zero value is an empty stack.
// This is slightly cheaper than a queue, so it can be preferable when strict ordering is not necessary.
type Stack struct {
	// top is the most recently pushed task, or nil when the stack is empty;
	// tasks are linked via Task.Next.
	top *Task
}
  75  
  76  // Push a task onto the stack.
  77  func (s *Stack) Push(t *Task) {
  78  	mask := lockAtomics()
  79  	if asserts && t.Next != nil {
  80  		unlockAtomics(mask)
  81  		panic("runtime: pushing a task to a stack with a non-nil Next pointer")
  82  	}
  83  	s.top, t.Next = t, s.top
  84  	unlockAtomics(mask)
  85  }
  86  
  87  // Pop a task off of the stack.
  88  func (s *Stack) Pop() *Task {
  89  	mask := lockAtomics()
  90  	t := s.top
  91  	if t != nil {
  92  		s.top = t.Next
  93  		t.Next = nil
  94  	}
  95  	unlockAtomics(mask)
  96  	return t
  97  }
  98  
  99  // tail follows the chain of tasks.
 100  // If t is nil, returns nil.
 101  // Otherwise, returns the task in the chain where the Next field is nil.
 102  func (t *Task) tail() *Task {
 103  	if t == nil {
 104  		return nil
 105  	}
 106  	for t.Next != nil {
 107  		t = t.Next
 108  	}
 109  	return t
 110  }
 111  
 112  // Queue moves the contents of the stack into a queue.
 113  // Elements can be popped from the queue in the same order that they would be popped from the stack.
 114  func (s *Stack) Queue() Queue {
 115  	mask := lockAtomics()
 116  	head := s.top
 117  	s.top = nil
 118  	q := Queue{
 119  		head: head,
 120  		tail: head.tail(),
 121  	}
 122  	unlockAtomics(mask)
 123  	return q
 124  }
 125  
// Use runtime.lockAtomics and runtime.unlockAtomics so that Queue and Stack
// work correctly even on multicore systems. These functions are normally used
// to implement atomic operations, but the same spinlock can also be used for
// Queue/Stack operations which are very fast.
// These functions are just plain old interrupt disable/restore on non-multicore
// systems.

// lockAtomics acquires the runtime's atomics lock (interrupt disable on
// single-core systems) and returns the previous interrupt state, which the
// caller must later pass to unlockAtomics.
//
//go:linkname lockAtomics runtime.lockAtomics
func lockAtomics() interrupt.State

// unlockAtomics releases the lock acquired by lockAtomics, restoring the
// given interrupt state.
//
//go:linkname unlockAtomics runtime.unlockAtomics
func unlockAtomics(mask interrupt.State)
 138