// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	. "runtime"
	"sync"
	"sync/atomic"
	"testing"
)

    15  // TestSemaHandoff checks that when semrelease+handoff is
    16  // requested, the G that releases the semaphore yields its
    17  // P directly to the first waiter in line.
    18  // See issue 33747 for discussion.
    19  func TestSemaHandoff(t *testing.T) {
    20  	const iter = 10000
    21  	ok := 0
    22  	for i := 0; i < iter; i++ {
    23  		if testSemaHandoff() {
    24  			ok++
    25  		}
    26  	}
    27  	// As long as two thirds of handoffs are direct, we
    28  	// consider the test successful. The scheduler is
    29  	// nondeterministic, so this test checks that we get the
    30  	// desired outcome in a significant majority of cases.
    31  	// The actual ratio of direct handoffs is much higher
    32  	// (>90%) but we use a lower threshold to minimize the
    33  	// chances that unrelated changes in the runtime will
    34  	// cause the test to fail or become flaky.
    35  	if ok < iter*2/3 {
    36  		t.Fatal("direct handoff < 2/3:", ok, iter)
    37  	}
    38  }
    39  
    40  func TestSemaHandoff1(t *testing.T) {
    41  	if GOMAXPROCS(-1) <= 1 {
    42  		t.Skip("GOMAXPROCS <= 1")
    43  	}
    44  	defer GOMAXPROCS(GOMAXPROCS(-1))
    45  	GOMAXPROCS(1)
    46  	TestSemaHandoff(t)
    47  }
    48  
    49  func TestSemaHandoff2(t *testing.T) {
    50  	if GOMAXPROCS(-1) <= 2 {
    51  		t.Skip("GOMAXPROCS <= 2")
    52  	}
    53  	defer GOMAXPROCS(GOMAXPROCS(-1))
    54  	GOMAXPROCS(2)
    55  	TestSemaHandoff(t)
    56  }
    57  
    58  func testSemaHandoff() bool {
    59  	var sema, res uint32
    60  	done := make(chan struct{})
    61  
    62  	// We're testing that the current goroutine is able to yield its time slice
    63  	// to another goroutine. Stop the current goroutine from migrating to
    64  	// another CPU where it can win the race (and appear to have not yielded) by
    65  	// keeping the CPUs slightly busy.
    66  	var wg sync.WaitGroup
    67  	for i := 0; i < GOMAXPROCS(-1); i++ {
    68  		wg.Add(1)
    69  		go func() {
    70  			defer wg.Done()
    71  			for {
    72  				select {
    73  				case <-done:
    74  					return
    75  				default:
    76  				}
    77  				Gosched()
    78  			}
    79  		}()
    80  	}
    81  
    82  	wg.Add(1)
    83  	go func() {
    84  		defer wg.Done()
    85  		Semacquire(&sema)
    86  		atomic.CompareAndSwapUint32(&res, 0, 1)
    87  
    88  		Semrelease1(&sema, true, 0)
    89  		close(done)
    90  	}()
    91  	for SemNwait(&sema) == 0 {
    92  		Gosched() // wait for goroutine to block in Semacquire
    93  	}
    94  
    95  	// The crux of the test: we release the semaphore with handoff
    96  	// and immediately perform a CAS both here and in the waiter; we
    97  	// want the CAS in the waiter to execute first.
    98  	Semrelease1(&sema, true, 0)
    99  	atomic.CompareAndSwapUint32(&res, 0, 2)
   100  
   101  	wg.Wait() // wait for goroutines to finish to avoid data races
   102  
   103  	return res == 1 // did the waiter run first?
   104  }
   105  
   106  func BenchmarkSemTable(b *testing.B) {
   107  	for _, n := range []int{1000, 2000, 4000, 8000} {
   108  		b.Run(fmt.Sprintf("OneAddrCollision/n=%d", n), func(b *testing.B) {
   109  			tab := Escape(new(SemTable))
   110  			u := make([]uint32, SemTableSize+1)
   111  
   112  			b.ResetTimer()
   113  
   114  			for j := 0; j < b.N; j++ {
   115  				// Simulate two locks colliding on the same semaRoot.
   116  				//
   117  				// Specifically enqueue all the waiters for the first lock,
   118  				// then all the waiters for the second lock.
   119  				//
   120  				// Then, dequeue all the waiters from the first lock, then
   121  				// the second.
   122  				//
   123  				// Each enqueue/dequeue operation should be O(1), because
   124  				// there are exactly 2 locks. This could be O(n) if all
   125  				// the waiters for both locks are on the same list, as it
   126  				// once was.
   127  				for i := 0; i < n; i++ {
   128  					if i < n/2 {
   129  						tab.Enqueue(&u[0])
   130  					} else {
   131  						tab.Enqueue(&u[SemTableSize])
   132  					}
   133  				}
   134  				for i := 0; i < n; i++ {
   135  					var ok bool
   136  					if i < n/2 {
   137  						ok = tab.Dequeue(&u[0])
   138  					} else {
   139  						ok = tab.Dequeue(&u[SemTableSize])
   140  					}
   141  					if !ok {
   142  						b.Fatal("failed to dequeue")
   143  					}
   144  				}
   145  			}
   146  		})
   147  		b.Run(fmt.Sprintf("ManyAddrCollision/n=%d", n), func(b *testing.B) {
   148  			tab := Escape(new(SemTable))
   149  			u := make([]uint32, n*SemTableSize)
   150  
   151  			b.ResetTimer()
   152  
   153  			for j := 0; j < b.N; j++ {
   154  				// Simulate n locks colliding on the same semaRoot.
   155  				//
   156  				// Each enqueue/dequeue operation should be O(log n), because
   157  				// each semaRoot is a tree. This could be O(n) if it was
   158  				// some simpler data structure.
   159  				for i := 0; i < n; i++ {
   160  					tab.Enqueue(&u[i*SemTableSize])
   161  				}
   162  				for i := 0; i < n; i++ {
   163  					if !tab.Dequeue(&u[i*SemTableSize]) {
   164  						b.Fatal("failed to dequeue")
   165  					}
   166  				}
   167  			}
   168  		})
   169  	}
   170  }