|
| 1 | +package common |
| 2 | + |
| 3 | +import ( |
| 4 | + "errors" |
| 5 | + "fmt" |
| 6 | + "net" |
| 7 | + "os" |
| 8 | + "strings" |
| 9 | + "sync" |
| 10 | + "testing" |
| 11 | + "time" |
| 12 | + |
| 13 | + "github.com/ory/dockertest/v3" |
| 14 | + dc "github.com/ory/dockertest/v3/docker" |
| 15 | +) |
| 16 | + |
// GetEnvOrDefault returns the value of the environment variable named key,
// or fallback when the variable is unset or set to the empty string.
func GetEnvOrDefault(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}
| 26 | + |
// -- Random port supplier.

// RandomPortSupplier wraps the functionality for random port handling in tests.
// Typical usage: create via NewRandomPortSupplier, call Discover to record the
// host/port of the underlying listener, read them with the accessors, then call
// Cleanup to release the port before handing it to the system under test.
type RandomPortSupplier interface {
	// Cleanup closes the underlying listener, releasing the reserved port.
	Cleanup()
	// Discover records the listener's host and port for later retrieval.
	Discover() error
	// DiscoveredHost returns the recorded host and whether Discover succeeded.
	DiscoveredHost() (string, bool)
	// DiscoveredPort returns the recorded port and whether Discover succeeded.
	DiscoveredPort() (string, bool)
}
| 36 | + |
// listenerPortSupplier implements RandomPortSupplier by holding a TCP
// listener bound to an OS-assigned random port on 127.0.0.1.
type listenerPortSupplier struct {
	closed         bool         // true once Cleanup has closed the listener
	discovered     bool         // true once Discover has recorded host/port
	discoveredHost string       // host captured by Discover
	discoveredPort string       // port captured by Discover
	listener       net.Listener // listener reserving the random port
	lock           *sync.Mutex  // guards all fields above
}
| 45 | + |
| 46 | +// NewRandomPortSupplier creates an initialized instance of a random port supplier. |
| 47 | +func NewRandomPortSupplier() (RandomPortSupplier, error) { |
| 48 | + listener, err := net.Listen("tcp", "127.0.0.1:0") |
| 49 | + if err != nil { |
| 50 | + return nil, err |
| 51 | + } |
| 52 | + return &listenerPortSupplier{ |
| 53 | + lock: &sync.Mutex{}, |
| 54 | + listener: listener, |
| 55 | + }, nil |
| 56 | +} |
| 57 | + |
| 58 | +func (l *listenerPortSupplier) Cleanup() { |
| 59 | + l.lock.Lock() |
| 60 | + defer l.lock.Unlock() |
| 61 | + if !l.closed { |
| 62 | + l.listener.Close() |
| 63 | + l.closed = true |
| 64 | + } |
| 65 | +} |
| 66 | + |
| 67 | +func (l *listenerPortSupplier) Discover() error { |
| 68 | + l.lock.Lock() |
| 69 | + defer l.lock.Unlock() |
| 70 | + if l.closed { |
| 71 | + return errors.New("was-closed") |
| 72 | + } |
| 73 | + host, port, err := net.SplitHostPort(l.listener.Addr().String()) |
| 74 | + if err != nil { |
| 75 | + return err |
| 76 | + } |
| 77 | + l.discoveredHost = host |
| 78 | + l.discoveredPort = port |
| 79 | + l.discovered = true |
| 80 | + return nil |
| 81 | +} |
| 82 | + |
| 83 | +func (l *listenerPortSupplier) DiscoveredHost() (string, bool) { |
| 84 | + l.lock.Lock() |
| 85 | + defer l.lock.Unlock() |
| 86 | + return l.discoveredHost, l.discovered |
| 87 | +} |
| 88 | + |
| 89 | +func (l *listenerPortSupplier) DiscoveredPort() (string, bool) { |
| 90 | + l.lock.Lock() |
| 91 | + defer l.lock.Unlock() |
| 92 | + return l.discoveredPort, l.discovered |
| 93 | +} |
| 94 | + |
| 95 | +// WaitForContainerExit0 waits for the container to exist with code 0. |
| 96 | +func WaitForContainerExit0(t *testing.T, pool *dockertest.Pool, containerID string) error { |
| 97 | + finalState := "not started" |
| 98 | + finalStatus := "" |
| 99 | + |
| 100 | + benchMigrateStart := time.Now() |
| 101 | + chanSuccess := make(chan struct{}, 1) |
| 102 | + chanError := make(chan error, 1) |
| 103 | + |
| 104 | + go func() { |
| 105 | + poolRetryErr := pool.Retry(func() error { |
| 106 | + containers, _ := pool.Client.ListContainers(dc.ListContainersOptions{All: true}) |
| 107 | + for _, container := range containers { |
| 108 | + if container.ID == containerID { |
| 109 | + time.Sleep(time.Millisecond * 50) |
| 110 | + if container.State == "running" { |
| 111 | + return errors.New("still running") |
| 112 | + } |
| 113 | + if container.State == "restarting" { |
| 114 | + t.Logf("container %s is restarting with status '%s'...", containerID, container.Status) |
| 115 | + time.Sleep(time.Second) |
| 116 | + continue |
| 117 | + } |
| 118 | + finalState = container.State |
| 119 | + finalStatus = container.Status |
| 120 | + return nil |
| 121 | + } |
| 122 | + } |
| 123 | + return errors.New("no container") |
| 124 | + }) |
| 125 | + if poolRetryErr == nil { |
| 126 | + close(chanSuccess) |
| 127 | + return |
| 128 | + } |
| 129 | + chanError <- poolRetryErr |
| 130 | + }() |
| 131 | + |
| 132 | + select { |
| 133 | + case <-chanSuccess: |
| 134 | + t.Logf("container %s finished successfully after: %s", containerID, time.Now().Sub(benchMigrateStart).String()) |
| 135 | + case receivedError := <-chanError: |
| 136 | + return receivedError |
| 137 | + case <-time.After(time.Second * 10): |
| 138 | + return fmt.Errorf("container %s complete within timeout", containerID) |
| 139 | + } |
| 140 | + |
| 141 | + if finalState != "exited" { |
| 142 | + return fmt.Errorf("expected container %s to be in state exited but received: '%s'", containerID, finalState) |
| 143 | + } |
| 144 | + // it was exited, ... |
| 145 | + if !strings.HasPrefix(strings.ToLower(finalStatus), "exited (0)") { |
| 146 | + return fmt.Errorf("expected container %s to exit with status 0, received full exit message: '%s'", containerID, finalStatus) |
| 147 | + } |
| 148 | + |
| 149 | + return nil |
| 150 | +} |
| 151 | + |
// CompareStringSlices fails the test unless the two string slices have the
// same length and identical values at every index.
func CompareStringSlices(t *testing.T, this, that []string) {
	if len(this) != len(that) {
		t.Fatalf("expected did not match received: '%v' vs '%v'", this, that)
	}
	for i := 0; i < len(this); i++ {
		if this[i] != that[i] {
			t.Fatalf("expected did not match received at index %d: '%v' vs '%v'", i, this[i], that[i])
		}
	}
}
| 163 | + |
// StringSlicesEqual reports whether two string slices have the same length
// and identical values, without failing the test. The t parameter is unused
// but kept so the signature mirrors CompareStringSlices.
func StringSlicesEqual(t *testing.T, this, that []string) bool {
	if len(this) != len(that) {
		return false
	}
	for i := range this {
		if this[i] != that[i] {
			return false
		}
	}
	return true
}
| 176 | + |
// --

// Eventually retries f until it succeeds, logging each failure and sleeping
// one second between attempts. After the attempt counter exceeds maxTimes
// (i.e. maxTimes+1 consecutive failures, preserving the original semantics),
// the test is failed with the given message labels.
func Eventually(t *testing.T, maxTimes int, f func() error, messageLabels ...interface{}) {
	for attempt := 0; ; attempt++ {
		if attempt > maxTimes {
			t.Fatal(append([]interface{}{fmt.Sprintf("Failed running block %d times", maxTimes)}, messageLabels...)...)
		}
		err := f()
		if err == nil {
			return
		}
		t.Log(append(messageLabels, err.Error())...)
		time.Sleep(time.Second)
	}
}
0 commit comments