add lock around saving ts clients

Closes #1544

Co-Authored-By: Patrick Huang <huangxiaoman@gmail.com>
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
Author: Kristoffer Dalby, 2023-09-10 09:17:17 +02:00 (committed by Kristoffer Dalby)
Parent: 4c12c02e71
Commit: 9ccf87c566
2 changed files with 7 additions and 3 deletions
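The race this commit fixes: CreateTailscaleNodesInUser (and its EmbeddedDERPServerScenario variant) appear to run one worker per Tailscale client (the closures ending in `return nil })` in the hunks below), and each worker wrote its finished client into the shared user.Clients map. Go maps are not safe for concurrent writes, so two workers finishing at once could panic with "concurrent map writes". The diffs guard each write with the scenario-wide mutex. A minimal, self-contained sketch of the fixed pattern (my construction, not headscale's actual code; the scenario type and string values are stand-ins):

// Worker goroutines register results into a shared map; every write is
// guarded by the scenario-wide mutex so the map is never mutated
// concurrently.
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

type scenario struct {
	mu      sync.Mutex        // guards clients
	clients map[string]string // hostname -> client; string is a stand-in type
}

func (s *scenario) createClients(hostnames []string) error {
	var eg errgroup.Group

	for _, hostname := range hostnames {
		hostname := hostname // capture loop variable (needed before Go 1.22)

		eg.Go(func() error {
			client := "client-" + hostname // stand-in for container setup

			s.mu.Lock()
			s.clients[hostname] = client
			s.mu.Unlock()

			return nil
		})
	}

	return eg.Wait()
}

func main() {
	s := &scenario{clients: map[string]string{}}
	if err := s.createClients([]string{"ts-1", "ts-2", "ts-3"}); err != nil {
		panic(err)
	}

	fmt.Println(len(s.clients), "clients registered")
}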


@@ -197,7 +197,9 @@ func (s *EmbeddedDERPServerScenario) CreateTailscaleIsolatedNodesInUser(
 			)
 		}
 
+		s.mu.Lock()
 		user.Clients[tsClient.Hostname()] = tsClient
+		s.mu.Unlock()
 
 		return nil
 	})


@@ -112,7 +112,7 @@ type Scenario struct {
 	pool    *dockertest.Pool
 	network *dockertest.Network
 
-	headscaleLock sync.Mutex
+	mu sync.Mutex
 }
 
 // NewScenario creates a test Scenario which can be used to bootstraps a ControlServer with
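Renaming headscaleLock to mu widens the field's role: the same mutex now serializes both control-server creation (next hunk) and the client-map writes (first and last hunks). One way to record that invariant, following the common Go convention of stating what a mutex guards (the comment wording is my suggestion, not part of the commit):

// Sketch only; the real Scenario struct has more fields.
package integration

import "sync"

type Scenario struct {
	// mu guards control-server lookup/creation and writes to
	// each user's Clients map.
	mu sync.Mutex
}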
@@ -212,8 +212,8 @@ func (s *Scenario) Users() []string {
 // will be return, otherwise a new instance will be created.
 // TODO(kradalby): make port and headscale configurable, multiple instances support?
 func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
-	s.headscaleLock.Lock()
-	defer s.headscaleLock.Unlock()
+	s.mu.Lock()
+	defer s.mu.Unlock()
 
 	if headscale, ok := s.controlServers.Load("headscale"); ok {
 		return headscale, nil
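The Headscale hunk shows why one scenario-wide mutex is convenient: the check of s.controlServers and the construction of a new server must happen atomically, or two concurrent callers could each start a control server. A simplified, self-contained sketch of that get-or-create shape (the registry type and string values are illustrative, not headscale's hsic types):

// Holding mu across check-then-create makes the sequence atomic,
// so concurrent callers cannot both construct a server.
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu      sync.Mutex
	servers map[string]string
}

func (r *registry) getOrCreate(name string) string {
	r.mu.Lock()
	defer r.mu.Unlock()

	if srv, ok := r.servers[name]; ok {
		return srv // already created by an earlier caller
	}

	srv := "server-" + name // stand-in for the expensive construction
	r.servers[name] = srv

	return srv
}

func main() {
	r := &registry{servers: map[string]string{}}
	fmt.Println(r.getOrCreate("headscale"))
	fmt.Println(r.getOrCreate("headscale")) // returns the cached instance
}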
@@ -326,7 +326,9 @@ func (s *Scenario) CreateTailscaleNodesInUser(
 			)
 		}
 
+		s.mu.Lock()
 		user.Clients[tsClient.Hostname()] = tsClient
+		s.mu.Unlock()
 
 		return nil
 	})
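Races like this are what Go's race detector exists for; running the integration suite with go test -race would typically flag the unguarded writes. A minimal repro of the pre-fix behaviour (my construction, not from the repository), which go run -race reports as a data race and which can also panic with "concurrent map writes":

package main

import "sync"

func main() {
	clients := map[string]string{}

	var wg sync.WaitGroup
	for _, hostname := range []string{"ts-1", "ts-2"} {
		hostname := hostname
		wg.Add(1)

		go func() {
			defer wg.Done()
			clients[hostname] = "client" // unguarded map write: data race
		}()
	}

	wg.Wait()
}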