feat: improve coder connect tunnel handling on reconnect #17598
Changes from all commits:
52f1c2b
288c33e
0de5df2
2b77b8e
3d39713
ca2e1bf
5dec731
ae34934
101702d
0d2ecb2
@@ -88,6 +88,7 @@ func NewTunnel(
			netLoopDone: make(chan struct{}),
			uSendCh:     s.sendCh,
			agents:      map[uuid.UUID]tailnet.Agent{},
			workspaces:  map[uuid.UUID]tailnet.Workspace{},
			clock:       quartz.NewReal(),
		},
	}
@@ -347,7 +348,9 @@ type updater struct {
	uSendCh chan<- *TunnelMessage
	// agents contains the agents that are currently connected to the tunnel.
	agents map[uuid.UUID]tailnet.Agent
	// workspaces contains the workspaces to which agents are currently connected via the tunnel.
	workspaces map[uuid.UUID]tailnet.Workspace
	conn Conn
	clock quartz.Clock
}
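For readers skimming the diff, the essence of the new state tracking is that the updater now mirrors the last known set of agents and workspaces in two maps keyed by UUID. Below is a minimal, self-contained sketch of that bookkeeping, using simplified stand-in types and a hypothetical `applyDiff` helper rather than the real `tailnet` structs:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

// Simplified stand-ins for tailnet.Agent and tailnet.Workspace; the real types
// carry more fields and provide Clone() methods.
type agent struct {
	ID          uuid.UUID
	Name        string
	WorkspaceID uuid.UUID
}

type workspace struct {
	ID   uuid.UUID
	Name string
}

// applyDiff mirrors the cache bookkeeping: upserted peers overwrite the cached
// entry, deleted peers are removed. (Hypothetical helper for illustration.)
func applyDiff(
	agents map[uuid.UUID]agent, workspaces map[uuid.UUID]workspace,
	upAgents, delAgents []agent, upWorkspaces, delWorkspaces []workspace,
) {
	for _, a := range upAgents {
		agents[a.ID] = a // the real code stores agent.Clone() to avoid aliasing
	}
	for _, a := range delAgents {
		delete(agents, a.ID)
	}
	for _, w := range upWorkspaces {
		workspaces[w.ID] = w // likewise workspace.Clone() in the real code
	}
	for _, w := range delWorkspaces {
		delete(workspaces, w.ID)
	}
}

func main() {
	agents := map[uuid.UUID]agent{}
	workspaces := map[uuid.UUID]workspace{}
	ws := workspace{ID: uuid.New(), Name: "dev"}
	applyDiff(agents, workspaces,
		[]agent{{ID: uuid.New(), Name: "main", WorkspaceID: ws.ID}}, nil,
		[]workspace{ws}, nil)
	fmt.Println(len(agents), len(workspaces)) // 1 1
}
```

Keeping this cache current on every diff is what makes the snapshot reconciliation further down the diff possible.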
@@ -397,14 +400,32 @@ func (u *updater) sendUpdateResponse(req *request[*TunnelMessage, *ManagerMessag
// createPeerUpdateLocked creates a PeerUpdate message from a workspace update, populating
// the network status of the agents.
func (u *updater) createPeerUpdateLocked(update tailnet.WorkspaceUpdate) *PeerUpdate {
	// if the update is a snapshot, we need to process the full state
	if update.Kind == tailnet.Snapshot {
		processSnapshotUpdate(&update, u.agents, u.workspaces)
	}
	out := &PeerUpdate{
		UpsertedWorkspaces: make([]*Workspace, len(update.UpsertedWorkspaces)),
		UpsertedAgents:     make([]*Agent, len(update.UpsertedAgents)),
		DeletedWorkspaces:  make([]*Workspace, len(update.DeletedWorkspaces)),
		DeletedAgents:      make([]*Agent, len(update.DeletedAgents)),
	}
	// save the workspace update to the tunnel's state, such that it can
	// be used to populate automated peer updates.
	for _, agent := range update.UpsertedAgents {
		u.agents[agent.ID] = agent.Clone()
	}
	for _, agent := range update.DeletedAgents {
		delete(u.agents, agent.ID)
	}
	for _, workspace := range update.UpsertedWorkspaces {
		u.workspaces[workspace.ID] = workspace.Clone()
> Review comment (on the `workspace.Clone()` call above): We don't actually need a full clone of the workspace, but it's probably clearer to do the legit Clone so we don't have a partially cloned object waiting to trip up some future editor of this code.
	}
	for _, workspace := range update.DeletedWorkspaces {
		delete(u.workspaces, workspace.ID)
	}
	for i, ws := range update.UpsertedWorkspaces {
		out.UpsertedWorkspaces[i] = &Workspace{
@@ -413,6 +434,7 @@ func (u *updater) createPeerUpdateLocked(update tailnet.WorkspaceUpdate) *PeerUp
			Status: Workspace_Status(ws.Status),
		}
	}
	upsertedAgents := u.convertAgentsLocked(update.UpsertedAgents)
	out.UpsertedAgents = upsertedAgents
	for i, ws := range update.DeletedWorkspaces {
@@ -472,17 +494,6 @@ func (u *updater) convertAgentsLocked(agents []*tailnet.Agent) []*Agent {
	return out
}

// setConn sets the `conn` and returns false if there's already a connection set.
func (u *updater) setConn(conn Conn) bool {
	u.mu.Lock()
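The `setConn` comment above describes a guard-then-set under the updater's mutex. A hedged sketch of that general shape, with simplified field and type names rather than the actual implementation:

```go
package vpnsketch

import (
	"io"
	"sync"
)

// connHolder is a stand-in for the updater's mu/conn pair.
type connHolder struct {
	mu   sync.Mutex
	conn io.Closer // placeholder for the tunnel's Conn interface
}

// setOnce stores conn only if none is set yet and reports whether it did,
// mirroring the documented contract of setConn.
func (h *connHolder) setOnce(conn io.Closer) bool {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.conn != nil {
		return false
	}
	h.conn = conn
	return true
}
```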
@@ -552,6 +563,46 @@ func (u *updater) netStatusLoop() {
	}
}

// processSnapshotUpdate handles the logic when a full state update is received.
// When the tunnel is live, we only receive diffs, but the first packet on any given
// reconnect to the tailnet API is a full state.
// Without this logic we weren't processing deletes for any workspaces or agents deleted
// while the client was disconnected, e.g. while the computer was asleep.
func processSnapshotUpdate(update *tailnet.WorkspaceUpdate, agents map[uuid.UUID]tailnet.Agent, workspaces map[uuid.UUID]tailnet.Workspace) {
	// ignoredWorkspaces is initially populated with the workspaces that are
	// in the current update. Later on we populate it with the deleted workspaces too
	// so that we don't send duplicate updates. Same applies to ignoredAgents.
	ignoredWorkspaces := make(map[uuid.UUID]struct{}, len(update.UpsertedWorkspaces))
	ignoredAgents := make(map[uuid.UUID]struct{}, len(update.UpsertedAgents))
	for _, workspace := range update.UpsertedWorkspaces {
		ignoredWorkspaces[workspace.ID] = struct{}{}
	}
	for _, agent := range update.UpsertedAgents {
		ignoredAgents[agent.ID] = struct{}{}
	}
	for _, agent := range agents {
		if _, present := ignoredAgents[agent.ID]; !present {
			// delete any current agents that are not in the new update
			update.DeletedAgents = append(update.DeletedAgents, &tailnet.Agent{
				ID:          agent.ID,
				Name:        agent.Name,
				WorkspaceID: agent.WorkspaceID,
			})
		}
	}
	for _, workspace := range workspaces {
		if _, present := ignoredWorkspaces[workspace.ID]; !present {
			update.DeletedWorkspaces = append(update.DeletedWorkspaces, &tailnet.Workspace{
				ID:     workspace.ID,
				Name:   workspace.Name,
				Status: workspace.Status,
			})
			ignoredWorkspaces[workspace.ID] = struct{}{}
		}
	}
}
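To see why this matters on reconnect, here is a hedged, test-style sketch of the reconciliation idea, using a simplified workspace type and a hypothetical `reconcile` helper rather than the real `processSnapshotUpdate`: anything cached locally that does not appear in the full-state snapshot is synthesized into a delete, while peers present in the snapshot flow through the normal upsert path.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

// ws is a simplified stand-in for tailnet.Workspace.
type ws struct {
	ID   uuid.UUID
	Name string
}

// reconcile (hypothetical) returns the IDs of cached workspaces missing from a
// full-state snapshot; these would become DeletedWorkspaces in the peer update.
func reconcile(cache map[uuid.UUID]ws, snapshot []ws) []uuid.UUID {
	present := make(map[uuid.UUID]struct{}, len(snapshot))
	for _, w := range snapshot {
		present[w.ID] = struct{}{}
	}
	var deleted []uuid.UUID
	for id := range cache {
		if _, ok := present[id]; !ok {
			deleted = append(deleted, id)
		}
	}
	return deleted
}

func main() {
	kept := ws{ID: uuid.New(), Name: "still-running"}
	stale := ws{ID: uuid.New(), Name: "deleted-while-asleep"}
	cache := map[uuid.UUID]ws{kept.ID: kept, stale.ID: stale}

	// The snapshot after reconnect only contains the surviving workspace.
	deleted := reconcile(cache, []ws{kept})
	fmt.Println(len(deleted) == 1 && deleted[0] == stale.ID) // true
}
```

The real function does the same for agents as well, and appends the synthesized deletes back onto the update so the client's view and the cached maps stay consistent.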
// hostsToIPStrings returns a slice of all unique IP addresses in the values
// of the given map.
func hostsToIPStrings(hosts map[dnsname.FQDN][]netip.Addr) []string {
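For the truncated `hostsToIPStrings` above, the comment describes flattening the map's values and deduplicating the addresses. A minimal sketch of that shape, assuming only what the comment states (string keys stand in for `dnsname.FQDN`):

```go
package main

import (
	"fmt"
	"net/netip"
)

// uniqueIPStrings flattens the map's values and returns each IP address once,
// in no particular order. A sketch of the behavior the hostsToIPStrings
// comment describes, not the actual implementation.
func uniqueIPStrings(hosts map[string][]netip.Addr) []string {
	seen := make(map[netip.Addr]struct{})
	out := make([]string, 0, len(hosts))
	for _, addrs := range hosts {
		for _, addr := range addrs {
			if _, ok := seen[addr]; ok {
				continue
			}
			seen[addr] = struct{}{}
			out = append(out, addr.String())
		}
	}
	return out
}

func main() {
	a := netip.MustParseAddr("100.64.0.1")
	hosts := map[string][]netip.Addr{
		"ws1.coder.": {a},
		"ws2.coder.": {a, netip.MustParseAddr("100.64.0.2")},
	}
	fmt.Println(len(uniqueIPStrings(hosts))) // 2
}
```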