fix(coderd): ensure agent WebSocket conn is cleaned up (#19711) #20094


Merged: stirby merged 1 commit into release/2.26 from cj/cherry-2.26/gh-19711 on Oct 1, 2025
coderd/workspaceagents.go — 13 changes: 8 additions & 5 deletions

@@ -817,12 +817,13 @@ func (api *API) watchWorkspaceAgentContainers(rw http.ResponseWriter, r *http.Request) {
     var (
         ctx            = r.Context()
         workspaceAgent = httpmw.WorkspaceAgentParam(r)
+        logger         = api.Logger.Named("agent_container_watcher").With(slog.F("agent_id", workspaceAgent.ID))
     )

     // If the agent is unreachable, the request will hang. Assume that if we
     // don't get a response after 30s that the agent is unreachable.
-    dialCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
-    defer cancel()
+    dialCtx, dialCancel := context.WithTimeout(ctx, 30*time.Second)
+    defer dialCancel()
     apiAgent, err := db2sdk.WorkspaceAgent(
         api.DERPMap(),
         *api.TailnetCoordinator.Load(),
@@ -857,8 +858,7 @@ func (api *API) watchWorkspaceAgentContainers(rw http.ResponseWriter, r *http.Request) {
     }
     defer release()

-    watcherLogger := api.Logger.Named("agent_container_watcher").With(slog.F("agent_id", workspaceAgent.ID))
-    containersCh, closer, err := agentConn.WatchContainers(ctx, watcherLogger)
+    containersCh, closer, err := agentConn.WatchContainers(ctx, logger)
     if err != nil {
         httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
             Message: "Internal error watching agent's containers.",
@@ -877,14 +877,17 @@ func (api *API) watchWorkspaceAgentContainers(rw http.ResponseWriter, r *http.Request) {
         return
     }

+    ctx, cancel := context.WithCancel(r.Context())
+    defer cancel()
+
     // Here we close the websocket for reading, so that the websocket library will handle pings and
     // close frames.
     _ = conn.CloseRead(context.Background())

     ctx, wsNetConn := codersdk.WebsocketNetConn(ctx, conn, websocket.MessageText)
     defer wsNetConn.Close()

-    go httpapi.Heartbeat(ctx, conn)
+    go httpapi.HeartbeatClose(ctx, logger, cancel, conn)

     encoder := json.NewEncoder(wsNetConn)
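The substance of the fix is the final hunk. Previously the handler's context only ended when the request did, and httpapi.Heartbeat merely kept pings flowing; if the client went away while the handler was parked on the containers channel, nothing forced a teardown, so the watch and the agent connection held by `defer release()` lingered. Now the context is explicitly cancellable and the heartbeat goroutine is handed `cancel`, so a dead peer unwinds the whole handler. Below is a minimal sketch of that heartbeat-with-cancel pattern — not coderd's actual httpapi.HeartbeatClose, whose body isn't part of this diff; it only mirrors the signature visible above, and the interval and log message are illustrative. It assumes github.com/coder/websocket's Ping semantics.

    // Sketch only: the heartbeat-with-cancel pattern this fix wires in.
    package sketch

    import (
        "context"
        "time"

        "cdr.dev/slog"
        "github.com/coder/websocket"
    )

    func heartbeatClose(ctx context.Context, logger slog.Logger, cancel context.CancelFunc, conn *websocket.Conn) {
        ticker := time.NewTicker(15 * time.Second) // illustrative interval
        defer ticker.Stop()

        for {
            select {
            case <-ctx.Done():
                return
            case <-ticker.C:
            }
            // Once the peer is gone (or has sent a close frame that CloseRead
            // surfaced), Ping fails; cancel the handler's context so the watch
            // and the held agent connection unwind instead of leaking.
            if err := conn.Ping(ctx); err != nil {
                logger.Debug(ctx, "heartbeat ping failed, canceling handler", slog.Error(err))
                cancel()
                return
            }
        }
    }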
coderd/workspaceagents_internal_test.go — 137 changes: 136 additions & 1 deletion

@@ -59,10 +59,145 @@ func (fakeAgentProvider) Close() error {
     return nil
 }

+type channelCloser struct {
+    closeFn func()
+}
+
+func (c *channelCloser) Close() error {
+    c.closeFn()
+    return nil
+}
+
 func TestWatchAgentContainers(t *testing.T) {
     t.Parallel()

-    t.Run("WebSocketClosesProperly", func(t *testing.T) {
+    t.Run("CoderdWebSocketCanHandleClientClosing", func(t *testing.T) {
+        t.Parallel()
+
+        // This test ensures that the agent containers `/watch` websocket can gracefully
+        // handle the client websocket closing. This test was created in
+        // response to this issue: https://github.com/coder/coder/issues/19449
+
+        var (
+            ctx    = testutil.Context(t, testutil.WaitLong)
+            logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug).Named("coderd")
+
+            mCtrl        = gomock.NewController(t)
+            mDB          = dbmock.NewMockStore(mCtrl)
+            mCoordinator = tailnettest.NewMockCoordinator(mCtrl)
+            mAgentConn   = agentconnmock.NewMockAgentConn(mCtrl)
+
+            fAgentProvider = fakeAgentProvider{
+                agentConn: func(ctx context.Context, agentID uuid.UUID) (_ workspacesdk.AgentConn, release func(), _ error) {
+                    return mAgentConn, func() {}, nil
+                },
+            }
+
+            workspaceID = uuid.New()
+            agentID     = uuid.New()
+            resourceID  = uuid.New()
+            jobID       = uuid.New()
+            buildID     = uuid.New()
+
+            containersCh = make(chan codersdk.WorkspaceAgentListContainersResponse)
+
+            r = chi.NewMux()
+
+            api = API{
+                ctx: ctx,
+                Options: &Options{
+                    AgentInactiveDisconnectTimeout: testutil.WaitShort,
+                    Database:                       mDB,
+                    Logger:                         logger,
+                    DeploymentValues:               &codersdk.DeploymentValues{},
+                    TailnetCoordinator:             tailnettest.NewFakeCoordinator(),
+                },
+            }
+        )
+
+        var tailnetCoordinator tailnet.Coordinator = mCoordinator
+        api.TailnetCoordinator.Store(&tailnetCoordinator)
+        api.agentProvider = fAgentProvider
+
+        // Setup: Allow `ExtractWorkspaceAgentParams` to complete.
+        mDB.EXPECT().GetWorkspaceAgentByID(gomock.Any(), agentID).Return(database.WorkspaceAgent{
+            ID:               agentID,
+            ResourceID:       resourceID,
+            LifecycleState:   database.WorkspaceAgentLifecycleStateReady,
+            FirstConnectedAt: sql.NullTime{Valid: true, Time: dbtime.Now()},
+            LastConnectedAt:  sql.NullTime{Valid: true, Time: dbtime.Now()},
+        }, nil)
+        mDB.EXPECT().GetWorkspaceResourceByID(gomock.Any(), resourceID).Return(database.WorkspaceResource{
+            ID:    resourceID,
+            JobID: jobID,
+        }, nil)
+        mDB.EXPECT().GetProvisionerJobByID(gomock.Any(), jobID).Return(database.ProvisionerJob{
+            ID:   jobID,
+            Type: database.ProvisionerJobTypeWorkspaceBuild,
+        }, nil)
+        mDB.EXPECT().GetWorkspaceBuildByJobID(gomock.Any(), jobID).Return(database.WorkspaceBuild{
+            WorkspaceID: workspaceID,
+            ID:          buildID,
+        }, nil)
+
+        // And: Allow `db2sdk.WorkspaceAgent` to complete.
+        mCoordinator.EXPECT().Node(gomock.Any()).Return(nil)
+
+        // And: Allow `WatchContainers` to be called, returning our `containersCh` channel.
+        mAgentConn.EXPECT().WatchContainers(gomock.Any(), gomock.Any()).
+            DoAndReturn(func(_ context.Context, _ slog.Logger) (<-chan codersdk.WorkspaceAgentListContainersResponse, io.Closer, error) {
+                return containersCh, &channelCloser{closeFn: func() {
+                    close(containersCh)
+                }}, nil
+            })
+
+        // And: We mount the HTTP Handler
+        r.With(httpmw.ExtractWorkspaceAgentParam(mDB)).
+            Get("/workspaceagents/{workspaceagent}/containers/watch", api.watchWorkspaceAgentContainers)
+
+        // Given: We create the HTTP server
+        srv := httptest.NewServer(r)
+        defer srv.Close()
+
+        // And: Dial the WebSocket
+        wsURL := strings.Replace(srv.URL, "http://", "ws://", 1)
+        conn, resp, err := websocket.Dial(ctx, fmt.Sprintf("%s/workspaceagents/%s/containers/watch", wsURL, agentID), nil)
+        require.NoError(t, err)
+        if resp.Body != nil {
+            defer resp.Body.Close()
+        }
+
+        // And: Create a streaming decoder
+        decoder := wsjson.NewDecoder[codersdk.WorkspaceAgentListContainersResponse](conn, websocket.MessageText, logger)
+        defer decoder.Close()
+        decodeCh := decoder.Chan()
+
+        // And: We can successfully send through the channel.
+        testutil.RequireSend(ctx, t, containersCh, codersdk.WorkspaceAgentListContainersResponse{
+            Containers: []codersdk.WorkspaceAgentContainer{{
+                ID: "test-container-id",
+            }},
+        })
+
+        // And: Receive the data.
+        containerResp := testutil.RequireReceive(ctx, t, decodeCh)
+        require.Len(t, containerResp.Containers, 1)
+        require.Equal(t, "test-container-id", containerResp.Containers[0].ID)
+
+        // When: We close the WebSocket
+        conn.Close(websocket.StatusNormalClosure, "test closing connection")
+
+        // Then: We expect `containersCh` to be closed.
+        select {
+        case <-ctx.Done():
+            t.Fail()
+
+        case _, ok := <-containersCh:
+            require.False(t, ok, "channel is expected to be closed")
+        }
+    })
+
+    t.Run("CoderdWebSocketCanHandleAgentClosing", func(t *testing.T) {
         t.Parallel()

         // This test ensures that the agent containers `/watch` websocket can gracefully
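The closing assertion in the first subtest works because the handler defers closer.Close() on the io.Closer returned by WatchContainers, and the mock's channelCloser wires that call to close(containersCh): observing the channel close is proof the handler actually unwound after the client's close frame. A tiny self-contained sketch of that contract, with illustrative names rather than coderd's:

    // Sketch only: how a channel-backed io.Closer lets a test observe teardown.
    package sketch

    import "io"

    type channelCloser struct{ closeFn func() }

    func (c *channelCloser) Close() error {
        c.closeFn()
        return nil
    }

    // watch returns a stream plus a closer that tears the stream down,
    // mirroring the (ch, closer, err) shape of WatchContainers in the diff.
    func watch() (<-chan string, io.Closer) {
        ch := make(chan string)
        return ch, &channelCloser{closeFn: func() { close(ch) }}
    }

    func demo() bool {
        ch, closer := watch()
        _ = closer.Close() // what the handler's deferred Close does on unwind
        _, ok := <-ch      // a closed channel yields immediately with ok == false
        return ok          // false: the "watch" was observably torn down
    }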