refactor: convert workspacesdk.AgentConn to an interface #19392


Merged

DanielleMaywood merged 5 commits into main from danielle/flake/payload-too-large on Aug 20, 2025
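
The change converts workspacesdk.AgentConn from a concrete struct, whose embedded tailnet connection was reached through the exported Conn field, into an interface that a generated gomock implementation can stand in for. A minimal sketch of the resulting shape, with the method set inferred from call sites in this diff (the second WatchContainers parameter and the exact return types are assumptions, and the real interface carries more methods):

package workspacesdk

import (
	"context"
	"io"
	"time"

	"cdr.dev/slog"
	"tailscale.com/ipn/ipnstate"

	"github.com/coder/coder/v2/codersdk"
	"github.com/coder/coder/v2/tailnet"
)

// AgentConn is the interface callers now program against instead of a
// concrete *AgentConn struct.
type AgentConn interface {
	// TailnetConn exposes the underlying tailnet connection that callers
	// previously reached through the exported Conn field.
	TailnetConn() *tailnet.Conn
	// Ping reports latency and whether the connection is peer-to-peer.
	Ping(ctx context.Context) (time.Duration, bool, *ipnstate.PingResult, error)
	// WatchContainers streams container updates from the agent.
	WatchContainers(ctx context.Context, logger slog.Logger) (<-chan codersdk.WorkspaceAgentListContainersResponse, io.Closer, error)
	Close() error
}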
8 changes: 7 additions & 1 deletion Makefile
@@ -636,7 +636,8 @@ GEN_FILES := \
coderd/database/pubsub/psmock/psmock.go \
agent/agentcontainers/acmock/acmock.go \
agent/agentcontainers/dcspec/dcspec_gen.go \
- coderd/httpmw/loggermw/loggermock/loggermock.go
+ coderd/httpmw/loggermw/loggermock/loggermock.go \
+ codersdk/workspacesdk/agentconnmock/agentconnmock.go

# all gen targets should be added here and to gen/mark-fresh
gen: gen/db gen/golden-files $(GEN_FILES)
@@ -686,6 +687,7 @@ gen/mark-fresh:
agent/agentcontainers/acmock/acmock.go \
agent/agentcontainers/dcspec/dcspec_gen.go \
coderd/httpmw/loggermw/loggermock/loggermock.go \
+ codersdk/workspacesdk/agentconnmock/agentconnmock.go \
"

for file in $$files; do
@@ -729,6 +731,10 @@ coderd/httpmw/loggermw/loggermock/loggermock.go: coderd/httpmw/loggermw/logger.g
go generate ./coderd/httpmw/loggermw/loggermock/
touch "$@"

+ codersdk/workspacesdk/agentconnmock/agentconnmock.go: codersdk/workspacesdk/agentconn.go
+ go generate ./codersdk/workspacesdk/agentconnmock/
+ touch "$@"

agent/agentcontainers/dcspec/dcspec_gen.go: \
node_modules/.installed \
agent/agentcontainers/dcspec/devContainer.base.schema.json \
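The new Makefile rule regenerates the mock whenever codersdk/workspacesdk/agentconn.go changes. The go generate step it invokes presumably lives in a doc.go (or similarly named file) inside the agentconnmock package; the directive below is a sketch, with file name and flags assumed rather than shown in this diff, though the test further down constructs the mock as agentconnmock.NewMockAgentConn, which matches mockgen's default naming:

// Package agentconnmock contains a generated mock of workspacesdk.AgentConn
// for use in tests.
package agentconnmock

//go:generate mockgen -destination ./agentconnmock.go -package agentconnmock github.com/coder/coder/v2/codersdk/workspacesdk AgentConn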
14 changes: 7 additions & 7 deletions agent/agent_test.go
@@ -2750,9 +2750,9 @@ func TestAgent_Dial(t *testing.T) {

switch l.Addr().Network() {
case "tcp":
- conn, err = agentConn.Conn.DialContextTCP(ctx, ipp)
+ conn, err = agentConn.TailnetConn().DialContextTCP(ctx, ipp)
case "udp":
- conn, err = agentConn.Conn.DialContextUDP(ctx, ipp)
+ conn, err = agentConn.TailnetConn().DialContextUDP(ctx, ipp)
default:
t.Fatalf("unknown network: %s", l.Addr().Network())
}
@@ -2811,7 +2811,7 @@ func TestAgent_UpdatedDERP(t *testing.T) {
})

// Setup a client connection.
- newClientConn := func(derpMap *tailcfg.DERPMap, name string) *workspacesdk.AgentConn {
+ newClientConn := func(derpMap *tailcfg.DERPMap, name string) workspacesdk.AgentConn {
conn, err := tailnet.NewConn(&tailnet.Options{
Addresses: []netip.Prefix{tailnet.TailscaleServicePrefix.RandomPrefix()},
DERPMap: derpMap,
@@ -2891,13 +2891,13 @@

// Connect from a second client and make sure it uses the new DERP map.
conn2 := newClientConn(newDerpMap, "client2")
- require.Equal(t, []int{2}, conn2.DERPMap().RegionIDs())
+ require.Equal(t, []int{2}, conn2.TailnetConn().DERPMap().RegionIDs())
t.Log("conn2 got the new DERPMap")

// If the first client gets a DERP map update, it should be able to
// reconnect just fine.
- conn1.SetDERPMap(newDerpMap)
- require.Equal(t, []int{2}, conn1.DERPMap().RegionIDs())
+ conn1.TailnetConn().SetDERPMap(newDerpMap)
+ require.Equal(t, []int{2}, conn1.TailnetConn().DERPMap().RegionIDs())
t.Log("set the new DERPMap on conn1")
ctx, cancel := context.WithTimeout(context.Background(), testutil.WaitLong)
defer cancel()
@@ -3264,7 +3264,7 @@ func setupSSHSessionOnPort(
}

func setupAgent(t testing.TB, metadata agentsdk.Manifest, ptyTimeout time.Duration, opts ...func(*agenttest.Client, *agent.Options)) (
- *workspacesdk.AgentConn,
+ workspacesdk.AgentConn,
*agenttest.Client,
<-chan *proto.Stats,
afero.Fs,
4 changes: 2 additions & 2 deletions cli/ping.go
@@ -147,7 +147,7 @@ func (r *RootCmd) ping() *serpent.Command {
}
defer conn.Close()

- derpMap := conn.DERPMap()
+ derpMap := conn.TailnetConn().DERPMap()

diagCtx, diagCancel := context.WithTimeout(inv.Context(), 30*time.Second)
defer diagCancel()
@@ -156,7 +156,7 @@ func (r *RootCmd) ping() *serpent.Command {
// Silent ping to determine whether we should show diags
_, didP2p, _, _ := conn.Ping(ctx)

- ni := conn.GetNetInfo()
+ ni := conn.TailnetConn().GetNetInfo()
connDiags := cliui.ConnDiags{
DisableDirect: r.disableDirect,
LocalNetInfo: ni,
2 changes: 1 addition & 1 deletion cli/portforward.go
@@ -221,7 +221,7 @@ func (r *RootCmd) portForward() *serpent.Command {
func listenAndPortForward(
ctx context.Context,
inv *serpent.Invocation,
- conn *workspacesdk.AgentConn,
+ conn workspacesdk.AgentConn,
wg *sync.WaitGroup,
spec portForwardSpec,
logger slog.Logger,
4 changes: 2 additions & 2 deletions cli/speedtest.go
@@ -139,7 +139,7 @@ func (r *RootCmd) speedtest() *serpent.Command {
if err != nil {
continue
}
- status := conn.Status()
+ status := conn.TailnetConn().Status()
if len(status.Peers()) != 1 {
continue
}
@@ -189,7 +189,7 @@
outputResult.Intervals[i] = interval
}
}
- conn.Conn.SendSpeedtestTelemetry(outputResult.Overall.ThroughputMbits)
+ conn.TailnetConn().SendSpeedtestTelemetry(outputResult.Overall.ThroughputMbits)
out, err := formatter.Format(inv.Context(), outputResult)
if err != nil {
return err
12 changes: 6 additions & 6 deletions cli/ssh.go
@@ -590,7 +590,7 @@ func (r *RootCmd) ssh() *serpent.Command {
}

err = sshSession.Wait()
- conn.SendDisconnectedTelemetry()
+ conn.TailnetConn().SendDisconnectedTelemetry()
if err != nil {
if exitErr := (&gossh.ExitError{}); errors.As(err, &exitErr) {
// Clear the error since it's not useful beyond
@@ -1364,7 +1364,7 @@ func getUsageAppName(usageApp string) codersdk.UsageAppName {

func setStatsCallback(
ctx context.Context,
- agentConn *workspacesdk.AgentConn,
+ agentConn workspacesdk.AgentConn,
logger slog.Logger,
networkInfoDir string,
networkInfoInterval time.Duration,
@@ -1437,7 +1437,7 @@

now := time.Now()
cb(now, now.Add(time.Nanosecond), map[netlogtype.Connection]netlogtype.Counts{}, map[netlogtype.Connection]netlogtype.Counts{})
- agentConn.SetConnStatsCallback(networkInfoInterval, 2048, cb)
+ agentConn.TailnetConn().SetConnStatsCallback(networkInfoInterval, 2048, cb)
return errCh, nil
}

@@ -1451,13 +1451,13 @@ type sshNetworkStats struct {
UsingCoderConnect bool `json:"using_coder_connect"`
}

- func collectNetworkStats(ctx context.Context, agentConn *workspacesdk.AgentConn, start, end time.Time, counts map[netlogtype.Connection]netlogtype.Counts) (*sshNetworkStats, error) {
+ func collectNetworkStats(ctx context.Context, agentConn workspacesdk.AgentConn, start, end time.Time, counts map[netlogtype.Connection]netlogtype.Counts) (*sshNetworkStats, error) {
latency, p2p, pingResult, err := agentConn.Ping(ctx)
if err != nil {
return nil, err
}
- node := agentConn.Node()
- derpMap := agentConn.DERPMap()
+ node := agentConn.TailnetConn().Node()
+ derpMap := agentConn.TailnetConn().DERPMap()

totalRx := uint64(0)
totalTx := uint64(0)
6 changes: 3 additions & 3 deletions coderd/coderd.go
@@ -325,6 +325,9 @@ func New(options *Options) *API {
})
}

+ if options.PrometheusRegistry == nil {
+ 	options.PrometheusRegistry = prometheus.NewRegistry()
+ }
if options.Authorizer == nil {
options.Authorizer = rbac.NewCachingAuthorizer(options.PrometheusRegistry)
if buildinfo.IsDev() {
Comment on lines +328 to 333 (Contributor, Author):

This re-order fixes a nil pointer access when no PrometheusRegistry and Authorizer are passed through options.

@@ -381,9 +384,6 @@
if options.FilesRateLimit == 0 {
options.FilesRateLimit = 12
}
- if options.PrometheusRegistry == nil {
- 	options.PrometheusRegistry = prometheus.NewRegistry()
- }
if options.Clock == nil {
options.Clock = quartz.NewReal()
}
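The coderd.go hunks above move the PrometheusRegistry default ahead of the Authorizer default: rbac.NewCachingAuthorizer registers metrics against the registry it is handed, so with the old ordering an Options value that set neither field produced a nil registry at that call. A reduced, self-contained illustration of the hazard (the option names here are hypothetical, not coderd's):

package main

import "github.com/prometheus/client_golang/prometheus"

type options struct {
	registry *prometheus.Registry
	counter  prometheus.Counter
}

// applyDefaults must fill registry before any default that consumes it.
func applyDefaults(o *options) {
	if o.registry == nil {
		o.registry = prometheus.NewRegistry()
	}
	if o.counter == nil {
		o.counter = prometheus.NewCounter(prometheus.CounterOpts{Name: "example_total"})
		// With the two checks in the opposite order, registry could still be
		// nil here and MustRegister would panic with a nil pointer access.
		o.registry.MustRegister(o.counter)
	}
}

func main() {
	applyDefaults(&options{})
}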
4 changes: 2 additions & 2 deletions coderd/tailnet.go
@@ -277,9 +277,9 @@ func (s *ServerTailnet) dialContext(ctx context.Context, network, addr string) (
}, nil
}

- func (s *ServerTailnet) AgentConn(ctx context.Context, agentID uuid.UUID) (*workspacesdk.AgentConn, func(), error) {
+ func (s *ServerTailnet) AgentConn(ctx context.Context, agentID uuid.UUID) (workspacesdk.AgentConn, func(), error) {
var (
- conn *workspacesdk.AgentConn
+ conn workspacesdk.AgentConn
ret func()
)

186 changes: 186 additions & 0 deletions coderd/workspaceagents_internal_test.go
@@ -0,0 +1,186 @@
package coderd

import (
"bytes"
"context"
"database/sql"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/http/httputil"
"net/url"
"strings"
"testing"

"github.com/go-chi/chi/v5"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"

"cdr.dev/slog"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbmock"
"github.com/coder/coder/v2/coderd/database/dbtime"
"github.com/coder/coder/v2/coderd/httpmw"
"github.com/coder/coder/v2/coderd/workspaceapps/appurl"
"github.com/coder/coder/v2/codersdk"
"github.com/coder/coder/v2/codersdk/workspacesdk"
"github.com/coder/coder/v2/codersdk/workspacesdk/agentconnmock"
"github.com/coder/coder/v2/codersdk/wsjson"
"github.com/coder/coder/v2/tailnet"
"github.com/coder/coder/v2/tailnet/tailnettest"
"github.com/coder/coder/v2/testutil"
"github.com/coder/websocket"
)

type fakeAgentProvider struct {
agentConn func(ctx context.Context, agentID uuid.UUID) (_ workspacesdk.AgentConn, release func(), _ error)
}

func (fakeAgentProvider) ReverseProxy(targetURL, dashboardURL *url.URL, agentID uuid.UUID, app appurl.ApplicationURL, wildcardHost string) *httputil.ReverseProxy {
panic("unimplemented")
}

func (f fakeAgentProvider) AgentConn(ctx context.Context, agentID uuid.UUID) (_ workspacesdk.AgentConn, release func(), _ error) {
if f.agentConn != nil {
return f.agentConn(ctx, agentID)
}

panic("unimplemented")
}

func (fakeAgentProvider) ServeHTTPDebug(w http.ResponseWriter, r *http.Request) {
panic("unimplemented")
}

func (fakeAgentProvider) Close() error {
return nil
}

func TestWatchAgentContainers(t *testing.T) {
t.Parallel()

t.Run("WebSocketClosesProperly", func(t *testing.T) {
t.Parallel()

// This test ensures that the agent containers `/watch` websocket can gracefully
// handle the underlying websocket unexpectedly closing. This test was created in
// response to this issue: https://github.com/coder/coder/issues/19372

var (
ctx = testutil.Context(t, testutil.WaitShort)
logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug).Named("coderd")

mCtrl = gomock.NewController(t)
mDB = dbmock.NewMockStore(mCtrl)
mCoordinator = tailnettest.NewMockCoordinator(mCtrl)
mAgentConn = agentconnmock.NewMockAgentConn(mCtrl)

fAgentProvider = fakeAgentProvider{
agentConn: func(ctx context.Context, agentID uuid.UUID) (_ workspacesdk.AgentConn, release func(), _ error) {
return mAgentConn, func() {}, nil
},
}

workspaceID = uuid.New()
agentID = uuid.New()
resourceID = uuid.New()
jobID = uuid.New()
buildID = uuid.New()

containersCh = make(chan codersdk.WorkspaceAgentListContainersResponse)

r = chi.NewMux()

api = API{
ctx: ctx,
Options: &Options{
AgentInactiveDisconnectTimeout: testutil.WaitShort,
Database: mDB,
Logger: logger,
DeploymentValues: &codersdk.DeploymentValues{},
TailnetCoordinator: tailnettest.NewFakeCoordinator(),
},
}
)

var tailnetCoordinator tailnet.Coordinator = mCoordinator
api.TailnetCoordinator.Store(&tailnetCoordinator)
api.agentProvider = fAgentProvider

// Setup: Allow `ExtractWorkspaceAgentParams` to complete.
mDB.EXPECT().GetWorkspaceAgentByID(gomock.Any(), agentID).Return(database.WorkspaceAgent{
ID: agentID,
ResourceID: resourceID,
LifecycleState: database.WorkspaceAgentLifecycleStateReady,
FirstConnectedAt: sql.NullTime{Valid: true, Time: dbtime.Now()},
LastConnectedAt: sql.NullTime{Valid: true, Time: dbtime.Now()},
}, nil)
mDB.EXPECT().GetWorkspaceResourceByID(gomock.Any(), resourceID).Return(database.WorkspaceResource{
ID: resourceID,
JobID: jobID,
}, nil)
mDB.EXPECT().GetProvisionerJobByID(gomock.Any(), jobID).Return(database.ProvisionerJob{
ID: jobID,
Type: database.ProvisionerJobTypeWorkspaceBuild,
}, nil)
mDB.EXPECT().GetWorkspaceBuildByJobID(gomock.Any(), jobID).Return(database.WorkspaceBuild{
WorkspaceID: workspaceID,
ID: buildID,
}, nil)

// And: Allow `db2sdk.WorkspaceAgent` to complete.
mCoordinator.EXPECT().Node(gomock.Any()).Return(nil)

// And: Allow `WatchContainers` to be called, returning our `containersCh` channel.
mAgentConn.EXPECT().WatchContainers(gomock.Any(), gomock.Any()).
Return(containersCh, io.NopCloser(&bytes.Buffer{}), nil)

// And: We mount the HTTP Handler
r.With(httpmw.ExtractWorkspaceAgentParam(mDB)).
Get("/workspaceagents/{workspaceagent}/containers/watch", api.watchWorkspaceAgentContainers)

// Given: We create the HTTP server
srv := httptest.NewServer(r)
defer srv.Close()

// And: Dial the WebSocket
wsURL := strings.Replace(srv.URL, "http://", "ws://", 1)
conn, resp, err := websocket.Dial(ctx, fmt.Sprintf("%s/workspaceagents/%s/containers/watch", wsURL, agentID), nil)
require.NoError(t, err)
if resp.Body != nil {
defer resp.Body.Close()
}

// And: Create a streaming decoder
decoder := wsjson.NewDecoder[codersdk.WorkspaceAgentListContainersResponse](conn, websocket.MessageText, logger)
defer decoder.Close()
decodeCh := decoder.Chan()

// And: We can successfully send through the channel.
testutil.RequireSend(ctx, t, containersCh, codersdk.WorkspaceAgentListContainersResponse{
Containers: []codersdk.WorkspaceAgentContainer{{
ID: "test-container-id",
}},
})

// And: Receive the data.
containerResp := testutil.RequireReceive(ctx, t, decodeCh)
require.Len(t, containerResp.Containers, 1)
require.Equal(t, "test-container-id", containerResp.Containers[0].ID)

// When: We close the `containersCh`
close(containersCh)
Comment (Member): Should we have at least one test message sent first to confirm the base case works and then the exit?

Reply (Contributor, Author): Sounds good to me.

// Then: We expect `decodeCh` to be closed.
select {
case <-ctx.Done():
t.Fail()

case _, ok := <-decodeCh:
require.False(t, ok, "channel is expected to be closed")
}
})
}
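
A guard that commonly accompanies a struct-to-interface conversion like this one, assumed here rather than visible in the scraped diff, is a compile-time assertion in workspacesdk that the concrete connection type still satisfies the interface, so any drift between the two fails the build instead of a downstream caller:

// agentConn stands in for the unexported concrete type; the assertion costs
// nothing at runtime and fails compilation if the method sets diverge.
var _ AgentConn = (*agentConn)(nil)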