refactor: convert workspacesdk.AgentConn to an interface (#19392)
Changes from all commits: 1cc4903, ea4de03, fa0b97e, 123760a, 4c9a9e6
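
The payoff of this refactor shows up in the new test further down: handlers that previously required a real `*workspacesdk.AgentConn` can now be handed a gomock-generated `agentconnmock.MockAgentConn`. Below is a minimal sketch of the pattern, with a deliberately trimmed-down, hypothetical method set (the real `AgentConn` interface is much larger); consuming code depends only on the interface, so tests can inject a fake:

```go
package example

import (
	"context"
	"fmt"
)

// agentConn is a hypothetical, trimmed-down stand-in for the real
// workspacesdk.AgentConn interface.
type agentConn interface {
	// WatchContainers streams container updates until the context is done.
	WatchContainers(ctx context.Context) (<-chan string, error)
	Close() error
}

// fakeConn satisfies agentConn with no network I/O, standing in for a
// gomock-generated mock.
type fakeConn struct{ updates chan string }

func (f *fakeConn) WatchContainers(context.Context) (<-chan string, error) {
	return f.updates, nil
}

func (f *fakeConn) Close() error { return nil }

// watchOnce depends only on the interface, so production code passes the
// real connection and tests pass a fake.
func watchOnce(ctx context.Context, conn agentConn) (string, error) {
	ch, err := conn.WatchContainers(ctx)
	if err != nil {
		return "", err
	}
	select {
	case update, ok := <-ch:
		if !ok {
			return "", fmt.Errorf("watch channel closed")
		}
		return update, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}
```

With a concrete struct, the only way to exercise `watchOnce` would be to stand up a real tailnet connection; with the interface, the test below can drive the handler entirely through mocks.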
```diff
@@ -325,6 +325,9 @@ func New(options *Options) *API {
 		})
 	}
+	if options.PrometheusRegistry == nil {
+		options.PrometheusRegistry = prometheus.NewRegistry()
+	}
 	if options.Authorizer == nil {
 		options.Authorizer = rbac.NewCachingAuthorizer(options.PrometheusRegistry)
 		if buildinfo.IsDev() {
```

Review comment on lines +328 to 333: This re-order fixes a nil pointer access when no `PrometheusRegistry` is supplied in the options.
```diff
@@ -381,9 +384,6 @@ func New(options *Options) *API {
 	if options.FilesRateLimit == 0 {
 		options.FilesRateLimit = 12
 	}
-	if options.PrometheusRegistry == nil {
-		options.PrometheusRegistry = prometheus.NewRegistry()
-	}
 	if options.Clock == nil {
 		options.Clock = quartz.NewReal()
 	}
```
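
The two hunks are halves of one move: the `PrometheusRegistry` default is lifted above the `Authorizer` default that consumes it. The bug class is worth a standalone illustration; here is a contrived sketch with hypothetical types (not coderd's real ones) showing the ordering rule the re-order enforces:

```go
package main

import "fmt"

type registry struct{ name string }

type authorizer struct{ reg *registry }

func newAuthorizer(reg *registry) *authorizer { return &authorizer{reg: reg} }

type options struct {
	registry   *registry
	authorizer *authorizer
}

// apply defaults dependencies in topological order: the registry default
// runs first, so the authorizer default never captures a nil registry.
func apply(opts *options) *options {
	if opts.registry == nil {
		opts.registry = &registry{name: "default"}
	}
	if opts.authorizer == nil {
		opts.authorizer = newAuthorizer(opts.registry)
	}
	return opts
}

func main() {
	opts := apply(&options{})
	// With the order flipped, newAuthorizer would have received nil and
	// any later use of opts.authorizer.reg would panic.
	fmt.Println(opts.authorizer.reg.name) // default
}
```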
A new test file is added (@@ -0,0 +1,186 @@):
```go
package coderd

import (
	"bytes"
	"context"
	"database/sql"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/http/httputil"
	"net/url"
	"strings"
	"testing"

	"github.com/go-chi/chi/v5"
	"github.com/google/uuid"
	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"

	"cdr.dev/slog"
	"cdr.dev/slog/sloggers/slogtest"
	"github.com/coder/coder/v2/coderd/database"
	"github.com/coder/coder/v2/coderd/database/dbmock"
	"github.com/coder/coder/v2/coderd/database/dbtime"
	"github.com/coder/coder/v2/coderd/httpmw"
	"github.com/coder/coder/v2/coderd/workspaceapps/appurl"
	"github.com/coder/coder/v2/codersdk"
	"github.com/coder/coder/v2/codersdk/workspacesdk"
	"github.com/coder/coder/v2/codersdk/workspacesdk/agentconnmock"
	"github.com/coder/coder/v2/codersdk/wsjson"
	"github.com/coder/coder/v2/tailnet"
	"github.com/coder/coder/v2/tailnet/tailnettest"
	"github.com/coder/coder/v2/testutil"
	"github.com/coder/websocket"
)

type fakeAgentProvider struct {
	agentConn func(ctx context.Context, agentID uuid.UUID) (_ workspacesdk.AgentConn, release func(), _ error)
}

func (fakeAgentProvider) ReverseProxy(targetURL, dashboardURL *url.URL, agentID uuid.UUID, app appurl.ApplicationURL, wildcardHost string) *httputil.ReverseProxy {
	panic("unimplemented")
}

func (f fakeAgentProvider) AgentConn(ctx context.Context, agentID uuid.UUID) (_ workspacesdk.AgentConn, release func(), _ error) {
	if f.agentConn != nil {
		return f.agentConn(ctx, agentID)
	}
	panic("unimplemented")
}

func (fakeAgentProvider) ServeHTTPDebug(w http.ResponseWriter, r *http.Request) {
	panic("unimplemented")
}

func (fakeAgentProvider) Close() error {
	return nil
}

func TestWatchAgentContainers(t *testing.T) {
	t.Parallel()

	t.Run("WebSocketClosesProperly", func(t *testing.T) {
		t.Parallel()

		// This test ensures that the agent containers `/watch` websocket can gracefully
		// handle the underlying websocket unexpectedly closing. This test was created in
		// response to this issue: https://github.com/coder/coder/issues/19372
		var (
			ctx    = testutil.Context(t, testutil.WaitShort)
			logger = slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug).Named("coderd")

			mCtrl        = gomock.NewController(t)
			mDB          = dbmock.NewMockStore(mCtrl)
			mCoordinator = tailnettest.NewMockCoordinator(mCtrl)
			mAgentConn   = agentconnmock.NewMockAgentConn(mCtrl)

			fAgentProvider = fakeAgentProvider{
				agentConn: func(ctx context.Context, agentID uuid.UUID) (_ workspacesdk.AgentConn, release func(), _ error) {
					return mAgentConn, func() {}, nil
				},
			}

			workspaceID = uuid.New()
			agentID     = uuid.New()
			resourceID  = uuid.New()
			jobID       = uuid.New()
			buildID     = uuid.New()

			containersCh = make(chan codersdk.WorkspaceAgentListContainersResponse)

			r   = chi.NewMux()
			api = API{
				ctx: ctx,
				Options: &Options{
					AgentInactiveDisconnectTimeout: testutil.WaitShort,
					Database:                       mDB,
					Logger:                         logger,
					DeploymentValues:               &codersdk.DeploymentValues{},
					TailnetCoordinator:             tailnettest.NewFakeCoordinator(),
				},
			}
		)

		var tailnetCoordinator tailnet.Coordinator = mCoordinator
		api.TailnetCoordinator.Store(&tailnetCoordinator)
		api.agentProvider = fAgentProvider

		// Setup: Allow `ExtractWorkspaceAgentParams` to complete.
		mDB.EXPECT().GetWorkspaceAgentByID(gomock.Any(), agentID).Return(database.WorkspaceAgent{
			ID:               agentID,
			ResourceID:       resourceID,
			LifecycleState:   database.WorkspaceAgentLifecycleStateReady,
			FirstConnectedAt: sql.NullTime{Valid: true, Time: dbtime.Now()},
			LastConnectedAt:  sql.NullTime{Valid: true, Time: dbtime.Now()},
		}, nil)
		mDB.EXPECT().GetWorkspaceResourceByID(gomock.Any(), resourceID).Return(database.WorkspaceResource{
			ID:    resourceID,
			JobID: jobID,
		}, nil)
		mDB.EXPECT().GetProvisionerJobByID(gomock.Any(), jobID).Return(database.ProvisionerJob{
			ID:   jobID,
			Type: database.ProvisionerJobTypeWorkspaceBuild,
		}, nil)
		mDB.EXPECT().GetWorkspaceBuildByJobID(gomock.Any(), jobID).Return(database.WorkspaceBuild{
			WorkspaceID: workspaceID,
			ID:          buildID,
		}, nil)

		// And: Allow `db2sdk.WorkspaceAgent` to complete.
		mCoordinator.EXPECT().Node(gomock.Any()).Return(nil)

		// And: Allow `WatchContainers` to be called, returning our `containersCh` channel.
		mAgentConn.EXPECT().WatchContainers(gomock.Any(), gomock.Any()).
			Return(containersCh, io.NopCloser(&bytes.Buffer{}), nil)

		// And: We mount the HTTP Handler.
		r.With(httpmw.ExtractWorkspaceAgentParam(mDB)).
			Get("/workspaceagents/{workspaceagent}/containers/watch", api.watchWorkspaceAgentContainers)

		// Given: We create the HTTP server.
		srv := httptest.NewServer(r)
		defer srv.Close()

		// And: Dial the WebSocket.
		wsURL := strings.Replace(srv.URL, "http://", "ws://", 1)
		conn, resp, err := websocket.Dial(ctx, fmt.Sprintf("%s/workspaceagents/%s/containers/watch", wsURL, agentID), nil)
		require.NoError(t, err)
		if resp.Body != nil {
			defer resp.Body.Close()
		}

		// And: Create a streaming decoder.
		decoder := wsjson.NewDecoder[codersdk.WorkspaceAgentListContainersResponse](conn, websocket.MessageText, logger)
		defer decoder.Close()
		decodeCh := decoder.Chan()

		// And: We can successfully send through the channel.
		testutil.RequireSend(ctx, t, containersCh, codersdk.WorkspaceAgentListContainersResponse{
			Containers: []codersdk.WorkspaceAgentContainer{{
				ID: "test-container-id",
			}},
		})

		// And: Receive the data.
		containerResp := testutil.RequireReceive(ctx, t, decodeCh)
		require.Len(t, containerResp.Containers, 1)
		require.Equal(t, "test-container-id", containerResp.Containers[0].ID)

		// When: We close the `containersCh`.
		close(containersCh)

		// Then: We expect `decodeCh` to be closed.
		select {
		case <-ctx.Done():
			t.Fail()
		case _, ok := <-decodeCh:
			require.False(t, ok, "channel is expected to be closed")
		}
	})
}
```

Review thread on the `close(containersCh)` step:

> Should we have at least one test message sent first to confirm the base case works, and then the exit?

> Sounds good to me
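
For context on why closing `containersCh` should close `decodeCh`: the server-side handler presumably forwards updates from the watch channel to the websocket and performs a normal closure when the channel closes, which the client's `wsjson` decoder surfaces as a closed channel. The actual `watchWorkspaceAgentContainers` implementation is not shown in this diff, so the following is only an assumed sketch of that forwarding loop's shape:

```go
package example

import (
	"context"
	"encoding/json"

	"github.com/coder/coder/v2/codersdk"
	"github.com/coder/websocket"
)

// forwardContainers is a hypothetical server-side loop, not coderd's actual
// handler. When the source channel closes, the deferred normal closure
// propagates to the client, whose wsjson decoder then closes its channel.
func forwardContainers(ctx context.Context, conn *websocket.Conn, src <-chan codersdk.WorkspaceAgentListContainersResponse) error {
	defer conn.Close(websocket.StatusNormalClosure, "watch ended")
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case update, ok := <-src:
			if !ok {
				// Source closed (e.g. the agent went away): exit cleanly,
				// letting the deferred close signal end-of-stream.
				return nil
			}
			data, err := json.Marshal(update)
			if err != nil {
				return err
			}
			if err := conn.Write(ctx, websocket.MessageText, data); err != nil {
				return err
			}
		}
	}
}
```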