feat: add WaitUntilEmpty to LogSender #12159


Merged
spikecurtis merged 1 commit into main from spike/10534-wait-until-empty on Feb 20, 2024
30 changes: 30 additions & 0 deletions — codersdk/agentsdk/logs.go
@@ -437,6 +437,7 @@ func (l *LogSender) SendLoop(ctx context.Context, dest logDest) error {
l.exceededLogLimit = true
// no point in keeping anything we have queued around, server will not accept them
l.queues = make(map[uuid.UUID]*logQueue)
l.Broadcast() // might unblock WaitUntilEmpty
return LogLimitExceededError
}

@@ -451,6 +452,7 @@ func (l *LogSender) SendLoop(ctx context.Context, dest logDest) error {
if len(q.logs) == 0 {
// no empty queues
delete(l.queues, src)
l.Broadcast() // might unblock WaitUntilEmpty
continue
}
q.lastFlush = time.Now()
@@ -487,6 +489,34 @@ func (l *LogSender) GetScriptLogger(logSourceID uuid.UUID) ScriptLogger {
return ScriptLogger{srcID: logSourceID, sender: l}
}

// WaitUntilEmpty waits until the LogSender's queues are empty or the given context expires.
func (l *LogSender) WaitUntilEmpty(ctx context.Context) error {
ctxDone := false
nevermind := make(chan struct{})
defer close(nevermind)
go func() {
select {
case <-ctx.Done():
l.L.Lock()
defer l.L.Unlock()
ctxDone = true
l.Broadcast()
return
case <-nevermind:
return
}
}()
mafredri (Member) commented on Feb 15, 2024 (edited):


Why are we duplicating logic from SendLoop here? Since this method doesn't attempt to send, it's quite pointless unless the signaling is happening from a running SendLoop anyway.

Edit: Ah, nevermind, just realized this is only here to handle the user-provided context.

I think this could be greatly simplified:

func (l *LogSender) WaitUntilEmpty(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-l.allSent:
		return nil
	}
}

Contributor (Author) replied:

The problem with an allSent channel is how to arrange when it should be read. Closing the channel won't work, because you can't "unclose" it if more data gets queued.

Writing to the channel won't work if there is more than one caller of WaitUntilEmpty.
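
A minimal, hypothetical Go sketch (not code from this PR, and assuming nothing about the real LogSender internals) of the two failure modes described above: a single send on an "all sent" channel wakes at most one waiter, and a closed channel wakes everyone but can never be reused as a signal once more logs are queued.

// Hypothetical sketch: why a plain channel is awkward as an "all sent" signal.
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	allSent := make(chan struct{}, 1)

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			select {
			case <-allSent:
				fmt.Printf("waiter %d woke up\n", id)
			case <-time.After(200 * time.Millisecond):
				fmt.Printf("waiter %d never woke up\n", id)
			}
		}(i)
	}

	// One non-blocking send: exactly one waiter receives it and the other two
	// time out, so a send cannot signal "empty" to multiple WaitUntilEmpty callers.
	select {
	case allSent <- struct{}{}:
	default:
	}
	wg.Wait()

	// close(allSent) would wake every waiter, but the channel then stays
	// permanently "ready" even after new logs are enqueued and the queues are
	// no longer empty, so it cannot be reused as an emptiness signal.
	close(allSent)
	<-allSent
	fmt.Println("closed channel is ready forever")
}

This is presumably why the PR signals emptiness through the existing queue map and condition variable rather than a dedicated channel.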

Member replied:

True, it would work better/simpler if the send loop was channel-based as well. In that case, one approach could be this:

func (l *LogSender) WaitUntilEmpty(ctx context.Context) error {
	wait := make(chan struct{})
	l.waitUntilEmpty <- wait
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-wait:
		return nil
	}
}

// SendLoop
var waiters []chan struct{}
for {
	select {
	case <-tick:
	case wait := <-l.waitUntilEmpty:
		waiters = append(waiters, wait)
	}
	// ...
	if len(l.queues) == 0 {
		for _, wait := range waiters {
			close(wait)
		}
		waiters = nil
	}
}

But it's not quite as nice when retrofitted into the mutex-style loop.

Contributor (Author) replied:

The problem there is that it requires SendLoop to actually be running in order for WaitUntilEmpty() to return, but SendLoop might not be running even when the queues are already empty.

Channels are great for communicating between goroutines. What we really want here is to know when a condition is satisfied, regardless of which other goroutines are running, and for that sync.Cond is your friend.
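
For reference, a generic, self-contained sketch of that sync.Cond wait pattern, using a hypothetical queueSet type rather than the real LogSender: the waiter re-checks the condition in a loop while holding the lock, and anything that changes the condition calls Broadcast under the same lock.

// Generic sync.Cond wait pattern (illustrative only; hypothetical type).
package main

import (
	"fmt"
	"sync"
)

type queueSet struct {
	*sync.Cond
	queues map[string][]string
}

func newQueueSet() *queueSet {
	return &queueSet{
		Cond:   sync.NewCond(&sync.Mutex{}),
		queues: map[string][]string{},
	}
}

// waitUntilEmpty blocks until no queues remain, regardless of which other
// goroutines happen to be running.
func (q *queueSet) waitUntilEmpty() {
	q.L.Lock()
	defer q.L.Unlock()
	for len(q.queues) != 0 {
		q.Wait() // releases the lock while blocked, reacquires before returning
	}
}

// drain removes a queue and wakes any waiters so they can re-check the condition.
func (q *queueSet) drain(src string) {
	q.L.Lock()
	defer q.L.Unlock()
	delete(q.queues, src)
	q.Broadcast()
}

func main() {
	q := newQueueSet()
	q.queues["src1"] = []string{"a log line"}

	done := make(chan struct{})
	go func() {
		q.waitUntilEmpty()
		close(done)
	}()

	q.drain("src1")
	<-done
	fmt.Println("queues empty, waiter released")
}

The diff below continues the actual WaitUntilEmpty body, which applies this same wait loop plus the ctxDone flag so the caller's context can also end the wait.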

l.L.Lock()
defer l.L.Unlock()
for len(l.queues) != 0 && !ctxDone {
l.Wait()
}
if len(l.queues) == 0 {
return nil
}
return ctx.Err()
Member commented on lines +514 to +517:

Suggested change
-	if len(l.queues) == 0 {
-		return nil
-	}
-	return ctx.Err()
+	return ctx.Err()

We don't actually need this check; this way we give priority to context cancellation even if we happen to be done at the same time (which can be preferable in some cases).

}

type ScriptLogger struct {
sender *LogSender
srcID uuid.UUID
48 changes: 47 additions & 1 deletion — codersdk/agentsdk/logs_internal_test.go
@@ -56,6 +56,12 @@ func TestLogSender_Mainline(t *testing.T) {
loopErr <- err
}()

empty := make(chan error, 1)
go func() {
err := uut.WaitUntilEmpty(ctx)
empty <- err
}()

// since neither source has even been flushed, it should immediately Flush
// both, although the order is not controlled
var logReqs []*proto.BatchCreateLogsRequest
@@ -104,8 +110,11 @@
require.Equal(t, proto.Log_DEBUG, req.Logs[0].GetLevel())
require.Equal(t, t1, req.Logs[0].GetCreatedAt().AsTime())

err := testutil.RequireRecvCtx(ctx, t, empty)
require.NoError(t, err)

cancel()
err := testutil.RequireRecvCtx(testCtx, t, loopErr)
err = testutil.RequireRecvCtx(testCtx, t, loopErr)
require.ErrorIs(t, err, context.Canceled)

// we can still enqueue more logs after SendLoop returns
@@ -132,6 +141,12 @@ func TestLogSender_LogLimitExceeded(t *testing.T) {
Level: codersdk.LogLevelInfo,
})

empty := make(chan error, 1)
go func() {
err := uut.WaitUntilEmpty(ctx)
empty <- err
}()

loopErr := make(chan error, 1)
go func() {
err := uut.SendLoop(ctx, fDest)
@@ -146,6 +161,10 @@ func TestLogSender_LogLimitExceeded(t *testing.T) {
err := testutil.RequireRecvCtx(ctx, t, loopErr)
require.ErrorIs(t, err, LogLimitExceededError)

// Should also unblock WaitUntilEmpty
err = testutil.RequireRecvCtx(ctx, t, empty)
require.NoError(t, err)

// we can still enqueue more logs after SendLoop returns, but they don't
// actually get enqueued
uut.Enqueue(ls1, Log{
@@ -363,6 +382,33 @@ func TestLogSender_SendError(t *testing.T) {
uut.L.Unlock()
}

func TestLogSender_WaitUntilEmpty_ContextExpired(t *testing.T) {
t.Parallel()
testCtx := testutil.Context(t, testutil.WaitShort)
ctx, cancel := context.WithCancel(testCtx)
logger := slogtest.Make(t, nil).Leveled(slog.LevelDebug)
uut := NewLogSender(logger)

t0 := dbtime.Now()

ls1 := uuid.UUID{0x11}
uut.Enqueue(ls1, Log{
CreatedAt: t0,
Output: "test log 0, src 1",
Level: codersdk.LogLevelInfo,
})

empty := make(chan error, 1)
go func() {
err := uut.WaitUntilEmpty(ctx)
empty <- err
}()

cancel()
err := testutil.RequireRecvCtx(testCtx, t, empty)
require.ErrorIs(t, err, context.Canceled)
}

type fakeLogDest struct {
reqs chan *proto.BatchCreateLogsRequest
resps chan *proto.BatchCreateLogsResponse
