Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

feat: add boundary log forwarding from agent to coderd#21345

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub?Sign in to your account

Draft
zedkipp wants to merge 1 commit into main
base:main
Choose a base branch
Loading
fromzedkipp/boundary-logs
Draft
Show file tree
Hide file tree
Changes fromall commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
feat: add boundary log forwarding from agent to coderd
Add receiving boundary logs via a stream Unix socket in the agent, forwarding of boundary audit logs from agent to coderd via the agent API, and re-emission of boundary logs to coderd stderr. Log format example: [API] 2025-12-08 20:58:46.093 [warn] boundary: workspace.id=... decision=deny http.method="GET" http.url="..." time="..."
  • Loading branch information
@zedkipp
zedkipp committedDec 20, 2025
commitce980b9fc44c6a585daf6e64af7f422240df689b
37 changes: 37 additions & 0 deletionsagent/agent.go
View file
Open in desktop
Original file line numberDiff line numberDiff line change
Expand Up@@ -43,6 +43,7 @@ import (
"github.com/coder/coder/v2/agent/agentscripts"
"github.com/coder/coder/v2/agent/agentsocket"
"github.com/coder/coder/v2/agent/agentssh"
"github.com/coder/coder/v2/agent/boundarylogproxy"
"github.com/coder/coder/v2/agent/proto"
"github.com/coder/coder/v2/agent/proto/resourcesmonitor"
"github.com/coder/coder/v2/agent/reconnectingpty"
Expand DownExpand Up@@ -277,6 +278,10 @@ type agent struct {

logSender *agentsdk.LogSender

// boundaryLogProxy is a socket server that forwards boundary audit logs to coderd.
// It may be nil if there is a problem starting the server.
boundaryLogProxy *boundarylogproxy.Server

prometheusRegistry *prometheus.Registry
// metrics are prometheus registered metrics that will be collected and
// labeled in Coder with the agent + workspace.
Expand DownExpand Up@@ -371,6 +376,7 @@ func (a *agent) init() {
)

a.initSocketServer()
a.startBoundaryLogProxyServer()

go a.runLoop()
}
Expand All@@ -395,6 +401,21 @@ func (a *agent) initSocketServer() {
a.logger.Debug(a.hardCtx, "socket server started", slog.F("path", a.socketPath))
}

// startBoundaryLogProxyServer starts the boundary log proxy socket server.
// On failure the agent keeps running without boundary log forwarding:
// a.boundaryLogProxy stays nil and the forwarder is never wired up.
func (a *agent) startBoundaryLogProxyServer() {
	const boundaryAuditSocketPath = "/tmp/boundary-audit.sock"

	srv := boundarylogproxy.NewServer(a.logger, boundaryAuditSocketPath)
	err := srv.Start()
	if err != nil {
		a.logger.Warn(a.hardCtx, "failed to start boundary log proxy", slog.Error(err))
		return
	}

	a.boundaryLogProxy = srv
	a.logger.Info(a.hardCtx, "boundary log proxy server started",
		slog.F("socket_path", boundaryAuditSocketPath))
}

// runLoop attempts to start the agent in a retry loop.
// Coder may be offline temporarily, a connection issue
// may be happening, but regardless after the intermittent
Expand DownExpand Up@@ -1012,6 +1033,15 @@ func (a *agent) run() (retErr error) {
return err
})

// Forward boundary audit logs to coderd if boundary log forwarding is enabled.
// These are audit logs so they should continue during graceful shutdown.
if a.boundaryLogProxy != nil {
proxyFunc := func(ctx context.Context, aAPI proto.DRPCAgentClient27) error {
return a.boundaryLogProxy.RunForwarder(ctx, aAPI)
}
connMan.startAgentAPI("boundary log proxy", gracefulShutdownBehaviorRemain, proxyFunc)
}

// part of graceful shut down is reporting the final lifecycle states, e.g "ShuttingDown" so the
// lifecycle reporting has to be via gracefulShutdownBehaviorRemain
connMan.startAgentAPI("report lifecycle", gracefulShutdownBehaviorRemain, a.reportLifecycle)
Expand DownExpand Up@@ -1982,6 +2012,13 @@ func (a *agent) Close() error {
a.logger.Error(a.hardCtx, "container API close", slog.Error(err))
}

if a.boundaryLogProxy != nil {
err = a.boundaryLogProxy.Close()
if err != nil {
a.logger.Warn(context.Background(), "close boundary log proxy", slog.Error(err))
}
}

// Wait for the graceful shutdown to complete, but don't wait forever so
// that we don't break user expectations.
go func() {
Expand Down
228 changes: 225 additions & 3 deletionsagent/boundarylogproxy/proxy.go
View file
Open in desktop
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,229 @@
// Package boundarylogproxy provides a Unix socket server that receives boundary
// audit logs and forwards them to coderd via the agent API.
//
// Wire Format:
// Boundary sends tag- and length-prefixed protobuf messages over the Unix socket (TLV).
// Each message starts with a 4-byte big-endian uint32 header:
//   - top 4 bits: tag (always 1 for now)
//   - low 28 bits: length of the protobuf payload
// followed by length bytes of encoded protobuf data.
package boundarylogproxy

// Server a placeholder for the server that will listen on a Unix socket for
// boundary logs to be forwarded.
type Server struct{}
import (
"context"
"encoding/binary"
"errors"
"io"
"net"
"os"
"sync"
"time"

"golang.org/x/xerrors"
"google.golang.org/protobuf/proto"

"cdr.dev/slog"
agentproto "github.com/coder/coder/v2/agent/proto"
)

const (
	// logBufferSize is the size of the channel buffer for incoming log requests
	// from workspaces. This buffer size is intended to handle short bursts of workspaces
	// forwarding batches of logs in parallel. When the buffer is full, new
	// batches are dropped (see handleConnection) rather than blocking boundary.
	logBufferSize = 100
)

// Reporter reports boundary logs from workspaces. It is the minimal subset of
// the agent API that RunForwarder needs, keeping the forwarder decoupled from
// the concrete DRPC client.
type Reporter interface {
	ReportBoundaryLogs(ctx context.Context, req *agentproto.ReportBoundaryLogsRequest) (*agentproto.ReportBoundaryLogsResponse, error)
}

// Server listens on a Unix socket for boundary log messages and buffers them
// for forwarding to coderd. The socket server and the forwarder are decoupled:
//   - Start() creates the socket and accepts a connection from boundary
//   - RunForwarder() drains the buffer and sends logs to coderd via AgentAPI
type Server struct {
	logger     slog.Logger
	socketPath string // path of the Unix socket created by Start

	listener net.Listener       // set by Start; closed by Close
	cancel   context.CancelFunc // cancels the accept loop and connection handlers
	wg       sync.WaitGroup     // tracks the accept loop and per-connection goroutines

	// logs buffers incoming log requests for the forwarder to drain.
	logs chan *agentproto.ReportBoundaryLogsRequest
}

// NewServer creates a new boundary log proxy server. The returned server is
// inert until Start is called.
func NewServer(logger slog.Logger, socketPath string) *Server {
	srv := &Server{
		logger:     logger.Named("boundary-log-proxy"),
		socketPath: socketPath,
		logs:       make(chan *agentproto.ReportBoundaryLogsRequest, logBufferSize),
	}
	return srv
}

// Start begins listening for connections on the Unix socket, and handles new
// connections in a separate goroutine. Incoming logs from connections are
// buffered until RunForwarder drains them.
func (s *Server) Start() error {
	// A stale socket file from a previous run would make Listen fail, so
	// remove it first; a missing file is not an error.
	if err := os.Remove(s.socketPath); err != nil && !os.IsNotExist(err) {
		return xerrors.Errorf("remove existing socket: %w", err)
	}

	l, err := net.Listen("unix", s.socketPath)
	if err != nil {
		return xerrors.Errorf("listen on socket: %w", err)
	}
	s.listener = l

	// The accept loop and all per-connection goroutines are tied to this
	// context; Close cancels it and then waits on the WaitGroup.
	ctx, cancel := context.WithCancel(context.Background())
	s.cancel = cancel

	s.wg.Add(1)
	go s.acceptLoop(ctx)

	s.logger.Info(ctx, "boundary log proxy started", slog.F("socket_path", s.socketPath))
	return nil
}

// RunForwarder drains the log buffer and forwards logs to coderd.
// This should be called via startAgentAPI to ensure the API client is always
// current and to handle reconnections properly. It blocks until ctx is canceled.
func (s *Server) RunForwarder(ctx context.Context, sender Reporter) error {
s.logger.Debug(ctx, "boundary log forwarder started")

var intervalForwardCount, intervalTotalLogCount uint32
forwardCounterInterval := 30 * time.Second
forwardCountDebugLogTimer := time.NewTicker(forwardCounterInterval)

for {
select {
case <-ctx.Done():
return ctx.Err()
case <-forwardCountDebugLogTimer.C:
s.logger.Debug(ctx, "forwarded boundary logs",
slog.F("log_count", intervalForwardCount),
slog.F("interval", forwardCounterInterval))
intervalForwardCount = 0
intervalTotalLogCount = 0
case req := <-s.logs:
intervalForwardCount++
intervalTotalLogCount += uint32(len(req.Logs))

Check failure on line 111 in agent/boundarylogproxy/proxy.go

View workflow job for this annotation

GitHub Actions/ lint

G115: integer overflow conversion int -> uint32 (gosec)

_, err := sender.ReportBoundaryLogs(ctx, req)
if err != nil {
s.logger.Warn(ctx, "failed to forward boundary logs",
slog.Error(err),
slog.F("log_count", len(req.Logs)))
// Continue forwarding other logs. The current batch is lost,
//but the socket stays alive.
}
}
}
}

func (s *Server) acceptLoop(ctx context.Context) {
defer s.wg.Done()

for {
conn, err := s.listener.Accept()
if err != nil {
if ctx.Err() != nil {
return
}
s.logger.Warn(ctx, "accept error", slog.Error(err))

Check failure on line 134 in agent/boundarylogproxy/proxy.go

View workflow job for this annotation

GitHub Actions/ lint

ruleguard: Message "accept error" is too short, it must be at least 16 characters long. (gocritic)
continue
}

s.wg.Add(1)
go s.handleConnection(ctx, conn)
}
}

func (s *Server) handleConnection(ctx context.Context, conn net.Conn) {
defer s.wg.Done()

ctx, cancel := context.WithCancel(ctx)
defer cancel()

s.wg.Add(1)
go func() {
defer s.wg.Done()
<-ctx.Done()
_ = conn.Close()
}()

// Even though the length of data received can be larger than maxMsgSize,
// practically they are not expected to be. This is a sanity check and
// allows re-using a small fixed size read buffer.
const maxMsgSize = 1 << 15
buf := make([]byte, maxMsgSize)

for {
select {
case <-ctx.Done():
return
default:
}

var header uint32
if err := binary.Read(conn, binary.BigEndian, &header); err != nil {
if errors.Is(err, io.EOF) || errors.Is(err, net.ErrClosed) {
return
}
s.logger.Warn(ctx, "read length error", slog.Error(err))
return
}

length := header & 0x0FFFFFFF
tag := header >> 28

if tag != 1 {
s.logger.Warn(ctx, "invalid tag", slog.F("tag", tag))

Check failure on line 182 in agent/boundarylogproxy/proxy.go

View workflow job for this annotation

GitHub Actions/ lint

ruleguard: Message "invalid tag" is too short, it must be at least 16 characters long. (gocritic)
return
}

if length > maxMsgSize {
s.logger.Warn(ctx, "message too large", slog.F("length", length))
return
}

if _, err := io.ReadFull(conn, buf[:length]); err != nil {
s.logger.Warn(ctx, "read body error", slog.Error(err))

Check failure on line 192 in agent/boundarylogproxy/proxy.go

View workflow job for this annotation

GitHub Actions/ lint

ruleguard: Message "read body error" is too short, it must be at least 16 characters long. (gocritic)
return
}

var req agentproto.ReportBoundaryLogsRequest
if err := proto.Unmarshal(buf[:length], &req); err != nil {
s.logger.Warn(ctx, "unmarshal error", slog.Error(err))

Check failure on line 198 in agent/boundarylogproxy/proxy.go

View workflow job for this annotation

GitHub Actions/ lint

ruleguard: Message "unmarshal error" is too short, it must be at least 16 characters long. (gocritic)
continue
}

select {
case s.logs <- &req:
default:
s.logger.Warn(ctx, "dropping boundary logs, buffer full",
slog.F("log_count", len(req.Logs)))
}
}
}

// Close stops the server and blocks until resources have been cleaned up.
// It must be called after Start.
func (s *Server) Close() error {
	// Cancel first so connection handlers close their conns, then close the
	// listener so the accept loop unblocks.
	if s.cancel != nil {
		s.cancel()
	}
	if s.listener != nil {
		_ = s.listener.Close()
	}

	// Wait for the accept loop and every per-connection goroutine to exit.
	s.wg.Wait()

	// Remove the socket file; a file that is already gone is not an error.
	if err := os.Remove(s.socketPath); err != nil && !errors.Is(err, os.ErrNotExist) {
		return err
	}
	return nil
}
Loading
Loading

[8]ページ先頭

©2009-2025 Movatter.jp