Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

chore: file cache Release tied 1:1 with an acquire#18410

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
Emyrk wants to merge 3 commits into main
base:main
Choose a base branch
Loading
from stevenmasley/cache_releaseable
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 35 additions & 6 deletions coderd/files/cache.go
View file
Open in desktop
Original file line numberDiff line numberDiff line change
Expand Up@@ -3,6 +3,7 @@ package files
import (
"bytes"
"context"
"io"
"io/fs"
"sync"

Expand DownExpand Up@@ -140,20 +141,36 @@ type cacheEntry struct {

type fetcher func(context.Context, uuid.UUID) (CacheEntryValue, error)

// Compile-time assertions that *CloseFS implements fs.FS and io.Closer.
var (
_ fs.FS = (*CloseFS)(nil)
_ io.Closer = (*CloseFS)(nil)
)

// CloseFS is a wrapper around fs.FS that implements io.Closer. The Close()
// method tells the cache to release the fileID. Once all open references are
// closed, the file is removed from the cache.
type CloseFS struct {
fs.FS

// close is installed by Cache.Acquire; it decrements the cache's
// reference count for the underlying fileID.
close func() error
}

// Close releases this reference by invoking the callback installed by
// Cache.Acquire.
func (f *CloseFS) Close() error { return f.close() }
Comment on lines +152 to +158
Copy link
MemberAuthor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I named this Close to match the pattern of opening a file and deferring a Close


// Acquire will load the fs.FS for the given file. It guarantees that parallel
// calls for the same fileID will only result in one fetch, and that parallel
// calls for distinct fileIDs will fetch in parallel.
//
// Safety: Every call to Acquire that does not return an error must have a
// matching call to Release.
func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) {
func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (*CloseFS, error) {
// It's important that this `Load` call occurs outside of `prepare`, after the
// mutex has been released, or we would continue to hold the lock until the
// entire file has been fetched, which may be slow, and would prevent other
// files from being fetched in parallel.
it, err := c.prepare(ctx, fileID).Load()
if err != nil {
c.Release(fileID)
c.release(fileID)
return nil, err
}

Expand All@@ -163,11 +180,20 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) {
}
// Always check the caller can actually read the file.
if err := c.authz.Authorize(ctx, subject, policy.ActionRead, it.Object); err != nil {
c.Release(fileID)
c.release(fileID)
return nil, err
}

return it.FS, err
var once sync.Once
return &CloseFS{
FS: it.FS,
close: func() error {
// sync.Once makes the Close() idempotent, so we can call it
// multiple times without worrying about double-releasing.
once.Do(func() { c.release(fileID) })
return nil
},
}, nil
}

func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithError[CacheEntryValue] {
Expand DownExpand Up@@ -203,9 +229,12 @@ func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithEr
return entry.value
}

//Release decrements the reference count for the given fileID, and frees the
//release decrements the reference count for the given fileID, and frees the
// backing data if there are no further references being held.
func (c *Cache) Release(fileID uuid.UUID) {
//
// release should only be called after a successful call to Acquire using the Release()
// method on the returned *CloseFS.
func (c *Cache) release(fileID uuid.UUID) {
Copy link
MemberAuthor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others.Learn more.

Unexported to force callers to use the Close method for releasing.

c.lock.Lock()
defer c.lock.Unlock()

Expand Down
15 changes: 9 additions & 6 deletions coderd/files/cache_test.go
View file
Open in desktop
Original file line numberDiff line numberDiff line change
Expand Up@@ -75,7 +75,7 @@ func TestCacheRBAC(t *testing.T) {
require.Equal(t, 0, cache.Count())

// Read the file with a file reader to put it into the cache.
_, err := cache.Acquire(cacheReader, file.ID)
a, err := cache.Acquire(cacheReader, file.ID)
require.NoError(t, err)
require.Equal(t, 1, cache.Count())

Expand All@@ -86,12 +86,12 @@ func TestCacheRBAC(t *testing.T) {
require.Equal(t, 1, cache.Count())

// UserReader can
_, err = cache.Acquire(userReader, file.ID)
b, err:= cache.Acquire(userReader, file.ID)
require.NoError(t, err)
require.Equal(t, 1, cache.Count())

cache.Release(file.ID)
cache.Release(file.ID)
_ = a.Close()
_ = b.Close()
require.Equal(t, 0, cache.Count())

rec.AssertActorID(t, nobodyID.String(), rec.Pair(policy.ActionRead, file))
Expand DownExpand Up@@ -179,13 +179,15 @@ func TestRelease(t *testing.T) {
ids = append(ids, uuid.New())
}

releases := make(map[uuid.UUID][]func() error, 0)
// Acquire a bunch of references
batchSize := 10
for openedIdx, id := range ids {
for batchIdx := range batchSize {
it, err := c.Acquire(ctx, id)
require.NoError(t, err)
require.Equal(t, emptyFS, it)
require.Equal(t, emptyFS, it.FS)
releases[id] = append(releases[id], it.Close)

// Each time a new file is opened, the metrics should be updated as so:
opened := openedIdx + 1
Expand All@@ -206,7 +208,8 @@ func TestRelease(t *testing.T) {
for closedIdx, id := range ids {
stillOpen := len(ids) - closedIdx
for closingIdx := range batchSize {
c.Release(id)
_ = releases[id][0]()
releases[id] = releases[id][1:]

// Each time a file is released, the metrics should decrement the file refs
require.Equal(t, (stillOpen*batchSize)-(closingIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil))
Expand Down
11 changes: 7 additions & 4 deletions coderd/parameters.go
View file
Open in desktop
Original file line numberDiff line numberDiff line change
Expand Up@@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"encoding/json"
"io/fs"
"net/http"
"time"

Expand DownExpand Up@@ -144,15 +145,17 @@ func (api *API) handleDynamicParameters(listen bool, rw http.ResponseWriter, r *
}

// Add the file first. Calling `Release` if it fails is a no-op, so this is safe.
templateFS, err := api.FileCache.Acquire(fileCtx, fileID)
var templateFS fs.FS
closeableTemplateFS, err := api.FileCache.Acquire(fileCtx, fileID)
if err != nil {
httpapi.Write(ctx, rw, http.StatusNotFound, codersdk.Response{
Message: "Internal error fetching template version Terraform.",
Detail: err.Error(),
})
return
}
defer api.FileCache.Release(fileID)
defer closeableTemplateFS.Close()
templateFS = closeableTemplateFS

// Having the Terraform plan available for the evaluation engine is helpful
// for populating values from data blocks, but isn't strictly required. If
Expand All@@ -171,9 +174,9 @@ func (api *API) handleDynamicParameters(listen bool, rw http.ResponseWriter, r *
})
return
}
deferapi.FileCache.Release(tf.CachedModuleFiles.UUID)
defermoduleFilesFS.Close()

templateFS = files.NewOverlayFS(templateFS, []files.Overlay{{Path: ".terraform/modules", FS: moduleFilesFS}})
templateFS = files.NewOverlayFS(closeableTemplateFS, []files.Overlay{{Path: ".terraform/modules", FS: moduleFilesFS}})
}

owner, err := getWorkspaceOwnerData(ctx, api.Database, apikey.UserID, templateVersion.OrganizationID)
Expand Down
Loading

[8]ページ先頭

©2009-2025 Movatter.jp