- Notifications
You must be signed in to change notification settings - Fork 926
chore: file cache Release tied 1:1 with an acquire #18410
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Uh oh!
There was an error while loading. Please reload this page.
Changes from all commits
55dfb22
cfc36e9
59a19f3
9a3d811
9317020
9a21fee
e640600
File filter
Filter by extension
Conversations
Uh oh!
There was an error while loading. Please reload this page.
Jump to
Uh oh!
There was an error while loading. Please reload this page.
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -140,20 +140,33 @@ type cacheEntry struct { | ||
type fetcher func(context.Context, uuid.UUID) (CacheEntryValue, error) | ||
var _ fs.FS = (*CloseFS)(nil) | ||
// CloseFS is a wrapper around fs.FS that implements io.Closer. The Close() | ||
// method tells the cache to release the fileID. Once all open references are | ||
// closed, the file is removed from the cache. | ||
type CloseFS struct { | ||
fs.FS | ||
close func() | ||
} | ||
func (f *CloseFS) Close() { f.close() } | ||
// Acquire will load the fs.FS for the given file. It guarantees that parallel | ||
// calls for the same fileID will only result in one fetch, and that parallel | ||
// calls for distinct fileIDs will fetch in parallel. | ||
// | ||
// Safety: Every call to Acquire that does not return an error must have a | ||
// matching call to Release. | ||
func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (*CloseFS, error) { | ||
// It's important that this `Load` call occurs outside `prepare`, after the | ||
// mutex has been released, or we would continue to hold the lock until the | ||
// entire file has been fetched, which may be slow, and would prevent other | ||
// files from being fetched in parallel. | ||
it, err := c.prepare(ctx, fileID).Load() | ||
if err != nil { | ||
c.release(fileID) | ||
return nil, err | ||
} | ||
@@ -163,11 +176,19 @@ func (c *Cache) Acquire(ctx context.Context, fileID uuid.UUID) (fs.FS, error) { | ||
} | ||
// Always check the caller can actually read the file. | ||
if err := c.authz.Authorize(ctx, subject, policy.ActionRead, it.Object); err != nil { | ||
c.release(fileID) | ||
return nil, err | ||
} | ||
var once sync.Once | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'd rather store the There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. You also have to store the It would be something like:

	type CloseFS struct {
		fs.FS
		once   sync.Once
		fileID uuid.UUID
		cache  *Cache
	}

	func (f *CloseFS) Close() error {
		f.once.Do(func() { f.cache.release(f.fileID) })
	}

I do not want | ||
return &CloseFS{ | ||
FS: it.FS, | ||
close: func() { | ||
// sync.Once makes the Close() idempotent, so we can call it | ||
// multiple times without worrying about double-releasing. | ||
once.Do(func() { c.release(fileID) }) | ||
}, | ||
}, nil | ||
} | ||
func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithError[CacheEntryValue] { | ||
@@ -203,9 +224,12 @@ func (c *Cache) prepare(ctx context.Context, fileID uuid.UUID) *lazy.ValueWithEr | ||
return entry.value | ||
} | ||
// release decrements the reference count for the given fileID, and frees the | ||
// backing data if there are no further references being held. | ||
// | ||
// release should only be called after a successful call to Acquire using the Release() | ||
// method on the returned *CloseFS. | ||
func (c *Cache) release(fileID uuid.UUID) { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Unexported to force callers to use the | ||
c.lock.Lock() | ||
defer c.lock.Unlock() | ||
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -75,7 +75,7 @@ func TestCacheRBAC(t *testing.T) { | ||
require.Equal(t, 0, cache.Count()) | ||
// Read the file with a file reader to put it into the cache. | ||
a, err := cache.Acquire(cacheReader, file.ID) | ||
require.NoError(t, err) | ||
require.Equal(t, 1, cache.Count()) | ||
@@ -86,12 +86,12 @@ func TestCacheRBAC(t *testing.T) { | ||
require.Equal(t, 1, cache.Count()) | ||
// UserReader can | ||
b, err := cache.Acquire(userReader, file.ID) | ||
require.NoError(t, err) | ||
require.Equal(t, 1, cache.Count()) | ||
a.Close() | ||
b.Close() | ||
require.Equal(t, 0, cache.Count()) | ||
rec.AssertActorID(t, nobodyID.String(), rec.Pair(policy.ActionRead, file)) | ||
@@ -179,13 +179,15 @@ func TestRelease(t *testing.T) { | ||
ids = append(ids, uuid.New()) | ||
} | ||
releases := make(map[uuid.UUID][]func(), 0) | ||
// Acquire a bunch of references | ||
batchSize := 10 | ||
for openedIdx, id := range ids { | ||
for batchIdx := range batchSize { | ||
it, err := c.Acquire(ctx, id) | ||
require.NoError(t, err) | ||
require.Equal(t, emptyFS, it.FS) | ||
releases[id] = append(releases[id], it.Close) | ||
// Each time a new file is opened, the metrics should be updated as so: | ||
opened := openedIdx + 1 | ||
@@ -206,7 +208,8 @@ func TestRelease(t *testing.T) { | ||
for closedIdx, id := range ids { | ||
stillOpen := len(ids) - closedIdx | ||
for closingIdx := range batchSize { | ||
releases[id][0]() | ||
releases[id] = releases[id][1:] | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. just change the loops to iterate over this There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I didn't feel like changing the test. This works | ||
// Each time a file is released, the metrics should decrement the file refs | ||
require.Equal(t, (stillOpen*batchSize)-(closingIdx+1), promhelp.GaugeValue(t, reg, cachePromMetricName("open_file_refs_current"), nil)) | ||
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -4,6 +4,7 @@ import ( | ||
"context" | ||
"database/sql" | ||
"encoding/json" | ||
"io/fs" | ||
"net/http" | ||
"time" | ||
@@ -144,15 +145,19 @@ func (api *API) handleDynamicParameters(listen bool, rw http.ResponseWriter, r * | ||
} | ||
// Add the file first. Calling `Release` if it fails is a no-op, so this is safe. | ||
var templateFS fs.FS | ||
closeableTemplateFS, err := api.FileCache.Acquire(fileCtx, fileID) | ||
if err != nil { | ||
httpapi.Write(ctx, rw, http.StatusNotFound, codersdk.Response{ | ||
Message: "Internal error fetching template version Terraform.", | ||
Detail: err.Error(), | ||
}) | ||
return | ||
} | ||
defer closeableTemplateFS.Close() | ||
// templateFS does not implement the Close method. For it to be later merged with | ||
// the module files, we need to convert it to an OverlayFS. | ||
templateFS = closeableTemplateFS | ||
Emyrk marked this conversation as resolved. Show resolved / Hide resolved. Uh oh! There was an error while loading. Please reload this page. | ||
// Having the Terraform plan available for the evaluation engine is helpful | ||
// for populating values from data blocks, but isn't strictly required. If | ||
@@ -171,9 +176,9 @@ func (api *API) handleDynamicParameters(listen bool, rw http.ResponseWriter, r * | ||
}) | ||
return | ||
} | ||
defer moduleFilesFS.Close() | ||
templateFS = files.NewOverlayFS(closeableTemplateFS, []files.Overlay{{Path: ".terraform/modules", FS: moduleFilesFS}}) | ||
} | ||
owner, err := getWorkspaceOwnerData(ctx, api.Database, apikey.UserID, templateVersion.OrganizationID) | ||
Uh oh!
There was an error while loading. Please reload this page.