@@ -2,32 +2,38 @@ package files
2
2
3
3
import (
4
4
"context"
5
- "io/fs"
6
- "sync"
7
5
"sync/atomic"
8
6
"testing"
9
7
"time"
10
8
11
9
"github.com/google/uuid"
10
+ "github.com/prometheus/client_golang/prometheus"
12
11
"github.com/spf13/afero"
13
12
"github.com/stretchr/testify/require"
14
13
"golang.org/x/sync/errgroup"
15
14
15
+ "github.com/coder/coder/v2/coderd/coderdtest/promhelp"
16
16
"github.com/coder/coder/v2/testutil"
17
17
)
18
18
19
// cachePromMetricName returns the fully-qualified Prometheus metric name
// for the file cache by applying the coderd file-cache namespace prefix.
func cachePromMetricName(metric string) string {
	const prefix = "coderd_file_cache_"
	return prefix + metric
}
22
+
19
23
func TestConcurrency (t * testing.T ) {
20
24
t .Parallel ()
21
25
26
+ const fileSize = 10
22
27
emptyFS := afero .NewIOFS (afero .NewReadOnlyFs (afero .NewMemMapFs ()))
23
28
var fetches atomic.Int64
24
- c := newTestCache (func (_ context.Context ,_ uuid.UUID ) (fs.FS ,error ) {
29
+ reg := prometheus .NewRegistry ()
30
+ c := New (func (_ context.Context ,_ uuid.UUID ) (cacheEntryValue ,error ) {
25
31
fetches .Add (1 )
26
32
// Wait long enough before returning to make sure that all of the goroutines
27
33
// will be waiting in line, ensuring that no one duplicated a fetch.
28
34
time .Sleep (testutil .IntervalMedium )
29
- return emptyFS ,nil
30
- })
35
+ return cacheEntryValue { FS : emptyFS , size : fileSize } ,nil
36
+ }, reg )
31
37
32
38
batches := 1000
33
39
groups := make ([]* errgroup.Group ,0 ,batches )
@@ -55,15 +61,29 @@ func TestConcurrency(t *testing.T) {
55
61
require .NoError (t ,g .Wait ())
56
62
}
57
63
require .Equal (t ,int64 (batches ),fetches .Load ())
64
+
65
+ // Verify all the counts & metrics are correct.
66
+ require .Equal (t ,batches ,c .Count ())
67
+ require .Equal (t ,batches * fileSize ,promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_files_size_bytes_current" ),nil ))
68
+ require .Equal (t ,batches * fileSize ,promhelp .CounterValue (t ,reg ,cachePromMetricName ("open_files_size_bytes_total" ),nil ))
69
+ require .Equal (t ,batches ,promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_files_current" ),nil ))
70
+ require .Equal (t ,batches ,promhelp .CounterValue (t ,reg ,cachePromMetricName ("open_files_total" ),nil ))
71
+ require .Equal (t ,batches * batchSize ,promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_file_refs_current" ),nil ))
72
+ require .Equal (t ,batches * batchSize ,promhelp .CounterValue (t ,reg ,cachePromMetricName ("open_file_refs_total" ),nil ))
58
73
}
59
74
60
75
func TestRelease (t * testing.T ) {
61
76
t .Parallel ()
62
77
78
+ const fileSize = 10
63
79
emptyFS := afero .NewIOFS (afero .NewReadOnlyFs (afero .NewMemMapFs ()))
64
- c := newTestCache (func (_ context.Context ,_ uuid.UUID ) (fs.FS ,error ) {
65
- return emptyFS ,nil
66
- })
80
+ reg := prometheus .NewRegistry ()
81
+ c := New (func (_ context.Context ,_ uuid.UUID ) (cacheEntryValue ,error ) {
82
+ return cacheEntryValue {
83
+ FS :emptyFS ,
84
+ size :fileSize ,
85
+ },nil
86
+ },reg )
67
87
68
88
batches := 100
69
89
ids := make ([]uuid.UUID ,0 ,batches )
@@ -73,32 +93,60 @@ func TestRelease(t *testing.T) {
73
93
74
94
// Acquire a bunch of references
75
95
batchSize := 10
76
- for _ ,id := range ids {
77
- for range batchSize {
96
+ for openedIdx ,id := range ids {
97
+ for batchIdx := range batchSize {
78
98
it ,err := c .Acquire (t .Context (),id )
79
99
require .NoError (t ,err )
80
100
require .Equal (t ,emptyFS ,it )
101
+
102
+ // Each time a new file is opened, the metrics should be updated as so:
103
+ opened := openedIdx + 1
104
+ // Number of unique files opened is equal to the idx of the ids.
105
+ require .Equal (t ,opened ,c .Count ())
106
+ require .Equal (t ,opened ,promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_files_current" ),nil ))
107
+ // Current file size is unique files * file size.
108
+ require .Equal (t ,opened * fileSize ,promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_files_size_bytes_current" ),nil ))
109
+ // The number of refs is the current iteration of both loops.
110
+ require .Equal (t , ((opened - 1 )* batchSize )+ (batchIdx + 1 ),promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_file_refs_current" ),nil ))
81
111
}
82
112
}
83
113
84
114
// Make sure cache is fully loaded
85
115
require .Equal (t ,len (c .data ),batches )
86
116
87
117
// Now release all of the references
88
- for _ ,id := range ids {
89
- for range batchSize {
118
+ for closedIdx ,id := range ids {
119
+ stillOpen := len (ids )- closedIdx
120
+ for closingIdx := range batchSize {
90
121
c .Release (id )
122
+
123
+ // Each time a file is released, the metrics should decrement the file refs
124
+ require .Equal (t , (stillOpen * batchSize )- (closingIdx + 1 ),promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_file_refs_current" ),nil ))
125
+
126
+ closed := closingIdx + 1 == batchSize
127
+ if closed {
128
+ continue
129
+ }
130
+
131
+ // File ref still exists, so the counts should not change yet.
132
+ require .Equal (t ,stillOpen ,c .Count ())
133
+ require .Equal (t ,stillOpen ,promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_files_current" ),nil ))
134
+ require .Equal (t ,stillOpen * fileSize ,promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_files_size_bytes_current" ),nil ))
91
135
}
92
136
}
93
137
94
138
// ...and make sure that the cache has emptied itself.
95
139
require .Equal (t ,len (c .data ),0 )
96
- }
97
140
98
- func newTestCache (fetcher func (context.Context , uuid.UUID ) (fs.FS ,error ))Cache {
99
- return Cache {
100
- lock : sync.Mutex {},
101
- data :make (map [uuid.UUID ]* cacheEntry ),
102
- fetcher :fetcher ,
103
- }
141
+ // Verify all the counts & metrics are correct.
142
+ // All existing files are closed
143
+ require .Equal (t ,0 ,c .Count ())
144
+ require .Equal (t ,0 ,promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_files_size_bytes_current" ),nil ))
145
+ require .Equal (t ,0 ,promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_files_current" ),nil ))
146
+ require .Equal (t ,0 ,promhelp .GaugeValue (t ,reg ,cachePromMetricName ("open_file_refs_current" ),nil ))
147
+
148
+ // Total counts remain
149
+ require .Equal (t ,batches * fileSize ,promhelp .CounterValue (t ,reg ,cachePromMetricName ("open_files_size_bytes_total" ),nil ))
150
+ require .Equal (t ,batches ,promhelp .CounterValue (t ,reg ,cachePromMetricName ("open_files_total" ),nil ))
151
+ require .Equal (t ,batches * batchSize ,promhelp .CounterValue (t ,reg ,cachePromMetricName ("open_file_refs_total" ),nil ))
104
152
}