package storage
import (
_ "embed"
"encoding/base64"
"errors"
"fmt"
"io"
"maps"
"os"
"path/filepath"
"reflect"
"slices"
"strings"
"sync"
"syscall"
"time"
// register all of the built-in drivers
_ "github.com/containers/storage/drivers/register"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/stringutils"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/types"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
)
type updateNameOperation int
const (
setNames updateNameOperation = iota
addNames
removeNames
)
const (
volatileFlag = "Volatile"
mountLabelFlag = "MountLabel"
processLabelFlag = "ProcessLabel"
mountOptsFlag = "MountOpts"
)
var (
stores []*store
storesLock sync.Mutex
)
// roMetadataStore wraps a method for reading metadata associated with an ID.
type roMetadataStore interface {
// Metadata reads metadata associated with an item with the specified ID.
Metadata(id string) (string, error)
}
// rwMetadataStore wraps a method for setting metadata associated with an ID.
type rwMetadataStore interface {
// SetMetadata updates the metadata associated with the item with the specified ID.
SetMetadata(id, metadata string) error
}
// metadataStore wraps up methods for getting and setting metadata associated with IDs.
type metadataStore interface {
roMetadataStore
rwMetadataStore
}
// ApplyStagedLayerOptions contains options to pass to ApplyStagedLayer
type ApplyStagedLayerOptions struct {
ID string // Mandatory
ParentLayer string // Optional
Names []string // Optional
MountLabel string // Optional
Writeable bool // Optional
LayerOptions *LayerOptions // Optional
DiffOutput *drivers.DriverWithDifferOutput // Mandatory
DiffOptions *drivers.ApplyDiffWithDifferOpts // Mandatory
}
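// An illustrative, hedged sketch (not part of the API surface) of how a caller
// might use these options: PrepareStagedLayer produces a DriverWithDifferOutput,
// which is handed to ApplyStagedLayer; CleanupStagedLayer is only needed if the
// staged diff is never successfully applied. The variables s, differ, diffOpts
// and the literal ID are assumptions supplied by the caller.
//
//	out, err := s.PrepareStagedLayer(diffOpts, differ)
//	if err != nil {
//		return err
//	}
//	layer, err := s.ApplyStagedLayer(ApplyStagedLayerOptions{
//		ID:          "layer-id",
//		DiffOutput:  out,
//		DiffOptions: diffOpts,
//	})
//	if err != nil {
//		_ = s.CleanupStagedLayer(out) // discard the staged data that was never applied
//		return err
//	}
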
// MultiListOptions contains options to pass to MultiList
type MultiListOptions struct {
Images bool // if true, Images will be listed in the result
Layers bool // if true, layers will be listed in the result
Containers bool // if true, containers will be listed in the result
}
// MultiListResult contains slices of Images, Layers or Containers listed by MultiList method
type MultiListResult struct {
Images []Image
Layers []Layer
Containers []Container
}
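// A minimal sketch of reading a consistent snapshot with MultiList; s is assumed
// to be a Store obtained from GetStore.
//
//	res, err := s.MultiList(MultiListOptions{Images: true, Layers: true, Containers: true})
//	if err != nil {
//		return err
//	}
//	fmt.Printf("%d images, %d layers, %d containers\n",
//		len(res.Images), len(res.Layers), len(res.Containers))
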
// An roBigDataStore wraps up the read-only big-data related methods of the
// various types of file-based lookaside stores that we implement.
type roBigDataStore interface {
// BigData retrieves a (potentially large) piece of data associated with
// this ID, if it has previously been set.
BigData(id, key string) ([]byte, error)
// BigDataSize retrieves the size of a (potentially large) piece of
// data associated with this ID, if it has previously been set.
BigDataSize(id, key string) (int64, error)
// BigDataDigest retrieves the digest of a (potentially large) piece of
// data associated with this ID, if it has previously been set.
BigDataDigest(id, key string) (digest.Digest, error)
// BigDataNames() returns a list of the names of previously-stored pieces of
// data.
BigDataNames(id string) ([]string, error)
}
// A rwImageBigDataStore wraps up how we store big-data associated with images.
type rwImageBigDataStore interface {
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
// Pass github.com/containers/image/manifest.Digest as digestManifest
// to allow ByDigest to find images by their correct digests.
SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
}
// A containerBigDataStore wraps up how we store big-data associated with containers.
type containerBigDataStore interface {
roBigDataStore
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
SetBigData(id, key string, data []byte) error
}
// A roLayerBigDataStore wraps up how we store RO big-data associated with layers.
type roLayerBigDataStore interface {
// BigData retrieves a (potentially large) piece of data associated
// with this ID, if it has previously been set.
BigData(id, key string) (io.ReadCloser, error)
// BigDataNames() returns a list of the names of previously-stored pieces of
// data.
BigDataNames(id string) ([]string, error)
}
// A rwLayerBigDataStore wraps up how we store big-data associated with layers.
type rwLayerBigDataStore interface {
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
SetBigData(id, key string, data io.Reader) error
}
// A flaggableStore can have flags set and cleared on items which it manages.
type flaggableStore interface {
// ClearFlag removes a named flag from an item in the store.
ClearFlag(id string, flag string) error
// SetFlag sets a named flag and its value on an item in the store.
SetFlag(id string, flag string, value interface{}) error
}
type StoreOptions = types.StoreOptions
// Store wraps up the various types of file-based stores that we use into a
// singleton object that initializes and manages them all together.
type Store interface {
// RunRoot, GraphRoot, GraphDriverName, and GraphOptions retrieve
// settings that were passed to GetStore() when the object was created.
RunRoot() string
GraphRoot() string
ImageStore() string
TransientStore() bool
GraphDriverName() string
GraphOptions() []string
PullOptions() map[string]string
UIDMap() []idtools.IDMap
GIDMap() []idtools.IDMap
// GraphDriver obtains and returns a handle to the graph Driver object used
// by the Store.
GraphDriver() (drivers.Driver, error)
// CreateLayer creates a new layer in the underlying storage driver,
// optionally having the specified ID (one will be assigned if none is
// specified), with the specified layer (or no layer) as its parent,
// and with optional names. (The writeable flag is ignored.)
CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error)
// PutLayer combines the functions of CreateLayer and ApplyDiff,
// marking the layer for automatic removal if applying the diff fails
// for any reason.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error)
// CreateImage creates a new image, optionally with the specified ID
// (one will be assigned if none is specified), with optional names,
// referring to a specified image, and with optional metadata. An
// image is a record which associates the ID of a layer with
// additional bookkeeping information which the library stores for the
// convenience of its caller.
CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error)
// CreateContainer creates a new container, optionally with the
// specified ID (one will be assigned if none is specified), with
// optional names, using the specified image's top layer as the basis
// for the container's layer, and assigning the specified ID to that
// layer (one will be created if none is specified). A container is a
// layer which is associated with additional bookkeeping information
// which the library stores for the convenience of its caller.
CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
// Metadata retrieves the metadata which is associated with a layer,
// image, or container (whichever the passed-in ID refers to).
Metadata(id string) (string, error)
// SetMetadata updates the metadata which is associated with a layer,
// image, or container (whichever the passed-in ID refers to) to match
// the specified value. The metadata value can be retrieved at any
// time using Metadata, or using Layer, Image, or Container and reading
// the object directly.
SetMetadata(id, metadata string) error
// Exists checks if there is a layer, image, or container which has the
// passed-in ID or name.
Exists(id string) bool
// Status asks for a status report, in the form of key-value pairs,
// from the underlying storage driver. The contents vary from driver
// to driver.
Status() ([][2]string, error)
// Delete removes the layer, image, or container which has the
// passed-in ID or name. Note that no safety checks are performed, so
// this can leave images with references to layers which do not exist,
// and layers with references to parents which no longer exist.
Delete(id string) error
// DeleteLayer attempts to remove the specified layer. If the layer is the
// parent of any other layer, or is referred to by any images, it will return
// an error.
DeleteLayer(id string) error
// DeleteImage removes the specified image if it is not referred to by
// any containers. If its top layer is then no longer referred to by
// any other images and is not the parent of any other layers, its top
// layer will be removed. If that layer's parent is no longer referred
// to by any other images and is not the parent of any other layers,
// then it, too, will be removed. This procedure will be repeated
// until a layer which should not be removed, or the base layer, is
// reached, at which point the list of removed layers is returned. If
// the commit argument is false, the image and layers are not removed,
// but the list of layers which would be removed is still returned.
DeleteImage(id string, commit bool) (layers []string, err error)
// DeleteContainer removes the specified container and its layer. If
// there is no matching container, or if the container exists but its
// layer does not, an error will be returned.
DeleteContainer(id string) error
// Wipe removes all known layers, images, and containers.
Wipe() error
// MountImage mounts an image to a temporary directory and returns the mount point.
// MountImage allows the caller to mount an image. Images will always
// be mounted read-only.
MountImage(id string, mountOptions []string, mountLabel string) (string, error)
// UnmountImage attempts to unmount an image, given an ID.
// Returns whether or not the layer is still mounted.
// WARNING: The return value may already be obsolete by the time it is available
// to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored.
UnmountImage(id string, force bool) (bool, error)
// Mount attempts to mount a layer, image, or container for access, and
// returns the pathname if it succeeds.
// Note if the mountLabel == "", the default label for the container
// will be used.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
Mount(id, mountLabel string) (string, error)
// Unmount attempts to unmount a layer, image, or container, given an ID, a
// name, or a mount path. Returns whether or not the layer is still mounted.
// WARNING: The return value may already be obsolete by the time it is available
// to the caller, so it can be used for heuristic sanity checks at best. It should almost always be ignored.
Unmount(id string, force bool) (bool, error)
// Mounted returns the number of times the layer has been mounted.
//
// WARNING: This value might already be obsolete by the time it is returned;
// In situations where concurrent mount/unmount attempts can happen, this field
// should not be used for any decisions, maybe apart from heuristic user warnings.
Mounted(id string) (int, error)
// Changes returns a summary of the changes which would need to be made
// to one layer to make its contents the same as a second layer. If
// the first layer is not specified, the second layer's parent is
// assumed. Each Change structure contains a Path relative to the
// layer's root directory, and a Kind which is either ChangeAdd,
// ChangeModify, or ChangeDelete.
Changes(from, to string) ([]archive.Change, error)
// DiffSize returns a count of the size of the tarstream which would
// specify the changes returned by Changes.
DiffSize(from, to string) (int64, error)
// Diff returns the tarstream which would specify the changes returned
// by Changes. If options are passed in, they can override default
// behaviors.
Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
// ApplyDiff applies a tarstream to a layer. Information about the
// tarstream is cached with the layer. Typically, a layer which is
// populated using a tarstream will be expected to not be modified in
// any other way, either before or after the diff is applied.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
ApplyDiff(to string, diff io.Reader) (int64, error)
// ApplyDiffWithDiffer applies a diff to a layer.
// It is the caller's responsibility to clean up the staging directory if it is not
// successfully applied with ApplyStagedLayer.
// Deprecated: Use PrepareStagedLayer instead. ApplyDiffWithDiffer is going to be removed in a future release
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
// PrepareStagedLayer applies a diff to a layer.
// It is the caller's responsibility to clean up the staging directory if it is not
// successfully applied with ApplyStagedLayer.
PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
// ApplyStagedLayer combines the functions of creating a layer and using the staging
// directory to populate it.
// It marks the layer for automatic removal if applying the diff fails for any reason.
ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error)
// CleanupStagedLayer cleans up the staging directory. It can be used to clean up the staging directory on errors.
CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error
// DifferTarget gets the path to the differ target.
DifferTarget(id string) (string, error)
// LayersByCompressedDigest returns a slice of the layers with the
// specified compressed digest value recorded for them.
LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
// LayersByUncompressedDigest returns a slice of the layers with the
// specified uncompressed digest value recorded for them.
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
// LayersByTOCDigest returns a slice of the layers with the
// specified TOC digest value recorded for them.
LayersByTOCDigest(d digest.Digest) ([]Layer, error)
// LayerSize returns a cached approximation of the layer's size, or -1
// if we don't have a value on hand.
LayerSize(id string) (int64, error)
// LayerParentOwners returns the UIDs and GIDs of owners of parents of
// the layer's mountpoint for which the layer's UID and GID maps (if
// any are defined) don't contain corresponding IDs.
LayerParentOwners(id string) ([]int, []int, error)
// Layers returns a list of the currently known layers.
Layers() ([]Layer, error)
// Images returns a list of the currently known images.
Images() ([]Image, error)
// Containers returns a list of the currently known containers.
Containers() ([]Container, error)
// Names returns the list of names for a layer, image, or container.
Names(id string) ([]string, error)
// Free removes the store from the list of stores
Free()
// SetNames changes the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically.
// Deprecated: Prone to race conditions, suggested alternatives are `AddNames` and `RemoveNames`.
SetNames(id string, names []string) error
// AddNames adds the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically.
AddNames(id string, names []string) error
// RemoveNames removes the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically.
RemoveNames(id string, names []string) error
// ListImageBigData retrieves a list of the (possibly large) chunks of
// named data associated with an image.
ListImageBigData(id string) ([]string, error)
// ImageBigData retrieves a (possibly large) chunk of named data
// associated with an image.
ImageBigData(id, key string) ([]byte, error)
// ImageBigDataSize retrieves the size of a (possibly large) chunk
// of named data associated with an image.
ImageBigDataSize(id, key string) (int64, error)
// ImageBigDataDigest retrieves the digest of a (possibly large) chunk
// of named data associated with an image.
ImageBigDataDigest(id, key string) (digest.Digest, error)
// SetImageBigData stores a (possibly large) chunk of named data
// associated with an image. Pass
// github.com/containers/image/manifest.Digest as digestManifest to
// allow ImagesByDigest to find images by their correct digests.
SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
// ImageDirectory returns a path of a directory which the caller can
// use to store data, specific to the image, which the library does not
// directly manage. The directory will be deleted when the image is
// deleted.
ImageDirectory(id string) (string, error)
// ImageRunDirectory returns a path of a directory which the caller can
// use to store data, specific to the image, which the library does not
// directly manage. The directory will be deleted when the host system
// is restarted.
ImageRunDirectory(id string) (string, error)
// ListLayerBigData retrieves a list of the (possibly large) chunks of
// named data associated with a layer.
ListLayerBigData(id string) ([]string, error)
// LayerBigData retrieves a (possibly large) chunk of named data
// associated with a layer.
LayerBigData(id, key string) (io.ReadCloser, error)
// SetLayerBigData stores a (possibly large) chunk of named data
// associated with a layer.
SetLayerBigData(id, key string, data io.Reader) error
// ImageSize computes the size of the image's layers and ancillary data.
ImageSize(id string) (int64, error)
// ListContainerBigData retrieves a list of the (possibly large) chunks of
// named data associated with a container.
ListContainerBigData(id string) ([]string, error)
// ContainerBigData retrieves a (possibly large) chunk of named data
// associated with a container.
ContainerBigData(id, key string) ([]byte, error)
// ContainerBigDataSize retrieves the size of a (possibly large)
// chunk of named data associated with a container.
ContainerBigDataSize(id, key string) (int64, error)
// ContainerBigDataDigest retrieves the digest of a (possibly large)
// chunk of named data associated with a container.
ContainerBigDataDigest(id, key string) (digest.Digest, error)
// SetContainerBigData stores a (possibly large) chunk of named data
// associated with a container.
SetContainerBigData(id, key string, data []byte) error
// ContainerSize computes the size of the container's layer and ancillary
// data. Warning: this is a potentially expensive operation.
ContainerSize(id string) (int64, error)
// Layer returns a specific layer.
Layer(id string) (*Layer, error)
// Image returns a specific image.
Image(id string) (*Image, error)
// ImagesByTopLayer returns a list of images which reference the specified
// layer as their top layer. They will have different IDs and names
// and may have different metadata, big data items, and flags.
ImagesByTopLayer(id string) ([]*Image, error)
// ImagesByDigest returns a list of images which contain a big data item
// named ImageDigestBigDataKey whose contents have the specified digest.
ImagesByDigest(d digest.Digest) ([]*Image, error)
// Container returns a specific container.
Container(id string) (*Container, error)
// ContainerByLayer returns a specific container based on its layer ID or
// name.
ContainerByLayer(id string) (*Container, error)
// ContainerDirectory returns a path of a directory which the caller
// can use to store data, specific to the container, which the library
// does not directly manage. The directory will be deleted when the
// container is deleted.
ContainerDirectory(id string) (string, error)
// SetContainerDirectoryFile is a convenience function which stores
// a piece of data in the specified file relative to the container's
// directory.
SetContainerDirectoryFile(id, file string, data []byte) error
// FromContainerDirectory is a convenience function which reads
// the contents of the specified file relative to the container's
// directory.
FromContainerDirectory(id, file string) ([]byte, error)
// ContainerRunDirectory returns a path of a directory which the
// caller can use to store data, specific to the container, which the
// library does not directly manage. The directory will be deleted
// when the host system is restarted.
ContainerRunDirectory(id string) (string, error)
// SetContainerRunDirectoryFile is a convenience function which stores
// a piece of data in the specified file relative to the container's
// run directory.
SetContainerRunDirectoryFile(id, file string, data []byte) error
// FromContainerRunDirectory is a convenience function which reads
// the contents of the specified file relative to the container's run
// directory.
FromContainerRunDirectory(id, file string) ([]byte, error)
// ContainerParentOwners returns the UIDs and GIDs of owners of parents
// of the container's layer's mountpoint for which the layer's UID and
// GID maps (if any are defined) don't contain corresponding IDs.
ContainerParentOwners(id string) ([]int, []int, error)
// Lookup returns the ID of a layer, image, or container with the specified
// name or ID.
Lookup(name string) (string, error)
// Shutdown attempts to free any kernel resources which are being used
// by the underlying driver. If "force" is true, any mounted (i.e., in
// use) layers are unmounted beforehand. If "force" is not true, then
// layers being in use is considered to be an error condition. A list
// of still-mounted layers is returned along with possible errors.
Shutdown(force bool) (layers []string, err error)
// Version returns version information, in the form of key-value pairs, from
// the storage package.
Version() ([][2]string, error)
// GetDigestLock returns digest-specific Locker.
GetDigestLock(digest.Digest) (Locker, error)
// LookupAdditionalLayer searches the additional layer store and returns an object
// which can create a layer with the specified TOC digest associated with the specified image
// reference. Note that this hasn't been stored to this store yet: the actual creation of
// a usable layer is done by calling the returned object's PutAs() method. After creating
// a layer, the caller must then call the object's Release() method to free any temporary
// resources which were allocated for the object by this method or the object's PutAs()
// method.
// This API is experimental and can be changed without bumping the major version number.
LookupAdditionalLayer(tocDigest digest.Digest, imageref string) (AdditionalLayer, error)
// GarbageCollect tries to clean up remainders of previous containers or layers that are
// not referenced in the JSON files. These can happen in the case of unclean
// shutdowns or regular restarts in transient store mode.
GarbageCollect() error
// Check returns a report of things that look wrong in the store.
Check(options *CheckOptions) (CheckReport, error)
// Repair attempts to remediate problems mentioned in the CheckReport,
// usually by deleting layers and images which are damaged. If the
// right options are set, it will remove containers as well.
Repair(report CheckReport, options *RepairOptions) []error
// MultiList returns a MultiListResult structure that contains layer, image, or container
// extracts according to the values in MultiListOptions.
// MultiList returns consistent values as of a single point in time.
// WARNING: The values may already be out of date by the time they are returned to the caller.
MultiList(MultiListOptions) (MultiListResult, error)
}
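// An illustrative, hedged sketch of a typical container workflow against this
// interface; s, imageID and the error handling shown are assumptions of the
// example, not requirements of the API.
//
//	c, err := s.CreateContainer("", []string{"mycontainer"}, imageID, "", "", nil)
//	if err != nil {
//		return err
//	}
//	mountPoint, err := s.Mount(c.ID, "")
//	if err != nil {
//		return err
//	}
//	// ... use mountPoint ...
//	if _, err := s.Unmount(c.ID, false); err != nil {
//		return err
//	}
//	if err := s.DeleteContainer(c.ID); err != nil {
//		return err
//	}
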
// AdditionalLayer represents a layer that is contained in the additional layer store
// This API is experimental and can be changed without bumping the major version number.
type AdditionalLayer interface {
// PutAs creates a layer based on this handler, using diff contents from the additional
// layer store.
PutAs(id, parent string, names []string) (*Layer, error)
// TOCDigest returns the digest of the TOC of this layer. Returns "" if unknown.
TOCDigest() digest.Digest
// CompressedSize returns the compressed size of this layer
CompressedSize() int64
// Release tells the additional layer store that we no longer use this handler.
Release()
}
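// A hedged sketch of driving this experimental API; s, tocDigest, imageRef and
// parentLayerID are assumed to come from the caller.
//
//	al, err := s.LookupAdditionalLayer(tocDigest, imageRef)
//	if err != nil {
//		return err
//	}
//	defer al.Release() // free temporary resources once the layer has been created
//	layer, err := al.PutAs("", parentLayerID, nil)
//	if err != nil {
//		return err
//	}
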
type AutoUserNsOptions = types.AutoUserNsOptions
type IDMappingOptions = types.IDMappingOptions
// LayerOptions is used for passing options to a Store's CreateLayer() and PutLayer() methods.
type LayerOptions struct {
// IDMappingOptions specifies the type of ID mapping which should be
// used for this layer. If nothing is specified, the layer will
// inherit settings from its parent layer or, if it has no parent
// layer, the Store object.
types.IDMappingOptions
// TemplateLayer is the ID of a layer whose contents will be used to
// initialize this layer. If set, it should be a child of the layer
// which we want to use as the parent of the new layer.
TemplateLayer string
// OriginalDigest specifies a digest of the (possibly-compressed) tarstream (diff), if one is
// provided along with these LayerOptions, and reliably known by the caller.
// The digest might not be exactly the digest of the provided tarstream
// (e.g. the digest might be of a compressed representation, while providing
// an uncompressed one); in that case the caller is responsible for the two matching.
// Use the default "" if this fields is not applicable or the value is not known.
OriginalDigest digest.Digest
// OriginalSize specifies a size of the (possibly-compressed) tarstream corresponding
// to OriginalDigest.
// If the digest does not match the provided tarstream, OriginalSize must match OriginalDigest,
// not the tarstream.
// Use nil if not applicable or not known.
OriginalSize *int64
// UncompressedDigest specifies a digest of the uncompressed version (“DiffID”)
// of the tarstream (diff), if one is provided along with these LayerOptions,
// and reliably known by the caller.
// Use the default "" if this fields is not applicable or the value is not known.
UncompressedDigest digest.Digest
// True if the layer info can be treated as volatile.
Volatile bool
// BigData is a set of items which should be stored with the layer.
BigData []LayerBigDataOption
// Flags is a set of named flags and their values to store with the layer.
// Currently these can only be set when the layer record is created, but that
// could change in the future.
Flags map[string]interface{}
}
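// A minimal sketch of passing LayerOptions to PutLayer when the compressed
// digest of the diff stream is known in advance; s, parentID, diffReader and
// compressedDigest are assumptions of the example.
//
//	layer, size, err := s.PutLayer("", parentID, nil, "", true, &LayerOptions{
//		OriginalDigest: compressedDigest,
//	}, diffReader)
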
type LayerBigDataOption struct {
Key string
Data io.Reader
}
// ImageOptions is used for passing options to a Store's CreateImage() method.
type ImageOptions struct {
// CreationDate, if not zero, will override the default behavior of marking the image as having been
// created when CreateImage() was called, recording CreationDate instead.
CreationDate time.Time
// Digest is a hard-coded digest value that we can use to look up the image. It is optional.
Digest digest.Digest
// Digests is a list of digest values of the image's manifests, and
// possibly a manually-specified value, that we can use to locate the
// image. If Digest is set, its value is also in this list.
Digests []digest.Digest
// Metadata is caller-specified metadata associated with the layer.
Metadata string
// BigData is a set of items which should be stored with the image.
BigData []ImageBigDataOption
// NamesHistory is used for guessing what this image was named when a container was created based
// on it, but the image no longer has any names.
NamesHistory []string
// Flags is a set of named flags and their values to store with the image. Currently these can only
// be set when the image record is created, but that could change in the future.
Flags map[string]interface{}
}
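// A brief, hedged sketch of creating an image record on top of an existing layer
// and attaching its manifest as big data; s, topLayerID, manifestBytes and
// manifestDigest (the digest of manifestBytes) are assumptions supplied by the
// caller, and the image name is purely illustrative.
//
//	img, err := s.CreateImage("", []string{"example.com/app:latest"}, topLayerID, "", &ImageOptions{
//		BigData: []ImageBigDataOption{
//			{Key: ImageDigestBigDataKey, Data: manifestBytes, Digest: manifestDigest},
//		},
//	})
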
type ImageBigDataOption struct {
Key string
Data []byte
Digest digest.Digest
}
// ContainerOptions is used for passing options to a Store's CreateContainer() method.
type ContainerOptions struct {
// IDMappingOptions specifies the type of ID mapping which should be
// used for this container's layer. If nothing is specified, the
// container's layer will inherit settings from the image's top layer
// or, if it is not being created based on an image, the Store object.
types.IDMappingOptions
LabelOpts []string
// Flags is a set of named flags and their values to store with the container.
// Currently these can only be set when the container record is created, but that
// could change in the future.
Flags map[string]interface{}
MountOpts []string
Volatile bool
StorageOpt map[string]string
// Metadata is caller-specified metadata associated with the container.
Metadata string
// BigData is a set of items which should be stored for the container.
BigData []ContainerBigDataOption
}
type ContainerBigDataOption struct {
Key string
Data []byte
}
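// A hedged sketch of creating a container with explicit mount options and a
// flag; s and imageID are assumed to exist, and the flag name "example.flag"
// is purely illustrative.
//
//	ctr, err := s.CreateContainer("", []string{"worker"}, imageID, "", "", &ContainerOptions{
//		MountOpts: []string{"nodev"},
//		Volatile:  true,
//		Flags:     map[string]interface{}{"example.flag": true},
//	})
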
type store struct {
// # Locking hierarchy:
// These locks do not all need to be held simultaneously, but if some code does need to lock more than one, it MUST do so in this order:
// - graphLock
// - layerStore.start{Reading,Writing}
// - roLayerStores[].startReading (in the order of the items of the roLayerStores array)
// - imageStore.start{Reading,Writing}
// - roImageStores[].startReading (in the order of the items of the roImageStores array)
// - containerStore.start{Reading,Writing}
// The following fields are only set when constructing store, and must never be modified afterwards.
// They are safe to access without any other locking.
runRoot string
graphDriverName string // Initially set to the user-requested value, possibly ""; updated during store construction, and does not change afterwards.
graphDriverPriority []string
// graphLock:
// - Ensures that we always reload graphDriver, and the primary layer store, after any process does store.Shutdown. This is necessary
// because (??) the Shutdown may forcibly unmount and clean up, affecting graph driver state in a way only a graph driver
// and layer store reinitialization can notice.
// - Ensures that store.Shutdown is exclusive with mount operations. This is necessary because some
// graph drivers call mount.MakePrivate() during initialization, the mount operations require that, and the driver’s Cleanup() method
// may undo that. So, holding graphLock is required throughout the duration of Shutdown(), and the duration of any mount
// (but not unmount) calls.
// - Within this store object, protects access to some related in-memory state.
graphLock *lockfile.LockFile
usernsLock *lockfile.LockFile
graphRoot string
graphOptions []string
imageStoreDir string
pullOptions map[string]string
uidMap []idtools.IDMap
gidMap []idtools.IDMap
autoUsernsUser string
autoNsMinSize uint32
autoNsMaxSize uint32
imageStore rwImageStore
rwImageStores []rwImageStore
roImageStores []roImageStore
containerStore rwContainerStore
digestLockRoot string
disableVolatile bool
transientStore bool
// The following fields can only be accessed with graphLock held.
graphLockLastWrite lockfile.LastWrite
// FIXME: This field is only set when holding graphLock, but locking rules of the driver
// interface itself are not documented here. It is extensively used without holding graphLock.
graphDriver drivers.Driver
layerStoreUseGetters rwLayerStore // Almost all users should use the provided accessors instead of accessing this field directly.
roLayerStoresUseGetters []roLayerStore // Almost all users should use the provided accessors instead of accessing this field directly.
// FIXME: The following fields need locking, and don’t have it.
additionalUIDs *idSet // Set by getAvailableIDs()
additionalGIDs *idSet // Set by getAvailableIDs()
}
// GetStore attempts to find an already-created Store object matching the
// specified location and graph driver, and if it can't, it creates and
// initializes a new Store object, and the underlying storage that it controls.
//
// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
//
// These defaults observe environment variables:
// - `STORAGE_DRIVER` for the name of the storage driver to attempt to use
// - `STORAGE_OPTS` for the string of options to pass to the driver
//
// Note that we do some of this work in a child process. The calling process's
// main() function needs to import our pkg/reexec package and should begin with
// something like this in order to allow us to properly start that child
// process:
//
// if reexec.Init() {
// return
// }
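//
// A minimal, hedged usage sketch; the paths shown are illustrative values, not
// guaranteed defaults:
//
//	s, err := GetStore(types.StoreOptions{
//		RunRoot:   "/run/containers/storage",
//		GraphRoot: "/var/lib/containers/storage",
//	})
//	if err != nil {
//		return err
//	}
//	defer s.Shutdown(false)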
func GetStore(options types.StoreOptions) (Store, error) {
defaultOpts, err := types.Options()
if err != nil {
return nil, err
}
if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
options = defaultOpts
}
if options.GraphRoot != "" {
dir, err := filepath.Abs(options.GraphRoot)
if err != nil {
return nil, err
}
options.GraphRoot = dir
}
if options.RunRoot != "" {
dir, err := filepath.Abs(options.RunRoot)
if err != nil {
return nil, err
}
options.RunRoot = dir
}
storesLock.Lock()
defer storesLock.Unlock()
// return if BOTH run and graph root are matched, otherwise our run-root can be overridden if the graph is found first
for _, s := range stores {
if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
return s, nil
}
}
// if passed a run-root or graph-root alone, the other should be defaulted; only error if we have neither.
switch {
case options.RunRoot == "" && options.GraphRoot == "":
return nil, fmt.Errorf("no storage runroot or graphroot specified: %w", ErrIncompleteOptions)
case options.GraphRoot == "":
options.GraphRoot = defaultOpts.GraphRoot
case options.RunRoot == "":
options.RunRoot = defaultOpts.RunRoot
}
if err := os.MkdirAll(options.RunRoot, 0o700); err != nil {
return nil, err
}
if err := os.MkdirAll(options.GraphRoot, 0o700); err != nil {
return nil, err
}
if options.ImageStore != "" {
if err := os.MkdirAll(options.ImageStore, 0o700); err != nil {
return nil, err
}
}
if err := os.MkdirAll(filepath.Join(options.GraphRoot, options.GraphDriverName), 0o700); err != nil {
return nil, err
}
if options.ImageStore != "" {
if err := os.MkdirAll(filepath.Join(options.ImageStore, options.GraphDriverName), 0o700); err != nil {
return nil, err
}
}
graphLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "storage.lock"))
if err != nil {
return nil, err
}
usernsLock, err := lockfile.GetLockFile(filepath.Join(options.GraphRoot, "userns.lock"))
if err != nil {
return nil, err
}
autoNsMinSize := options.AutoNsMinSize
autoNsMaxSize := options.AutoNsMaxSize
if autoNsMinSize == 0 {
autoNsMinSize = AutoUserNsMinSize
}
if autoNsMaxSize == 0 {
autoNsMaxSize = AutoUserNsMaxSize
}
s := &store{
runRoot: options.RunRoot,
graphDriverName: options.GraphDriverName,
graphDriverPriority: options.GraphDriverPriority,
graphLock: graphLock,
usernsLock: usernsLock,
graphRoot: options.GraphRoot,
graphOptions: options.GraphDriverOptions,
imageStoreDir: options.ImageStore,
pullOptions: options.PullOptions,
uidMap: copySlicePreferringNil(options.UIDMap),
gidMap: copySlicePreferringNil(options.GIDMap),
autoUsernsUser: options.RootAutoNsUser,
autoNsMinSize: autoNsMinSize,
autoNsMaxSize: autoNsMaxSize,
disableVolatile: options.DisableVolatile,
transientStore: options.TransientStore,
additionalUIDs: nil,
additionalGIDs: nil,
}
if err := s.load(); err != nil {
return nil, err
}
stores = append(stores, s)
return s, nil
}
func (s *store) RunRoot() string {
return s.runRoot
}
func (s *store) GraphDriverName() string {
return s.graphDriverName
}
func (s *store) GraphRoot() string {
return s.graphRoot
}
func (s *store) ImageStore() string {
return s.imageStoreDir
}
func (s *store) TransientStore() bool {
return s.transientStore
}
func (s *store) GraphOptions() []string {
return s.graphOptions
}
func (s *store) PullOptions() map[string]string {
cp := make(map[string]string, len(s.pullOptions))
maps.Copy(cp, s.pullOptions)
return cp
}
func (s *store) UIDMap() []idtools.IDMap {
return copySlicePreferringNil(s.uidMap)
}
func (s *store) GIDMap() []idtools.IDMap {
return copySlicePreferringNil(s.gidMap)
}
// This must only be called when constructing store; it writes to fields that are assumed to be constant after construction.
func (s *store) load() error {
var driver drivers.Driver
if err := func() error { // A scope for defer
s.graphLock.Lock()
defer s.graphLock.Unlock()
lastWrite, err := s.graphLock.GetLastWrite()
if err != nil {
return err
}
s.graphLockLastWrite = lastWrite
driver, err = s.createGraphDriverLocked()
if err != nil {
return err
}
s.graphDriver = driver
s.graphDriverName = driver.String()
return nil
}(); err != nil {
return err
}
driverPrefix := s.graphDriverName + "-"
imgStoreRoot := s.imageStoreDir
if imgStoreRoot == "" {
imgStoreRoot = s.graphRoot
}
gipath := filepath.Join(imgStoreRoot, driverPrefix+"images")
if err := os.MkdirAll(gipath, 0o700); err != nil {
return err
}
imageStore, err := newImageStore(gipath)
if err != nil {
return err
}
s.imageStore = imageStore
s.rwImageStores = []rwImageStore{imageStore}
gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
if err := os.MkdirAll(gcpath, 0o700); err != nil {
return err
}
rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
if err := os.MkdirAll(rcpath, 0o700); err != nil {
return err
}
rcs, err := newContainerStore(gcpath, rcpath, s.transientStore)
if err != nil {
return err
}
s.containerStore = rcs
additionalImageStores := s.graphDriver.AdditionalImageStores()
if s.imageStoreDir != "" {
additionalImageStores = append([]string{s.graphRoot}, additionalImageStores...)
}
for _, store := range additionalImageStores {
gipath := filepath.Join(store, driverPrefix+"images")
var ris roImageStore