mirror of https://github.com/k3d-io/k3d
fix: k3d version ls (now via crane) (#1286)
parent
c511600862
commit
2d5db00546
@ -0,0 +1,202 @@ |
|||||||
|
|
||||||
|
Apache License |
||||||
|
Version 2.0, January 2004 |
||||||
|
http://www.apache.org/licenses/ |
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||||
|
|
||||||
|
1. Definitions. |
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction, |
||||||
|
and distribution as defined by Sections 1 through 9 of this document. |
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by |
||||||
|
the copyright owner that is granting the License. |
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all |
||||||
|
other entities that control, are controlled by, or are under common |
||||||
|
control with that entity. For the purposes of this definition, |
||||||
|
"control" means (i) the power, direct or indirect, to cause the |
||||||
|
direction or management of such entity, whether by contract or |
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity. |
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity |
||||||
|
exercising permissions granted by this License. |
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications, |
||||||
|
including but not limited to software source code, documentation |
||||||
|
source, and configuration files. |
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical |
||||||
|
transformation or translation of a Source form, including but |
||||||
|
not limited to compiled object code, generated documentation, |
||||||
|
and conversions to other media types. |
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or |
||||||
|
Object form, made available under the License, as indicated by a |
||||||
|
copyright notice that is included in or attached to the work |
||||||
|
(an example is provided in the Appendix below). |
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object |
||||||
|
form, that is based on (or derived from) the Work and for which the |
||||||
|
editorial revisions, annotations, elaborations, or other modifications |
||||||
|
represent, as a whole, an original work of authorship. For the purposes |
||||||
|
of this License, Derivative Works shall not include works that remain |
||||||
|
separable from, or merely link (or bind by name) to the interfaces of, |
||||||
|
the Work and Derivative Works thereof. |
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including |
||||||
|
the original version of the Work and any modifications or additions |
||||||
|
to that Work or Derivative Works thereof, that is intentionally |
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner |
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of |
||||||
|
the copyright owner. For the purposes of this definition, "submitted" |
||||||
|
means any form of electronic, verbal, or written communication sent |
||||||
|
to the Licensor or its representatives, including but not limited to |
||||||
|
communication on electronic mailing lists, source code control systems, |
||||||
|
and issue tracking systems that are managed by, or on behalf of, the |
||||||
|
Licensor for the purpose of discussing and improving the Work, but |
||||||
|
excluding communication that is conspicuously marked or otherwise |
||||||
|
designated in writing by the copyright owner as "Not a Contribution." |
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||||
|
on behalf of whom a Contribution has been received by Licensor and |
||||||
|
subsequently incorporated within the Work. |
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of |
||||||
|
this License, each Contributor hereby grants to You a perpetual, |
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||||
|
copyright license to reproduce, prepare Derivative Works of, |
||||||
|
publicly display, publicly perform, sublicense, and distribute the |
||||||
|
Work and such Derivative Works in Source or Object form. |
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of |
||||||
|
this License, each Contributor hereby grants to You a perpetual, |
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||||
|
(except as stated in this section) patent license to make, have made, |
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||||
|
where such license applies only to those patent claims licensable |
||||||
|
by such Contributor that are necessarily infringed by their |
||||||
|
Contribution(s) alone or by combination of their Contribution(s) |
||||||
|
with the Work to which such Contribution(s) was submitted. If You |
||||||
|
institute patent litigation against any entity (including a |
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||||
|
or a Contribution incorporated within the Work constitutes direct |
||||||
|
or contributory patent infringement, then any patent licenses |
||||||
|
granted to You under this License for that Work shall terminate |
||||||
|
as of the date such litigation is filed. |
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the |
||||||
|
Work or Derivative Works thereof in any medium, with or without |
||||||
|
modifications, and in Source or Object form, provided that You |
||||||
|
meet the following conditions: |
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or |
||||||
|
Derivative Works a copy of this License; and |
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices |
||||||
|
stating that You changed the files; and |
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works |
||||||
|
that You distribute, all copyright, patent, trademark, and |
||||||
|
attribution notices from the Source form of the Work, |
||||||
|
excluding those notices that do not pertain to any part of |
||||||
|
the Derivative Works; and |
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its |
||||||
|
distribution, then any Derivative Works that You distribute must |
||||||
|
include a readable copy of the attribution notices contained |
||||||
|
within such NOTICE file, excluding those notices that do not |
||||||
|
pertain to any part of the Derivative Works, in at least one |
||||||
|
of the following places: within a NOTICE text file distributed |
||||||
|
as part of the Derivative Works; within the Source form or |
||||||
|
documentation, if provided along with the Derivative Works; or, |
||||||
|
within a display generated by the Derivative Works, if and |
||||||
|
wherever such third-party notices normally appear. The contents |
||||||
|
of the NOTICE file are for informational purposes only and |
||||||
|
do not modify the License. You may add Your own attribution |
||||||
|
notices within Derivative Works that You distribute, alongside |
||||||
|
or as an addendum to the NOTICE text from the Work, provided |
||||||
|
that such additional attribution notices cannot be construed |
||||||
|
as modifying the License. |
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and |
||||||
|
may provide additional or different license terms and conditions |
||||||
|
for use, reproduction, or distribution of Your modifications, or |
||||||
|
for any such Derivative Works as a whole, provided Your use, |
||||||
|
reproduction, and distribution of the Work otherwise complies with |
||||||
|
the conditions stated in this License. |
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||||
|
any Contribution intentionally submitted for inclusion in the Work |
||||||
|
by You to the Licensor shall be under the terms and conditions of |
||||||
|
this License, without any additional terms or conditions. |
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify |
||||||
|
the terms of any separate license agreement you may have executed |
||||||
|
with Licensor regarding such Contributions. |
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade |
||||||
|
names, trademarks, service marks, or product names of the Licensor, |
||||||
|
except as required for reasonable and customary use in describing the |
||||||
|
origin of the Work and reproducing the content of the NOTICE file. |
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or |
||||||
|
agreed to in writing, Licensor provides the Work (and each |
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||||
|
implied, including, without limitation, any warranties or conditions |
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||||
|
appropriateness of using or redistributing the Work and assume any |
||||||
|
risks associated with Your exercise of permissions under this License. |
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory, |
||||||
|
whether in tort (including negligence), contract, or otherwise, |
||||||
|
unless required by applicable law (such as deliberate and grossly |
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be |
||||||
|
liable to You for damages, including any direct, indirect, special, |
||||||
|
incidental, or consequential damages of any character arising as a |
||||||
|
result of this License or out of the use or inability to use the |
||||||
|
Work (including but not limited to damages for loss of goodwill, |
||||||
|
work stoppage, computer failure or malfunction, or any and all |
||||||
|
other commercial damages or losses), even if such Contributor |
||||||
|
has been advised of the possibility of such damages. |
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing |
||||||
|
the Work or Derivative Works thereof, You may choose to offer, |
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity, |
||||||
|
or other liability obligations and/or rights consistent with this |
||||||
|
License. However, in accepting such obligations, You may act only |
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf |
||||||
|
of any other Contributor, and only if You agree to indemnify, |
||||||
|
defend, and hold each Contributor harmless for any liability |
||||||
|
incurred by, or claims asserted against, such Contributor by reason |
||||||
|
of your accepting any such warranty or additional liability. |
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS |
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work. |
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following |
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]" |
||||||
|
replaced with your own identifying information. (Don't include |
||||||
|
the brackets!) The text should be enclosed in the appropriate |
||||||
|
comment syntax for the file format. We also recommend that a |
||||||
|
file or class name and description of purpose be included on the |
||||||
|
same "printed page" as the copyright notice for easier |
||||||
|
identification within third-party archives. |
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner] |
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
you may not use this file except in compliance with the License. |
||||||
|
You may obtain a copy of the License at |
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0 |
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software |
||||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
See the License for the specific language governing permissions and |
||||||
|
limitations under the License. |
@ -0,0 +1,662 @@ |
|||||||
|
/* |
||||||
|
Copyright The containerd Authors. |
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
you may not use this file except in compliance with the License. |
||||||
|
You may obtain a copy of the License at |
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software |
||||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
See the License for the specific language governing permissions and |
||||||
|
limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
/* |
||||||
|
Copyright 2019 The Go Authors. All rights reserved. |
||||||
|
Use of this source code is governed by a BSD-style |
||||||
|
license that can be found in the LICENSE file. |
||||||
|
*/ |
||||||
|
|
||||||
|
package estargz |
||||||
|
|
||||||
|
import ( |
||||||
|
"archive/tar" |
||||||
|
"bytes" |
||||||
|
"compress/gzip" |
||||||
|
"context" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"os" |
||||||
|
"path" |
||||||
|
"runtime" |
||||||
|
"strings" |
||||||
|
"sync" |
||||||
|
|
||||||
|
"github.com/containerd/stargz-snapshotter/estargz/errorutil" |
||||||
|
"github.com/klauspost/compress/zstd" |
||||||
|
digest "github.com/opencontainers/go-digest" |
||||||
|
"golang.org/x/sync/errgroup" |
||||||
|
) |
||||||
|
|
||||||
|
// options holds the configuration assembled by applying each Option
// argument passed to Build.
type options struct {
	chunkSize              int            // chunk size for regular-file division; 0 means the writer default
	compressionLevel       int            // gzip level; defaults to gzip.BestCompression (set in Build)
	prioritizedFiles       []string       // files to place first in the blob for prefetch
	missedPrioritizedFiles *[]string      // when non-nil, collects prioritized paths missing from the tar instead of failing
	compression            Compression    // compression algorithm; defaults to gzip when nil (set in Build)
	ctx                    context.Context // optional context for cancellation of Build
}

// Option mutates the build configuration and may reject invalid values.
type Option func(o *options) error
||||||
|
|
||||||
|
// WithChunkSize option specifies the chunk size of eStargz blob to build.
|
||||||
|
func WithChunkSize(chunkSize int) Option { |
||||||
|
return func(o *options) error { |
||||||
|
o.chunkSize = chunkSize |
||||||
|
return nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// WithCompressionLevel option specifies the gzip compression level.
|
||||||
|
// The default is gzip.BestCompression.
|
||||||
|
// See also: https://godoc.org/compress/gzip#pkg-constants
|
||||||
|
func WithCompressionLevel(level int) Option { |
||||||
|
return func(o *options) error { |
||||||
|
o.compressionLevel = level |
||||||
|
return nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// WithPrioritizedFiles option specifies the list of prioritized files.
|
||||||
|
// These files must be complete paths that are absolute or relative to "/"
|
||||||
|
// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
|
||||||
|
// are treated as "/foo/bar".
|
||||||
|
func WithPrioritizedFiles(files []string) Option { |
||||||
|
return func(o *options) error { |
||||||
|
o.prioritizedFiles = files |
||||||
|
return nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// WithAllowPrioritizeNotFound makes Build continue the execution even if some
|
||||||
|
// of prioritized files specified by WithPrioritizedFiles option aren't found
|
||||||
|
// in the input tar. Instead, this records all missed file names to the passed
|
||||||
|
// slice.
|
||||||
|
func WithAllowPrioritizeNotFound(missedFiles *[]string) Option { |
||||||
|
return func(o *options) error { |
||||||
|
if missedFiles == nil { |
||||||
|
return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed") |
||||||
|
} |
||||||
|
o.missedPrioritizedFiles = missedFiles |
||||||
|
return nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// WithCompression specifies compression algorithm to be used.
|
||||||
|
// Default is gzip.
|
||||||
|
func WithCompression(compression Compression) Option { |
||||||
|
return func(o *options) error { |
||||||
|
o.compression = compression |
||||||
|
return nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// WithContext specifies a context that can be used for clean canceleration.
|
||||||
|
func WithContext(ctx context.Context) Option { |
||||||
|
return func(o *options) error { |
||||||
|
o.ctx = ctx |
||||||
|
return nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Blob is an eStargz blob.
// Reading it streams the built blob; Close removes the temporary files
// backing the stream.
type Blob struct {
	io.ReadCloser
	diffID    digest.Digester // running digest of the uncompressed stream; final only after Close
	tocDigest digest.Digest   // digest of the uncompressed TOC JSON
}
||||||
|
|
||||||
|
// DiffID returns the digest of uncompressed blob.
|
||||||
|
// It is only valid to call DiffID after Close.
|
||||||
|
func (b *Blob) DiffID() digest.Digest { |
||||||
|
return b.diffID.Digest() |
||||||
|
} |
||||||
|
|
||||||
|
// TOCDigest returns the digest of uncompressed TOC JSON.
|
||||||
|
func (b *Blob) TOCDigest() digest.Digest { |
||||||
|
return b.tocDigest |
||||||
|
} |
||||||
|
|
||||||
|
// Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd
// or plain tar) passed through the argument. If there are some prioritized files are listed in
// the option, these files are grouped as "prioritized" and can be used for runtime optimization
// (e.g. prefetch). This function builds a blob in parallel, with dividing that blob into several
// (at least the number of runtime.GOMAXPROCS(0)) sub-blobs.
func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
	var opts options
	opts.compressionLevel = gzip.BestCompression // BestCompression by default
	for _, o := range opt {
		if err := o(&opts); err != nil {
			return nil, err
		}
	}
	if opts.compression == nil {
		opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
	}
	layerFiles := newTempFiles()
	ctx := opts.ctx
	if ctx == nil {
		ctx = context.Background()
	}
	// Watcher goroutine: if the caller's context is cancelled before Build
	// returns, remove the temp files early. `done` is closed on return so
	// the goroutine never leaks.
	done := make(chan struct{})
	defer close(done)
	go func() {
		select {
		case <-done:
			// nop
		case <-ctx.Done():
			layerFiles.CleanupAll()
		}
	}()
	// On error return, clean up temp files and surface a cancellation (if
	// any) wrapped around the original error.
	defer func() {
		if rErr != nil {
			if err := layerFiles.CleanupAll(); err != nil {
				rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
			}
		}
		if cErr := ctx.Err(); cErr != nil {
			rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
		}
	}()
	// Normalize the input to a plain tar (decompressing gzip/zstd if needed).
	tarBlob, err := decompressBlob(tarBlob, layerFiles)
	if err != nil {
		return nil, err
	}
	// Reorder entries so prioritized files come first.
	entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
	if err != nil {
		return nil, err
	}
	// Split the entries into roughly equal parts, one sub-blob per part,
	// built in parallel below.
	tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
	writers := make([]*Writer, len(tarParts))
	payloads := make([]*os.File, len(tarParts))
	var mu sync.Mutex
	var eg errgroup.Group
	for i, parts := range tarParts {
		i, parts := i, parts // capture per-iteration copies for the closure (pre-Go 1.22 semantics)
		// builds verifiable stargz sub-blobs
		eg.Go(func() error {
			esgzFile, err := layerFiles.TempFile("", "esgzdata")
			if err != nil {
				return err
			}
			sw := NewWriterWithCompressor(esgzFile, opts.compression)
			sw.ChunkSize = opts.chunkSize
			if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
				return err
			}
			mu.Lock()
			writers[i] = sw
			payloads[i] = esgzFile
			mu.Unlock()
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		rErr = err
		return nil, err
	}
	// Combine the sub-blobs' TOCs into a single TOC + footer to append at
	// the tail of the concatenated payloads.
	tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...)
	if err != nil {
		rErr = err
		return nil, err
	}
	var rs []io.Reader
	for _, p := range payloads {
		fs, err := fileSectionReader(p)
		if err != nil {
			return nil, err
		}
		rs = append(rs, fs)
	}
	diffID := digest.Canonical.Digester()
	pr, pw := io.Pipe()
	// Stream the concatenated blob to the caller through the pipe while
	// simultaneously computing the uncompressed digest (DiffID) via Tee.
	go func() {
		r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
		if err != nil {
			pw.CloseWithError(err)
			return
		}
		defer r.Close()
		if _, err := io.Copy(diffID.Hash(), r); err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.Close()
	}()
	return &Blob{
		ReadCloser: readCloser{
			Reader:    pr,
			closeFunc: layerFiles.CleanupAll, // Close removes the backing temp files
		},
		tocDigest: tocDgst,
		diffID:    diffID,
	}, nil
}
||||||
|
|
||||||
|
// closeWithCombine takes unclosed Writers and close them. This also returns the
// toc that combined all Writers into.
// Writers doesn't write TOC and footer to the underlying writers so they can be
// combined into a single eStargz and tocAndFooter returned by this function can
// be appended at the tail of that combined blob.
func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
	if len(ws) == 0 {
		return nil, "", fmt.Errorf("at least one writer must be passed")
	}
	for _, w := range ws {
		if w.closed {
			return nil, "", fmt.Errorf("writer must be unclosed")
		}
		// Mark the writer closed when this function returns, even on error.
		defer func(w *Writer) { w.closed = true }(w)
		// Flush the compressed payload (without TOC/footer) to the
		// underlying writer.
		if err := w.closeGz(); err != nil {
			return nil, "", err
		}
		if err := w.bw.Flush(); err != nil {
			return nil, "", err
		}
	}
	var (
		mtoc          = new(JTOC) // merged TOC across all sub-blobs
		currentOffset int64       // byte offset of the current sub-blob within the combined blob
	)
	mtoc.Version = ws[0].toc.Version
	for _, w := range ws {
		for _, e := range w.toc.Entries {
			// Recalculate Offset of non-empty files/chunks
			if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
				e.Offset += currentOffset
			}
			mtoc.Entries = append(mtoc.Entries, e)
		}
		// The merged TOC uses the highest version among the sub-blobs.
		if w.toc.Version > mtoc.Version {
			mtoc.Version = w.toc.Version
		}
		currentOffset += w.cw.n
	}

	return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
}
||||||
|
|
||||||
|
func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) { |
||||||
|
buf := new(bytes.Buffer) |
||||||
|
tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil) |
||||||
|
if err != nil { |
||||||
|
return nil, "", err |
||||||
|
} |
||||||
|
return buf, tocDigest, nil |
||||||
|
} |
||||||
|
|
||||||
|
// divideEntries divides passed entries to the parts at least the number specified by the
|
||||||
|
// argument.
|
||||||
|
func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) { |
||||||
|
var estimatedSize int64 |
||||||
|
for _, e := range entries { |
||||||
|
estimatedSize += e.header.Size |
||||||
|
} |
||||||
|
unitSize := estimatedSize / int64(minPartsNum) |
||||||
|
var ( |
||||||
|
nextEnd = unitSize |
||||||
|
offset int64 |
||||||
|
) |
||||||
|
set = append(set, []*entry{}) |
||||||
|
for _, e := range entries { |
||||||
|
set[len(set)-1] = append(set[len(set)-1], e) |
||||||
|
offset += e.header.Size |
||||||
|
if offset > nextEnd { |
||||||
|
set = append(set, []*entry{}) |
||||||
|
nextEnd += unitSize |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
// errNotFound is the sentinel wrapped by moveRec when a prioritized path is
// absent from the tar; callers match it with errors.Is.
var errNotFound = errors.New("not found")
||||||
|
|
||||||
|
// sortEntries reads the specified tar blob and returns a list of tar entries.
|
||||||
|
// If some of prioritized files are specified, the list starts from these
|
||||||
|
// files with keeping the order specified by the argument.
|
||||||
|
func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) { |
||||||
|
|
||||||
|
// Import tar file.
|
||||||
|
intar, err := importTar(in) |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("failed to sort: %w", err) |
||||||
|
} |
||||||
|
|
||||||
|
// Sort the tar file respecting to the prioritized files list.
|
||||||
|
sorted := &tarFile{} |
||||||
|
for _, l := range prioritized { |
||||||
|
if err := moveRec(l, intar, sorted); err != nil { |
||||||
|
if errors.Is(err, errNotFound) && missedPrioritized != nil { |
||||||
|
*missedPrioritized = append(*missedPrioritized, l) |
||||||
|
continue // allow not found
|
||||||
|
} |
||||||
|
return nil, fmt.Errorf("failed to sort tar entries: %w", err) |
||||||
|
} |
||||||
|
} |
||||||
|
if len(prioritized) == 0 { |
||||||
|
sorted.add(&entry{ |
||||||
|
header: &tar.Header{ |
||||||
|
Name: NoPrefetchLandmark, |
||||||
|
Typeflag: tar.TypeReg, |
||||||
|
Size: int64(len([]byte{landmarkContents})), |
||||||
|
}, |
||||||
|
payload: bytes.NewReader([]byte{landmarkContents}), |
||||||
|
}) |
||||||
|
} else { |
||||||
|
sorted.add(&entry{ |
||||||
|
header: &tar.Header{ |
||||||
|
Name: PrefetchLandmark, |
||||||
|
Typeflag: tar.TypeReg, |
||||||
|
Size: int64(len([]byte{landmarkContents})), |
||||||
|
}, |
||||||
|
payload: bytes.NewReader([]byte{landmarkContents}), |
||||||
|
}) |
||||||
|
} |
||||||
|
|
||||||
|
// Dump all entry and concatinate them.
|
||||||
|
return append(sorted.dump(), intar.dump()...), nil |
||||||
|
} |
||||||
|
|
||||||
|
// readerFromEntries returns a reader of tar archive that contains entries passed
|
||||||
|
// through the arguments.
|
||||||
|
func readerFromEntries(entries ...*entry) io.Reader { |
||||||
|
pr, pw := io.Pipe() |
||||||
|
go func() { |
||||||
|
tw := tar.NewWriter(pw) |
||||||
|
defer tw.Close() |
||||||
|
for _, entry := range entries { |
||||||
|
if err := tw.WriteHeader(entry.header); err != nil { |
||||||
|
pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) |
||||||
|
return |
||||||
|
} |
||||||
|
if _, err := io.Copy(tw, entry.payload); err != nil { |
||||||
|
pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) |
||||||
|
return |
||||||
|
} |
||||||
|
} |
||||||
|
pw.Close() |
||||||
|
}() |
||||||
|
return pr |
||||||
|
} |
||||||
|
|
||||||
|
func importTar(in io.ReaderAt) (*tarFile, error) { |
||||||
|
tf := &tarFile{} |
||||||
|
pw, err := newCountReader(in) |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("failed to make position watcher: %w", err) |
||||||
|
} |
||||||
|
tr := tar.NewReader(pw) |
||||||
|
|
||||||
|
// Walk through all nodes.
|
||||||
|
for { |
||||||
|
// Fetch and parse next header.
|
||||||
|
h, err := tr.Next() |
||||||
|
if err != nil { |
||||||
|
if err == io.EOF { |
||||||
|
break |
||||||
|
} else { |
||||||
|
return nil, fmt.Errorf("failed to parse tar file, %w", err) |
||||||
|
} |
||||||
|
} |
||||||
|
switch cleanEntryName(h.Name) { |
||||||
|
case PrefetchLandmark, NoPrefetchLandmark: |
||||||
|
// Ignore existing landmark
|
||||||
|
continue |
||||||
|
} |
||||||
|
|
||||||
|
// Add entry. If it already exists, replace it.
|
||||||
|
if _, ok := tf.get(h.Name); ok { |
||||||
|
tf.remove(h.Name) |
||||||
|
} |
||||||
|
tf.add(&entry{ |
||||||
|
header: h, |
||||||
|
payload: io.NewSectionReader(in, pw.currentPos(), h.Size), |
||||||
|
}) |
||||||
|
} |
||||||
|
|
||||||
|
return tf, nil |
||||||
|
} |
||||||
|
|
||||||
|
// moveRec moves the named entry from in to out, first moving everything the
// entry depends on: its ancestor directories and, for hardlinks, the link
// target — so that out remains a valid, extractable ordering.
// Returns an error wrapping errNotFound when the name exists in neither file.
func moveRec(name string, in *tarFile, out *tarFile) error {
	name = cleanEntryName(name)
	if name == "" { // root directory. stop recursion.
		if e, ok := in.get(name); ok {
			// entry of the root directory exists. we should move it as well.
			// this case will occur if tar entries are prefixed with "./", "/", etc.
			out.add(e)
			in.remove(name)
		}
		return nil
	}

	_, okIn := in.get(name)
	_, okOut := out.get(name)
	if !okIn && !okOut {
		return fmt.Errorf("file: %q: %w", name, errNotFound)
	}

	// Move parent directories first (recursing toward the root).
	parent, _ := path.Split(strings.TrimSuffix(name, "/"))
	if err := moveRec(parent, in, out); err != nil {
		return err
	}
	// A hardlink must be preceded by its link target in the archive, so move
	// the target before the link itself.
	if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
		if err := moveRec(e.header.Linkname, in, out); err != nil {
			return err
		}
	}
	// Finally move the entry itself, if it is still in `in` (it may already
	// have been moved by an earlier prioritized path).
	if e, ok := in.get(name); ok {
		out.add(e)
		in.remove(name)
	}
	return nil
}
||||||
|
|
||||||
|
// entry pairs a tar header with a re-readable view of its payload.
type entry struct {
	header  *tar.Header
	payload io.ReadSeeker // payload bytes; a SectionReader into the original blob when imported
}
||||||
|
|
||||||
|
// tarFile is an in-memory, order-preserving index of tar entries,
// addressable by cleaned entry name.
type tarFile struct {
	index  map[string]*entry // lookup by cleanEntryName(header.Name)
	stream []*entry          // entries in insertion order
}
||||||
|
|
||||||
|
func (f *tarFile) add(e *entry) { |
||||||
|
if f.index == nil { |
||||||
|
f.index = make(map[string]*entry) |
||||||
|
} |
||||||
|
f.index[cleanEntryName(e.header.Name)] = e |
||||||
|
f.stream = append(f.stream, e) |
||||||
|
} |
||||||
|
|
||||||
|
func (f *tarFile) remove(name string) { |
||||||
|
name = cleanEntryName(name) |
||||||
|
if f.index != nil { |
||||||
|
delete(f.index, name) |
||||||
|
} |
||||||
|
var filtered []*entry |
||||||
|
for _, e := range f.stream { |
||||||
|
if cleanEntryName(e.header.Name) == name { |
||||||
|
continue |
||||||
|
} |
||||||
|
filtered = append(filtered, e) |
||||||
|
} |
||||||
|
f.stream = filtered |
||||||
|
} |
||||||
|
|
||||||
|
func (f *tarFile) get(name string) (e *entry, ok bool) { |
||||||
|
if f.index == nil { |
||||||
|
return nil, false |
||||||
|
} |
||||||
|
e, ok = f.index[cleanEntryName(name)] |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func (f *tarFile) dump() []*entry { |
||||||
|
return f.stream |
||||||
|
} |
||||||
|
|
||||||
|
// readCloser adapts a Reader plus an arbitrary cleanup function into an
// io.ReadCloser.
type readCloser struct {
	io.Reader
	closeFunc func() error // invoked by Close
}
||||||
|
|
||||||
|
func (rc readCloser) Close() error { |
||||||
|
return rc.closeFunc() |
||||||
|
} |
||||||
|
|
||||||
|
func fileSectionReader(file *os.File) (*io.SectionReader, error) { |
||||||
|
info, err := file.Stat() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return io.NewSectionReader(file, 0, info.Size()), nil |
||||||
|
} |
||||||
|
|
||||||
|
func newTempFiles() *tempFiles { |
||||||
|
return &tempFiles{} |
||||||
|
} |
||||||
|
|
||||||
|
// tempFiles tracks temporary files so they can be closed and removed
// together exactly once.
type tempFiles struct {
	files       []*os.File // all files created via TempFile
	filesMu     sync.Mutex // guards files
	cleanupOnce sync.Once  // makes CleanupAll idempotent
}
||||||
|
|
||||||
|
func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { |
||||||
|
f, err := os.CreateTemp(dir, pattern) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
tf.filesMu.Lock() |
||||||
|
tf.files = append(tf.files, f) |
||||||
|
tf.filesMu.Unlock() |
||||||
|
return f, nil |
||||||
|
} |
||||||
|
|
||||||
|
// CleanupAll closes and removes every file created through TempFile.
// Only the first call performs the work (guarded by cleanupOnce) and
// returns its aggregated error; subsequent calls return nil.
func (tf *tempFiles) CleanupAll() (err error) {
	tf.cleanupOnce.Do(func() {
		err = tf.cleanupAll()
	})
	return
}
||||||
|
|
||||||
|
// cleanupAll closes and deletes every recorded file, continuing past
// individual failures and aggregating all errors into one value.
func (tf *tempFiles) cleanupAll() error {
	tf.filesMu.Lock()
	defer tf.filesMu.Unlock()
	var errs []error
	for _, file := range tf.files {
		if cerr := file.Close(); cerr != nil {
			errs = append(errs, cerr)
		}
		if rerr := os.Remove(file.Name()); rerr != nil {
			errs = append(errs, rerr)
		}
	}
	tf.files = nil
	return errorutil.Aggregate(errs)
}
||||||
|
|
||||||
|
func newCountReader(r io.ReaderAt) (*countReader, error) { |
||||||
|
pos := int64(0) |
||||||
|
return &countReader{r: r, cPos: &pos}, nil |
||||||
|
} |
||||||
|
|
||||||
|
type countReader struct { |
||||||
|
r io.ReaderAt |
||||||
|
cPos *int64 |
||||||
|
|
||||||
|
mu sync.Mutex |
||||||
|
} |
||||||
|
|
||||||
|
func (cr *countReader) Read(p []byte) (int, error) { |
||||||
|
cr.mu.Lock() |
||||||
|
defer cr.mu.Unlock() |
||||||
|
|
||||||
|
n, err := cr.r.ReadAt(p, *cr.cPos) |
||||||
|
if err == nil { |
||||||
|
*cr.cPos += int64(n) |
||||||
|
} |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
func (cr *countReader) Seek(offset int64, whence int) (int64, error) { |
||||||
|
cr.mu.Lock() |
||||||
|
defer cr.mu.Unlock() |
||||||
|
|
||||||
|
switch whence { |
||||||
|
default: |
||||||
|
return 0, fmt.Errorf("Unknown whence: %v", whence) |
||||||
|
case io.SeekStart: |
||||||
|
case io.SeekCurrent: |
||||||
|
offset += *cr.cPos |
||||||
|
case io.SeekEnd: |
||||||
|
return 0, fmt.Errorf("Unsupported whence: %v", whence) |
||||||
|
} |
||||||
|
|
||||||
|
if offset < 0 { |
||||||
|
return 0, fmt.Errorf("invalid offset") |
||||||
|
} |
||||||
|
*cr.cPos = offset |
||||||
|
return offset, nil |
||||||
|
} |
||||||
|
|
||||||
|
func (cr *countReader) currentPos() int64 { |
||||||
|
cr.mu.Lock() |
||||||
|
defer cr.mu.Unlock() |
||||||
|
|
||||||
|
return *cr.cPos |
||||||
|
} |
||||||
|
|
||||||
|
// decompressBlob returns a section reader over the uncompressed contents of
// org. The compression format is sniffed from the blob's leading magic
// bytes: gzip (1F 8B 08) and zstd (28 B5 2F FD) payloads are decompressed
// into a temporary file obtained from tmp (cleaned up later via tmp's
// cleanup), while anything else is assumed to already be uncompressed and
// is returned as a fresh section reader over org.
func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
	if org.Size() < 4 {
		// Too short to even hold a 4-byte magic number; treat as uncompressed.
		return org, nil
	}
	src := make([]byte, 4)
	if _, err := org.Read(src); err != nil && err != io.EOF {
		return nil, err
	}
	var dR io.Reader
	if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) {
		// gzip
		// A fresh SectionReader is used so decompression starts at offset 0
		// regardless of the sniffing Read above having advanced org.
		dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
		if err != nil {
			return nil, err
		}
		defer dgR.Close()
		dR = io.Reader(dgR)
	} else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) {
		// zstd
		dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size()))
		if err != nil {
			return nil, err
		}
		defer dzR.Close()
		dR = io.Reader(dzR)
	} else {
		// uncompressed
		return io.NewSectionReader(org, 0, org.Size()), nil
	}
	b, err := tmp.TempFile("", "uncompresseddata")
	if err != nil {
		return nil, err
	}
	// Spool the decompressed stream to disk so random access can be served
	// over it via an io.SectionReader.
	if _, err := io.Copy(b, dR); err != nil {
		return nil, err
	}
	return fileSectionReader(b)
}
@ -0,0 +1,40 @@ |
|||||||
|
/* |
||||||
|
Copyright The containerd Authors. |
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
you may not use this file except in compliance with the License. |
||||||
|
You may obtain a copy of the License at |
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software |
||||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
See the License for the specific language governing permissions and |
||||||
|
limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
package errorutil |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"strings" |
||||||
|
) |
||||||
|
|
||||||
|
// Aggregate combines a list of errors into a single new error.
// It returns nil for an empty list, the error itself for a single-element
// list, and otherwise a new error whose message is a "<n> error(s)
// occurred:" header followed by one "* <err>" bullet per error, joined
// with newline+tab.
func Aggregate(errs []error) error {
	if len(errs) == 0 {
		return nil
	}
	if len(errs) == 1 {
		return errs[0]
	}
	lines := make([]string, 0, len(errs)+1)
	lines = append(lines, fmt.Sprintf("%d error(s) occurred:", len(errs)))
	for _, err := range errs {
		lines = append(lines, fmt.Sprintf("* %s", err))
	}
	return errors.New(strings.Join(lines, "\n\t"))
}
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,237 @@ |
|||||||
|
/* |
||||||
|
Copyright The containerd Authors. |
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
you may not use this file except in compliance with the License. |
||||||
|
You may obtain a copy of the License at |
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software |
||||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
See the License for the specific language governing permissions and |
||||||
|
limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
/* |
||||||
|
Copyright 2019 The Go Authors. All rights reserved. |
||||||
|
Use of this source code is governed by a BSD-style |
||||||
|
license that can be found in the LICENSE file. |
||||||
|
*/ |
||||||
|
|
||||||
|
package estargz |
||||||
|
|
||||||
|
import ( |
||||||
|
"archive/tar" |
||||||
|
"bytes" |
||||||
|
"compress/gzip" |
||||||
|
"encoding/binary" |
||||||
|
"encoding/json" |
||||||
|
"fmt" |
||||||
|
"hash" |
||||||
|
"io" |
||||||
|
"strconv" |
||||||
|
|
||||||
|
digest "github.com/opencontainers/go-digest" |
||||||
|
) |
||||||
|
|
||||||
|
// gzipCompression bundles a gzip Compressor and Decompressor pair so it
// satisfies the combined Compression interface.
type gzipCompression struct {
	*GzipCompressor
	*GzipDecompressor
}

// newGzipCompressionWithLevel returns a gzip-based Compression using the
// given gzip compression level for writing.
func newGzipCompressionWithLevel(level int) Compression {
	return &gzipCompression{
		&GzipCompressor{level},
		&GzipDecompressor{},
	}
}

// NewGzipCompressor returns a Compressor that writes gzip at
// gzip.BestCompression.
func NewGzipCompressor() *GzipCompressor {
	return &GzipCompressor{gzip.BestCompression}
}

// NewGzipCompressorWithLevel returns a Compressor that writes gzip at the
// given compression level.
func NewGzipCompressorWithLevel(level int) *GzipCompressor {
	return &GzipCompressor{level}
}
||||||
|
|
||||||
|
// GzipCompressor is a Compressor producing gzip-compressed eStargz blobs.
type GzipCompressor struct {
	compressionLevel int // passed through to gzip.NewWriterLevel
}

// Writer returns a gzip writer (at the configured level) wrapping w, used
// for writing one chunk of the eStargz blob.
func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriterLevel(w, gc.compressionLevel)
}
||||||
|
|
||||||
|
// WriteTOCAndFooter serializes toc as tab-indented JSON, writes it to w as
// a gzip-compressed single-entry tar (entry name TOCTarName), appends the
// eStargz footer recording the TOC offset off, and returns the digest of
// the raw TOC JSON bytes. If diffHash is non-nil, the uncompressed tar
// stream is also fed into it so the caller can compute the blob's DiffID.
func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) {
	tocJSON, err := json.MarshalIndent(toc, "", "\t")
	if err != nil {
		return "", err
	}
	// Error ignored: gzip.NewWriterLevel fails only for an out-of-range
	// level (per the compress/gzip docs).
	gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel)
	gw := io.Writer(gz)
	if diffHash != nil {
		gw = io.MultiWriter(gz, diffHash)
	}
	tw := tar.NewWriter(gw)
	if err := tw.WriteHeader(&tar.Header{
		Typeflag: tar.TypeReg,
		Name:     TOCTarName,
		Size:     int64(len(tocJSON)),
	}); err != nil {
		return "", err
	}
	if _, err := tw.Write(tocJSON); err != nil {
		return "", err
	}

	// Close the tar then the gzip stream so everything is flushed to w
	// before the footer is appended.
	if err := tw.Close(); err != nil {
		return "", err
	}
	if err := gz.Close(); err != nil {
		return "", err
	}
	if _, err := w.Write(gzipFooterBytes(off)); err != nil {
		return "", err
	}
	return digest.FromBytes(tocJSON), nil
}
||||||
|
|
||||||
|
// gzipFooterBytes returns the 51 bytes footer: an empty gzip stream
// (NoCompression) whose Extra header carries the TOC offset encoded as
// "%016xSTARGZ" under subfield IDs 'S','G'.
func gzipFooterBytes(tocOff int64) []byte {
	buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes

	// Extra header indicating the offset of TOCJSON
	// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
	header := make([]byte, 4)
	header[0], header[1] = 'S', 'G'
	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
	gz.Header.Extra = append(header, []byte(subfield)...)
	gz.Close()
	if buf.Len() != FooterSize {
		// The footer size is part of the on-disk format; a mismatch here is
		// a programming bug, not a runtime condition.
		panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
	}
	return buf.Bytes()
}
||||||
|
|
||||||
|
// GzipDecompressor is a Decompressor for gzip-compressed eStargz blobs.
type GzipDecompressor struct{}

// Reader returns a gzip reader for decompressing file payload chunks.
func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
	return gzip.NewReader(r)
}

// ParseTOC parses the TOC from r, returning it together with the digest of
// the raw TOC JSON.
func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
	return parseTOCEStargz(r)
}
||||||
|
|
||||||
|
// ParseFooter parses the eStargz footer p (which must be exactly
// FooterSize bytes): an empty gzip stream whose Extra header carries the
// TOC offset as "%016xSTARGZ" under subfield IDs 'S','G'. It returns the
// TOC offset; blobPayloadSize equals the TOC offset (the payload ends
// where the TOC begins) and tocSize is 0, meaning the caller derives the
// default size.
func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
	if len(p) != FooterSize {
		return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
	}
	zr, err := gzip.NewReader(bytes.NewReader(p))
	if err != nil {
		return 0, 0, 0, err
	}
	defer zr.Close()
	extra := zr.Header.Extra
	// Guard before indexing: a truncated Extra field must produce an error,
	// not an index-out-of-range panic. The field needs the 4-byte subfield
	// header (SI1, SI2, LEN) plus the 22-byte "%016xSTARGZ" payload.
	if len(extra) < 4+16+len("STARGZ") {
		return 0, 0, 0, fmt.Errorf("invalid extra field length %d", len(extra))
	}
	si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
	if si1 != 'S' || si2 != 'G' {
		// Message fixed: the expected subfield IDs are 'S','G' (previously
		// the error claimed "want E, S", contradicting the check above).
		return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2)
	}
	if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
		return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
	}
	if string(subfield[16:]) != "STARGZ" {
		return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
	}
	tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
	if err != nil {
		return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
	}
	return tocOffset, tocOffset, 0, nil
}
||||||
|
|
||||||
|
// FooterSize returns the fixed size (FooterSize, 51 bytes) of the eStargz
// footer this decompressor parses.
func (gz *GzipDecompressor) FooterSize() int64 {
	return FooterSize
}

// DecompressTOC returns a reader over the raw TOC JSON contained in r.
func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
	return decompressTOCEStargz(r)
}
||||||
|
|
||||||
|
// LegacyGzipDecompressor is a Decompressor for legacy (pre-eStargz) stargz
// blobs, which use a 47-byte footer whose gzip Extra field is the bare
// "%016xSTARGZ" payload with no subfield header.
type LegacyGzipDecompressor struct{}

// Reader returns a gzip reader for decompressing file payload chunks.
func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
	return gzip.NewReader(r)
}

// ParseTOC parses the TOC from r, returning it together with the digest of
// the raw TOC JSON.
func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
	return parseTOCEStargz(r)
}

// ParseFooter parses the legacy stargz footer p (which must be exactly
// legacyFooterSize bytes). It returns the TOC offset recorded in the Extra
// field; blobPayloadSize equals the TOC offset (the payload ends where the
// TOC begins) and tocSize is 0, meaning the caller derives the default.
func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
	if len(p) != legacyFooterSize {
		return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
	}
	zr, err := gzip.NewReader(bytes.NewReader(p))
	if err != nil {
		return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
	}
	defer zr.Close()
	extra := zr.Header.Extra
	// Exact-length check also guards the index expressions below.
	if len(extra) != 16+len("STARGZ") {
		return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
	}
	if string(extra[16:]) != "STARGZ" {
		return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
	}
	tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
	if err != nil {
		return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
	}
	return tocOffset, tocOffset, 0, nil
}

// FooterSize returns the fixed size (legacyFooterSize, 47 bytes) of the
// legacy stargz footer.
func (gz *LegacyGzipDecompressor) FooterSize() int64 {
	return legacyFooterSize
}

// DecompressTOC returns a reader over the raw TOC JSON contained in r.
func (gz *LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
	return decompressTOCEStargz(r)
}
||||||
|
|
||||||
|
// parseTOCEStargz decompresses the TOC tar entry from r, decodes the JSON
// into a JTOC, and returns it together with the canonical digest of the
// raw (uncompressed) TOC JSON bytes.
func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
	tr, err := decompressTOCEStargz(r)
	if err != nil {
		return nil, "", err
	}
	dgstr := digest.Canonical.Digester()
	toc = new(JTOC)
	// Tee the TOC bytes through the digester while decoding so the digest
	// covers exactly the bytes that were parsed.
	if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
		return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
	}
	if err := tr.Close(); err != nil {
		return nil, "", err
	}
	return toc, dgstr.Digest(), nil
}
||||||
|
|
||||||
|
// decompressTOCEStargz opens the gzip stream at r and returns a ReadCloser
// over the contents of the TOC tar entry, which must be named TOCTarName.
// Closing the returned reader closes the underlying gzip reader.
func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
	zr, err := gzip.NewReader(r)
	if err != nil {
		return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
	}
	// The TOC lives in its own gzip member; don't read past its boundary.
	zr.Multistream(false)
	tr := tar.NewReader(zr)
	h, err := tr.Next()
	if err != nil {
		return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
	}
	if h.Name != TOCTarName {
		return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
	}
	return readCloser{tr, zr.Close}, nil
}
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,317 @@ |
|||||||
|
/* |
||||||
|
Copyright The containerd Authors. |
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
you may not use this file except in compliance with the License. |
||||||
|
You may obtain a copy of the License at |
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software |
||||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
See the License for the specific language governing permissions and |
||||||
|
limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
/* |
||||||
|
Copyright 2019 The Go Authors. All rights reserved. |
||||||
|
Use of this source code is governed by a BSD-style |
||||||
|
license that can be found in the LICENSE file. |
||||||
|
*/ |
||||||
|
|
||||||
|
package estargz |
||||||
|
|
||||||
|
import ( |
||||||
|
"archive/tar" |
||||||
|
"hash" |
||||||
|
"io" |
||||||
|
"os" |
||||||
|
"path" |
||||||
|
"time" |
||||||
|
|
||||||
|
digest "github.com/opencontainers/go-digest" |
||||||
|
) |
||||||
|
|
||||||
|
const ( |
||||||
|
// TOCTarName is the name of the JSON file in the tar archive in the
|
||||||
|
// table of contents gzip stream.
|
||||||
|
TOCTarName = "stargz.index.json" |
||||||
|
|
||||||
|
// FooterSize is the number of bytes in the footer
|
||||||
|
//
|
||||||
|
// The footer is an empty gzip stream with no compression and an Extra
|
||||||
|
// header of the form "%016xSTARGZ", where the 64 bit hex-encoded
|
||||||
|
// number is the offset to the gzip stream of JSON TOC.
|
||||||
|
//
|
||||||
|
// 51 comes from:
|
||||||
|
//
|
||||||
|
// 10 bytes gzip header
|
||||||
|
// 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
|
||||||
|
// 2 bytes Extra: SI1 = 'S', SI2 = 'G'
|
||||||
|
// 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
|
||||||
|
// 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
|
||||||
|
// 5 bytes flate header
|
||||||
|
// 8 bytes gzip footer
|
||||||
|
// (End of the eStargz blob)
|
||||||
|
//
|
||||||
|
// NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz.
|
||||||
|
FooterSize = 51 |
||||||
|
|
||||||
|
// legacyFooterSize is the number of bytes in the legacy stargz footer.
|
||||||
|
//
|
||||||
|
// 47 comes from:
|
||||||
|
//
|
||||||
|
// 10 byte gzip header +
|
||||||
|
// 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
|
||||||
|
// 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
|
||||||
|
// 5 byte flate header
|
||||||
|
// 8 byte gzip footer (two little endian uint32s: digest, size)
|
||||||
|
legacyFooterSize = 47 |
||||||
|
|
||||||
|
// TOCJSONDigestAnnotation is an annotation for an image layer. This stores the
|
||||||
|
// digest of the TOC JSON.
|
||||||
|
// This annotation is valid only when it is specified in `.[]layers.annotations`
|
||||||
|
// of an image manifest.
|
||||||
|
TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" |
||||||
|
|
||||||
|
// StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy
|
||||||
|
// pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size
|
||||||
|
// to the runtime but current OCI image doesn't ship this information by default. So we store this
|
||||||
|
// to the special annotation.
|
||||||
|
StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size" |
||||||
|
|
||||||
|
// PrefetchLandmark is a file entry which indicates the end position of
|
||||||
|
// prefetch in the stargz file.
|
||||||
|
PrefetchLandmark = ".prefetch.landmark" |
||||||
|
|
||||||
|
// NoPrefetchLandmark is a file entry which indicates that no prefetch should
|
||||||
|
// occur in the stargz file.
|
||||||
|
NoPrefetchLandmark = ".no.prefetch.landmark" |
||||||
|
|
||||||
|
landmarkContents = 0xf |
||||||
|
) |
||||||
|
|
||||||
|
// JTOC is the JSON-serialized table of contents index of the files in the stargz file.
|
||||||
|
type JTOC struct { |
||||||
|
Version int `json:"version"` |
||||||
|
Entries []*TOCEntry `json:"entries"` |
||||||
|
} |
||||||
|
|
||||||
|
// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
|
||||||
|
type TOCEntry struct { |
||||||
|
// Name is the tar entry's name. It is the complete path
|
||||||
|
// stored in the tar file, not just the base name.
|
||||||
|
Name string `json:"name"` |
||||||
|
|
||||||
|
// Type is one of "dir", "reg", "symlink", "hardlink", "char",
|
||||||
|
// "block", "fifo", or "chunk".
|
||||||
|
// The "chunk" type is used for regular file data chunks past the first
|
||||||
|
// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
|
||||||
|
// ChunkOffset, and ChunkSize populated.
|
||||||
|
Type string `json:"type"` |
||||||
|
|
||||||
|
// Size, for regular files, is the logical size of the file.
|
||||||
|
Size int64 `json:"size,omitempty"` |
||||||
|
|
||||||
|
// ModTime3339 is the modification time of the tar entry. Empty
|
||||||
|
// means zero or unknown. Otherwise it's in UTC RFC3339
|
||||||
|
// format. Use the ModTime method to access the time.Time value.
|
||||||
|
ModTime3339 string `json:"modtime,omitempty"` |
||||||
|
modTime time.Time |
||||||
|
|
||||||
|
// LinkName, for symlinks and hardlinks, is the link target.
|
||||||
|
LinkName string `json:"linkName,omitempty"` |
||||||
|
|
||||||
|
// Mode is the permission and mode bits.
|
||||||
|
Mode int64 `json:"mode,omitempty"` |
||||||
|
|
||||||
|
// UID is the user ID of the owner.
|
||||||
|
UID int `json:"uid,omitempty"` |
||||||
|
|
||||||
|
// GID is the group ID of the owner.
|
||||||
|
GID int `json:"gid,omitempty"` |
||||||
|
|
||||||
|
// Uname is the username of the owner.
|
||||||
|
//
|
||||||
|
// In the serialized JSON, this field may only be present for
|
||||||
|
// the first entry with the same UID.
|
||||||
|
Uname string `json:"userName,omitempty"` |
||||||
|
|
||||||
|
// Gname is the group name of the owner.
|
||||||
|
//
|
||||||
|
// In the serialized JSON, this field may only be present for
|
||||||
|
// the first entry with the same GID.
|
||||||
|
Gname string `json:"groupName,omitempty"` |
||||||
|
|
||||||
|
// Offset, for regular files, provides the offset in the
|
||||||
|
// stargz file to the file's data bytes. See ChunkOffset and
|
||||||
|
// ChunkSize.
|
||||||
|
Offset int64 `json:"offset,omitempty"` |
||||||
|
|
||||||
|
nextOffset int64 // the Offset of the next entry with a non-zero Offset
|
||||||
|
|
||||||
|
// DevMajor is the major device number for "char" and "block" types.
|
||||||
|
DevMajor int `json:"devMajor,omitempty"` |
||||||
|
|
||||||
|
// DevMinor is the major device number for "char" and "block" types.
|
||||||
|
DevMinor int `json:"devMinor,omitempty"` |
||||||
|
|
||||||
|
// NumLink is the number of entry names pointing to this entry.
|
||||||
|
// Zero means one name references this entry.
|
||||||
|
// This field is calculated during runtime and not recorded in TOC JSON.
|
||||||
|
NumLink int `json:"-"` |
||||||
|
|
||||||
|
// Xattrs are the extended attribute for the entry.
|
||||||
|
Xattrs map[string][]byte `json:"xattrs,omitempty"` |
||||||
|
|
||||||
|
// Digest stores the OCI checksum for regular files payload.
|
||||||
|
// It has the form "sha256:abcdef01234....".
|
||||||
|
Digest string `json:"digest,omitempty"` |
||||||
|
|
||||||
|
// ChunkOffset is non-zero if this is a chunk of a large,
|
||||||
|
// regular file. If so, the Offset is where the gzip header of
|
||||||
|
// ChunkSize bytes at ChunkOffset in Name begin.
|
||||||
|
//
|
||||||
|
// In serialized form, a "chunkSize" JSON field of zero means
|
||||||
|
// that the chunk goes to the end of the file. After reading
|
||||||
|
// from the stargz TOC, though, the ChunkSize is initialized
|
||||||
|
// to a non-zero file for when Type is either "reg" or
|
||||||
|
// "chunk".
|
||||||
|
ChunkOffset int64 `json:"chunkOffset,omitempty"` |
||||||
|
ChunkSize int64 `json:"chunkSize,omitempty"` |
||||||
|
|
||||||
|
// ChunkDigest stores an OCI digest of the chunk. This must be formed
|
||||||
|
// as "sha256:0123abcd...".
|
||||||
|
ChunkDigest string `json:"chunkDigest,omitempty"` |
||||||
|
|
||||||
|
children map[string]*TOCEntry |
||||||
|
} |
||||||
|
|
||||||
|
// ModTime returns the entry's modification time.
|
||||||
|
func (e *TOCEntry) ModTime() time.Time { return e.modTime } |
||||||
|
|
||||||
|
// NextOffset returns the position (relative to the start of the
|
||||||
|
// stargz file) of the next gzip boundary after e.Offset.
|
||||||
|
func (e *TOCEntry) NextOffset() int64 { return e.nextOffset } |
||||||
|
|
||||||
|
func (e *TOCEntry) addChild(baseName string, child *TOCEntry) { |
||||||
|
if e.children == nil { |
||||||
|
e.children = make(map[string]*TOCEntry) |
||||||
|
} |
||||||
|
if child.Type == "dir" { |
||||||
|
e.NumLink++ // Entry ".." in the subdirectory links to this directory
|
||||||
|
} |
||||||
|
e.children[baseName] = child |
||||||
|
} |
||||||
|
|
||||||
|
// isDataType reports whether TOCEntry is a regular file or chunk (something that
|
||||||
|
// contains regular file data).
|
||||||
|
func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" } |
||||||
|
|
||||||
|
// Stat returns a FileInfo value representing e.
|
||||||
|
func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} } |
||||||
|
|
||||||
|
// ForeachChild calls f for each child item. If f returns false, iteration ends.
|
||||||
|
// If e is not a directory, f is not called.
|
||||||
|
func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) { |
||||||
|
for name, ent := range e.children { |
||||||
|
if !f(name, ent) { |
||||||
|
return |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// LookupChild returns the directory e's child by its base name.
|
||||||
|
func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) { |
||||||
|
child, ok = e.children[baseName] |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
|
||||||
|
type fileInfo struct{ e *TOCEntry } |
||||||
|
|
||||||
|
var _ os.FileInfo = fileInfo{} |
||||||
|
|
||||||
|
func (fi fileInfo) Name() string { return path.Base(fi.e.Name) } |
||||||
|
func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" } |
||||||
|
func (fi fileInfo) Size() int64 { return fi.e.Size } |
||||||
|
func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() } |
||||||
|
func (fi fileInfo) Sys() interface{} { return fi.e } |
||||||
|
// Mode translates the tar-style mode bits and entry type recorded in the
// wrapped TOCEntry into an os.FileMode: permission/setuid/setgid/sticky
// bits come from the tar mode, and the type bit is derived from the
// entry's Type string.
func (fi fileInfo) Mode() (m os.FileMode) {
	// TOCEntry.Mode is tar.Header.Mode so we can understand these bits using the `tar` pkg.
	m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
		(os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
	switch fi.e.Type {
	case "dir":
		m |= os.ModeDir
	case "symlink":
		m |= os.ModeSymlink
	case "char":
		m |= os.ModeDevice | os.ModeCharDevice
	case "block":
		m |= os.ModeDevice
	case "fifo":
		m |= os.ModeNamedPipe
	}
	// "reg", "hardlink", "chunk" and unknown types carry no extra type bit.
	return m
}
||||||
|
|
||||||
|
// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
|
||||||
|
// in a eStargz blob.
|
||||||
|
type TOCEntryVerifier interface { |
||||||
|
|
||||||
|
// Verifier provides a content verifier that can be used for verifying the
|
||||||
|
// contents of the specified TOCEntry.
|
||||||
|
Verifier(ce *TOCEntry) (digest.Verifier, error) |
||||||
|
} |
||||||
|
|
||||||
|
// Compression provides the compression helper to be used creating and parsing eStargz.
|
||||||
|
// This package provides gzip-based Compression by default, but any compression
|
||||||
|
// algorithm (e.g. zstd) can be used as long as it implements Compression.
|
||||||
|
type Compression interface { |
||||||
|
Compressor |
||||||
|
Decompressor |
||||||
|
} |
||||||
|
|
||||||
|
// Compressor represents the helper mothods to be used for creating eStargz.
|
||||||
|
type Compressor interface { |
||||||
|
// Writer returns WriteCloser to be used for writing a chunk to eStargz.
|
||||||
|
// Everytime a chunk is written, the WriteCloser is closed and Writer is
|
||||||
|
// called again for writing the next chunk.
|
||||||
|
Writer(w io.Writer) (io.WriteCloser, error) |
||||||
|
|
||||||
|
// WriteTOCAndFooter is called to write JTOC to the passed Writer.
|
||||||
|
// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob
|
||||||
|
// WriteTOCAndFooter can optionally write anything that affects DiffID calculation
|
||||||
|
// (e.g. uncompressed TOC JSON).
|
||||||
|
//
|
||||||
|
// This function returns tocDgst that represents the digest of TOC that will be used
|
||||||
|
// to verify this blob when it's parsed.
|
||||||
|
WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error) |
||||||
|
} |
||||||
|
|
||||||
|
// Decompressor represents the helper mothods to be used for parsing eStargz.
|
||||||
|
type Decompressor interface { |
||||||
|
// Reader returns ReadCloser to be used for decompressing file payload.
|
||||||
|
Reader(r io.Reader) (io.ReadCloser, error) |
||||||
|
|
||||||
|
// FooterSize returns the size of the footer of this blob.
|
||||||
|
FooterSize() int64 |
||||||
|
|
||||||
|
// ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
|
||||||
|
// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
|
||||||
|
// the top until the TOC JSON).
|
||||||
|
//
|
||||||
|
// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
|
||||||
|
// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
|
||||||
|
ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) |
||||||
|
|
||||||
|
// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
|
||||||
|
// of the underlying blob that has the range specified by ParseFooter method.
|
||||||
|
//
|
||||||
|
// This function returns tocDgst that represents the digest of TOC that will be used
|
||||||
|
// to verify this blob. This must match to the value returned from
|
||||||
|
// Compressor.WriteTOCAndFooter that is used when creating this blob.
|
||||||
|
ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) |
||||||
|
} |
@ -0,0 +1,54 @@ |
|||||||
|
// Copyright 2020 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package estargz adapts the containerd estargz package to our abstractions.
|
||||||
|
package estargz |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"io" |
||||||
|
|
||||||
|
"github.com/containerd/stargz-snapshotter/estargz" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
) |
||||||
|
|
||||||
|
// Assert that what we're returning is an io.ReadCloser
|
||||||
|
var _ io.ReadCloser = (*estargz.Blob)(nil) |
||||||
|
|
||||||
|
// ReadCloser reads uncompressed tarball input from the io.ReadCloser and
// returns:
//   - An io.ReadCloser from which compressed data may be read, and
//   - A v1.Hash with the hash of the estargz table of contents, or
//   - An error if the estargz processing encountered a problem.
//
// The input reader is fully consumed and closed before returning.
//
// Refer to estargz for the options:
// https://pkg.go.dev/github.com/containerd/stargz-snapshotter/estargz@v0.4.1#Option
func ReadCloser(r io.ReadCloser, opts ...estargz.Option) (*estargz.Blob, v1.Hash, error) {
	defer r.Close()

	// TODO(#876): Avoid buffering into memory.
	bs, err := io.ReadAll(r)
	if err != nil {
		return nil, v1.Hash{}, err
	}
	br := bytes.NewReader(bs)

	// estargz.Build requires random access, hence the in-memory buffer
	// wrapped in a SectionReader.
	rc, err := estargz.Build(io.NewSectionReader(br, 0, int64(len(bs))), opts...)
	if err != nil {
		return nil, v1.Hash{}, err
	}

	h, err := v1.NewHash(rc.TOCDigest().String())
	return rc, h, err
}
@ -0,0 +1,57 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package legacy provides methods for interacting with legacy image formats.
|
||||||
|
package legacy |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"encoding/json" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||||
|
) |
||||||
|
|
||||||
|
// CopySchema1 allows `[g]crane cp` to work with old images without adding
|
||||||
|
// full support for schema 1 images to this package.
|
||||||
|
func CopySchema1(desc *remote.Descriptor, srcRef, dstRef name.Reference, opts ...remote.Option) error { |
||||||
|
m := schema1{} |
||||||
|
if err := json.NewDecoder(bytes.NewReader(desc.Manifest)).Decode(&m); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
for _, layer := range m.FSLayers { |
||||||
|
src := srcRef.Context().Digest(layer.BlobSum) |
||||||
|
dst := dstRef.Context().Digest(layer.BlobSum) |
||||||
|
|
||||||
|
blob, err := remote.Layer(src, opts...) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
if err := remote.WriteLayer(dst.Context(), blob, opts...); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
return remote.Put(dstRef, desc, opts...) |
||||||
|
} |
||||||
|
|
||||||
|
// fslayer is a single filesystem layer reference in a schema 1 manifest.
type fslayer struct {
	// BlobSum is the digest of the referenced layer blob.
	BlobSum string `json:"blobSum"`
}

// schema1 is the minimal subset of a Docker schema 1 manifest needed to
// enumerate and copy the layers it references.
type schema1 struct {
	// FSLayers lists the layers that make up the image.
	FSLayers []fslayer `json:"fsLayers"`
}
@ -0,0 +1,114 @@ |
|||||||
|
// Copyright 2021 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package windows |
||||||
|
|
||||||
|
import ( |
||||||
|
"archive/tar" |
||||||
|
"bytes" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"path" |
||||||
|
"strings" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/internal/gzip" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/tarball" |
||||||
|
) |
||||||
|
|
||||||
|
// userOwnerAndGroupSID is a magic value needed to make the binary executable
// in a Windows container.
//
// owner: BUILTIN/Users group: BUILTIN/Users ($sddlValue="O:BUG:BU")
//
// NOTE(review): appears to be a base64-encoded Windows security descriptor;
// it is applied per-file via the MSWINDOWS.rawsd PAX record in Windows() —
// TODO confirm the encoding against Windows container layer documentation.
const userOwnerAndGroupSID = "AQAAgBQAAAAkAAAAAAAAAAAAAAABAgAAAAAABSAAAAAhAgAAAQIAAAAAAAUgAAAAIQIAAA=="
||||||
|
|
||||||
|
// Windows returns a Layer that is converted to be pullable on Windows.
|
||||||
|
func Windows(layer v1.Layer) (v1.Layer, error) { |
||||||
|
// TODO: do this lazily.
|
||||||
|
|
||||||
|
layerReader, err := layer.Uncompressed() |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("getting layer: %w", err) |
||||||
|
} |
||||||
|
defer layerReader.Close() |
||||||
|
tarReader := tar.NewReader(layerReader) |
||||||
|
w := new(bytes.Buffer) |
||||||
|
tarWriter := tar.NewWriter(w) |
||||||
|
defer tarWriter.Close() |
||||||
|
|
||||||
|
for _, dir := range []string{"Files", "Hives"} { |
||||||
|
if err := tarWriter.WriteHeader(&tar.Header{ |
||||||
|
Name: dir, |
||||||
|
Typeflag: tar.TypeDir, |
||||||
|
// Use a fixed Mode, so that this isn't sensitive to the directory and umask
|
||||||
|
// under which it was created. Additionally, windows can only set 0222,
|
||||||
|
// 0444, or 0666, none of which are executable.
|
||||||
|
Mode: 0555, |
||||||
|
Format: tar.FormatPAX, |
||||||
|
}); err != nil { |
||||||
|
return nil, fmt.Errorf("writing %s directory: %w", dir, err) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
for { |
||||||
|
header, err := tarReader.Next() |
||||||
|
if errors.Is(err, io.EOF) { |
||||||
|
break |
||||||
|
} |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("reading layer: %w", err) |
||||||
|
} |
||||||
|
|
||||||
|
if strings.HasPrefix(header.Name, "Files/") { |
||||||
|
return nil, fmt.Errorf("file path %q already suitable for Windows", header.Name) |
||||||
|
} |
||||||
|
|
||||||
|
header.Name = path.Join("Files", header.Name) |
||||||
|
header.Format = tar.FormatPAX |
||||||
|
|
||||||
|
// TODO: this seems to make the file executable on Windows;
|
||||||
|
// only do this if the file should be executable.
|
||||||
|
if header.PAXRecords == nil { |
||||||
|
header.PAXRecords = map[string]string{} |
||||||
|
} |
||||||
|
header.PAXRecords["MSWINDOWS.rawsd"] = userOwnerAndGroupSID |
||||||
|
|
||||||
|
if err := tarWriter.WriteHeader(header); err != nil { |
||||||
|
return nil, fmt.Errorf("writing tar header: %w", err) |
||||||
|
} |
||||||
|
|
||||||
|
if header.Typeflag == tar.TypeReg { |
||||||
|
if _, err = io.Copy(tarWriter, tarReader); err != nil { |
||||||
|
return nil, fmt.Errorf("writing layer file: %w", err) |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
if err := tarWriter.Close(); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
b := w.Bytes() |
||||||
|
// gzip the contents, then create the layer
|
||||||
|
opener := func() (io.ReadCloser, error) { |
||||||
|
return gzip.ReadCloser(io.NopCloser(bytes.NewReader(b))), nil |
||||||
|
} |
||||||
|
layer, err = tarball.LayerFromOpener(opener) |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("creating layer: %w", err) |
||||||
|
} |
||||||
|
|
||||||
|
return layer, nil |
||||||
|
} |
@ -0,0 +1,114 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"os" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/internal/windows" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/mutate" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/stream" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/tarball" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
func isWindows(img v1.Image) (bool, error) { |
||||||
|
cfg, err := img.ConfigFile() |
||||||
|
if err != nil { |
||||||
|
return false, err |
||||||
|
} |
||||||
|
return cfg != nil && cfg.OS == "windows", nil |
||||||
|
} |
||||||
|
|
||||||
|
// Append reads a layer from path and appends it the the v1.Image base.
|
||||||
|
//
|
||||||
|
// If the base image is a Windows base image (i.e., its config.OS is
|
||||||
|
// "windows"), the contents of the tarballs will be modified to be suitable for
|
||||||
|
// a Windows container image.`,
|
||||||
|
func Append(base v1.Image, paths ...string) (v1.Image, error) { |
||||||
|
if base == nil { |
||||||
|
return nil, fmt.Errorf("invalid argument: base") |
||||||
|
} |
||||||
|
|
||||||
|
win, err := isWindows(base) |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("getting base image: %w", err) |
||||||
|
} |
||||||
|
|
||||||
|
baseMediaType, err := base.MediaType() |
||||||
|
|
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("getting base image media type: %w", err) |
||||||
|
} |
||||||
|
|
||||||
|
layerType := types.DockerLayer |
||||||
|
|
||||||
|
if baseMediaType == types.OCIManifestSchema1 { |
||||||
|
layerType = types.OCILayer |
||||||
|
} |
||||||
|
|
||||||
|
layers := make([]v1.Layer, 0, len(paths)) |
||||||
|
for _, path := range paths { |
||||||
|
layer, err := getLayer(path, layerType) |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("reading layer %q: %w", path, err) |
||||||
|
} |
||||||
|
|
||||||
|
if win { |
||||||
|
layer, err = windows.Windows(layer) |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("converting %q for Windows: %w", path, err) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
layers = append(layers, layer) |
||||||
|
} |
||||||
|
|
||||||
|
return mutate.AppendLayers(base, layers...) |
||||||
|
} |
||||||
|
|
||||||
|
func getLayer(path string, layerType types.MediaType) (v1.Layer, error) { |
||||||
|
f, err := streamFile(path) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
if f != nil { |
||||||
|
return stream.NewLayer(f, stream.WithMediaType(layerType)), nil |
||||||
|
} |
||||||
|
|
||||||
|
return tarball.LayerFromFile(path, tarball.WithMediaType(layerType)) |
||||||
|
} |
||||||
|
|
||||||
|
// If we're dealing with a named pipe, trying to open it multiple times will
// fail, so we need to do a streaming upload.
//
// returns nil, nil for non-streaming (regular) files
func streamFile(path string) (*os.File, error) {
	// "-" conventionally means stdin.
	if path == "-" {
		return os.Stdin, nil
	}

	info, err := os.Stat(path)
	if err != nil {
		return nil, err
	}

	if info.Mode().IsRegular() {
		// Regular files can be opened repeatedly; signal non-streaming.
		return nil, nil
	}

	// Pipes, devices, etc. must be read exactly once: open and stream.
	return os.Open(path)
}
@ -0,0 +1,35 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||||
|
) |
||||||
|
|
||||||
|
// Catalog returns the repositories in a registry's catalog.
|
||||||
|
func Catalog(src string, opt ...Option) (res []string, err error) { |
||||||
|
o := makeOptions(opt...) |
||||||
|
reg, err := name.NewRegistry(src, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
// This context gets overridden by remote.WithContext, which is set by
|
||||||
|
// crane.WithContext.
|
||||||
|
return remote.Catalog(context.Background(), reg, o.Remote...) |
||||||
|
} |
@ -0,0 +1,24 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
// Config returns the config file for the remote image ref.
|
||||||
|
func Config(ref string, opt ...Option) ([]byte, error) { |
||||||
|
i, _, err := getImage(ref, opt...) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return i.RawConfigFile() |
||||||
|
} |
@ -0,0 +1,88 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/internal/legacy" |
||||||
|
"github.com/google/go-containerregistry/pkg/logs" |
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
// Copy copies a remote image or index from src to dst.
|
||||||
|
func Copy(src, dst string, opt ...Option) error { |
||||||
|
o := makeOptions(opt...) |
||||||
|
srcRef, err := name.ParseReference(src, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("parsing reference %q: %w", src, err) |
||||||
|
} |
||||||
|
|
||||||
|
dstRef, err := name.ParseReference(dst, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("parsing reference for %q: %w", dst, err) |
||||||
|
} |
||||||
|
|
||||||
|
logs.Progress.Printf("Copying from %v to %v", srcRef, dstRef) |
||||||
|
desc, err := remote.Get(srcRef, o.Remote...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("fetching %q: %w", src, err) |
||||||
|
} |
||||||
|
|
||||||
|
switch desc.MediaType { |
||||||
|
case types.OCIImageIndex, types.DockerManifestList: |
||||||
|
// Handle indexes separately.
|
||||||
|
if o.Platform != nil { |
||||||
|
// If platform is explicitly set, don't copy the whole index, just the appropriate image.
|
||||||
|
if err := copyImage(desc, dstRef, o); err != nil { |
||||||
|
return fmt.Errorf("failed to copy image: %w", err) |
||||||
|
} |
||||||
|
} else { |
||||||
|
if err := copyIndex(desc, dstRef, o); err != nil { |
||||||
|
return fmt.Errorf("failed to copy index: %w", err) |
||||||
|
} |
||||||
|
} |
||||||
|
case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: |
||||||
|
// Handle schema 1 images separately.
|
||||||
|
if err := legacy.CopySchema1(desc, srcRef, dstRef, o.Remote...); err != nil { |
||||||
|
return fmt.Errorf("failed to copy schema 1 image: %w", err) |
||||||
|
} |
||||||
|
default: |
||||||
|
// Assume anything else is an image, since some registries don't set mediaTypes properly.
|
||||||
|
if err := copyImage(desc, dstRef, o); err != nil { |
||||||
|
return fmt.Errorf("failed to copy image: %w", err) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
func copyImage(desc *remote.Descriptor, dstRef name.Reference, o Options) error { |
||||||
|
img, err := desc.Image() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
return remote.Write(dstRef, img, o.Remote...) |
||||||
|
} |
||||||
|
|
||||||
|
func copyIndex(desc *remote.Descriptor, dstRef name.Reference, o Options) error { |
||||||
|
idx, err := desc.ImageIndex() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
return remote.WriteIndex(dstRef, idx, o.Remote...) |
||||||
|
} |
@ -0,0 +1,33 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||||
|
) |
||||||
|
|
||||||
|
// Delete deletes the remote reference at src.
|
||||||
|
func Delete(src string, opt ...Option) error { |
||||||
|
o := makeOptions(opt...) |
||||||
|
ref, err := name.ParseReference(src, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("parsing reference %q: %w", src, err) |
||||||
|
} |
||||||
|
|
||||||
|
return remote.Delete(ref, o.Remote...) |
||||||
|
} |
@ -0,0 +1,52 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import "github.com/google/go-containerregistry/pkg/logs" |
||||||
|
|
||||||
|
// Digest returns the sha256 hash of the remote image at ref.
|
||||||
|
func Digest(ref string, opt ...Option) (string, error) { |
||||||
|
o := makeOptions(opt...) |
||||||
|
if o.Platform != nil { |
||||||
|
desc, err := getManifest(ref, opt...) |
||||||
|
if err != nil { |
||||||
|
return "", err |
||||||
|
} |
||||||
|
if !desc.MediaType.IsIndex() { |
||||||
|
return desc.Digest.String(), nil |
||||||
|
} |
||||||
|
|
||||||
|
// TODO: does not work for indexes which contain schema v1 manifests
|
||||||
|
img, err := desc.Image() |
||||||
|
if err != nil { |
||||||
|
return "", err |
||||||
|
} |
||||||
|
digest, err := img.Digest() |
||||||
|
if err != nil { |
||||||
|
return "", err |
||||||
|
} |
||||||
|
return digest.String(), nil |
||||||
|
} |
||||||
|
desc, err := Head(ref, opt...) |
||||||
|
if err != nil { |
||||||
|
logs.Warn.Printf("HEAD request failed, falling back on GET: %v", err) |
||||||
|
rdesc, err := getManifest(ref, opt...) |
||||||
|
if err != nil { |
||||||
|
return "", err |
||||||
|
} |
||||||
|
return rdesc.Digest.String(), nil |
||||||
|
} |
||||||
|
return desc.Digest.String(), nil |
||||||
|
} |
@ -0,0 +1,16 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package crane holds libraries used to implement the crane CLI.
|
||||||
|
package crane |
@ -0,0 +1,47 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"io" |
||||||
|
|
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/mutate" |
||||||
|
) |
||||||
|
|
||||||
|
// Export writes the filesystem contents (as a tarball) of img to w.
|
||||||
|
// If img has a single layer, just write the (uncompressed) contents to w so
|
||||||
|
// that this "just works" for images that just wrap a single blob.
|
||||||
|
func Export(img v1.Image, w io.Writer) error { |
||||||
|
layers, err := img.Layers() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
if len(layers) == 1 { |
||||||
|
// If it's a single layer, we don't have to flatten the filesystem.
|
||||||
|
// An added perk of skipping mutate.Extract here is that this works
|
||||||
|
// for non-tarball layers.
|
||||||
|
l := layers[0] |
||||||
|
rc, err := l.Uncompressed() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
_, err = io.Copy(w, rc) |
||||||
|
return err |
||||||
|
} |
||||||
|
fs := mutate.Extract(img) |
||||||
|
_, err = io.Copy(w, fs) |
||||||
|
return err |
||||||
|
} |
@ -0,0 +1,72 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"archive/tar" |
||||||
|
"bytes" |
||||||
|
"io" |
||||||
|
"sort" |
||||||
|
|
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/empty" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/mutate" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/tarball" |
||||||
|
) |
||||||
|
|
||||||
|
// Layer creates a layer from a single file map. These layers are reproducible and consistent.
|
||||||
|
// A filemap is a path -> file content map representing a file system.
|
||||||
|
func Layer(filemap map[string][]byte) (v1.Layer, error) { |
||||||
|
b := &bytes.Buffer{} |
||||||
|
w := tar.NewWriter(b) |
||||||
|
|
||||||
|
fn := []string{} |
||||||
|
for f := range filemap { |
||||||
|
fn = append(fn, f) |
||||||
|
} |
||||||
|
sort.Strings(fn) |
||||||
|
|
||||||
|
for _, f := range fn { |
||||||
|
c := filemap[f] |
||||||
|
if err := w.WriteHeader(&tar.Header{ |
||||||
|
Name: f, |
||||||
|
Size: int64(len(c)), |
||||||
|
}); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
if _, err := w.Write(c); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
} |
||||||
|
if err := w.Close(); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
// Return a new copy of the buffer each time it's opened.
|
||||||
|
return tarball.LayerFromOpener(func() (io.ReadCloser, error) { |
||||||
|
return io.NopCloser(bytes.NewBuffer(b.Bytes())), nil |
||||||
|
}) |
||||||
|
} |
||||||
|
|
||||||
|
// Image creates a image with the given filemaps as its contents. These images are reproducible and consistent.
|
||||||
|
// A filemap is a path -> file content map representing a file system.
|
||||||
|
func Image(filemap map[string][]byte) (v1.Image, error) { |
||||||
|
y, err := Layer(filemap) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
return mutate.AppendLayers(empty.Image, y) |
||||||
|
} |
@ -0,0 +1,56 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||||
|
) |
||||||
|
|
||||||
|
func getImage(r string, opt ...Option) (v1.Image, name.Reference, error) { |
||||||
|
o := makeOptions(opt...) |
||||||
|
ref, err := name.ParseReference(r, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return nil, nil, fmt.Errorf("parsing reference %q: %w", r, err) |
||||||
|
} |
||||||
|
img, err := remote.Image(ref, o.Remote...) |
||||||
|
if err != nil { |
||||||
|
return nil, nil, fmt.Errorf("reading image %q: %w", ref, err) |
||||||
|
} |
||||||
|
return img, ref, nil |
||||||
|
} |
||||||
|
|
||||||
|
func getManifest(r string, opt ...Option) (*remote.Descriptor, error) { |
||||||
|
o := makeOptions(opt...) |
||||||
|
ref, err := name.ParseReference(r, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("parsing reference %q: %w", r, err) |
||||||
|
} |
||||||
|
return remote.Get(ref, o.Remote...) |
||||||
|
} |
||||||
|
|
||||||
|
// Head performs a HEAD request for a manifest and returns a content descriptor
|
||||||
|
// based on the registry's response.
|
||||||
|
func Head(r string, opt ...Option) (*v1.Descriptor, error) { |
||||||
|
o := makeOptions(opt...) |
||||||
|
ref, err := name.ParseReference(r, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return remote.Head(ref, o.Remote...) |
||||||
|
} |
@ -0,0 +1,33 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||||
|
) |
||||||
|
|
||||||
|
// ListTags returns the tags in repository src.
|
||||||
|
func ListTags(src string, opt ...Option) ([]string, error) { |
||||||
|
o := makeOptions(opt...) |
||||||
|
repo, err := name.NewRepository(src, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("parsing repo %q: %w", src, err) |
||||||
|
} |
||||||
|
|
||||||
|
return remote.List(repo, o.Remote...) |
||||||
|
} |
@ -0,0 +1,32 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
// Manifest returns the manifest for the remote image or index ref.
|
||||||
|
func Manifest(ref string, opt ...Option) ([]byte, error) { |
||||||
|
desc, err := getManifest(ref, opt...) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
o := makeOptions(opt...) |
||||||
|
if o.Platform != nil { |
||||||
|
img, err := desc.Image() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return img.RawManifest() |
||||||
|
} |
||||||
|
return desc.Manifest, nil |
||||||
|
} |
@ -0,0 +1,237 @@ |
|||||||
|
// Copyright 2020 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
|
||||||
|
"github.com/containerd/stargz-snapshotter/estargz" |
||||||
|
"github.com/google/go-containerregistry/pkg/logs" |
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/empty" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/mutate" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/tarball" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
// Optimize optimizes a remote image or index from src to dst.
|
||||||
|
// THIS API IS EXPERIMENTAL AND SUBJECT TO CHANGE WITHOUT WARNING.
|
||||||
|
func Optimize(src, dst string, prioritize []string, opt ...Option) error { |
||||||
|
pset := newStringSet(prioritize) |
||||||
|
o := makeOptions(opt...) |
||||||
|
srcRef, err := name.ParseReference(src, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("parsing reference %q: %w", src, err) |
||||||
|
} |
||||||
|
|
||||||
|
dstRef, err := name.ParseReference(dst, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("parsing reference for %q: %w", dst, err) |
||||||
|
} |
||||||
|
|
||||||
|
logs.Progress.Printf("Optimizing from %v to %v", srcRef, dstRef) |
||||||
|
desc, err := remote.Get(srcRef, o.Remote...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("fetching %q: %w", src, err) |
||||||
|
} |
||||||
|
|
||||||
|
switch desc.MediaType { |
||||||
|
case types.OCIImageIndex, types.DockerManifestList: |
||||||
|
// Handle indexes separately.
|
||||||
|
if o.Platform != nil { |
||||||
|
// If platform is explicitly set, don't optimize the whole index, just the appropriate image.
|
||||||
|
if err := optimizeAndPushImage(desc, dstRef, pset, o); err != nil { |
||||||
|
return fmt.Errorf("failed to optimize image: %w", err) |
||||||
|
} |
||||||
|
} else { |
||||||
|
if err := optimizeAndPushIndex(desc, dstRef, pset, o); err != nil { |
||||||
|
return fmt.Errorf("failed to optimize index: %w", err) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: |
||||||
|
return errors.New("docker schema 1 images are not supported") |
||||||
|
|
||||||
|
default: |
||||||
|
// Assume anything else is an image, since some registries don't set mediaTypes properly.
|
||||||
|
if err := optimizeAndPushImage(desc, dstRef, pset, o); err != nil { |
||||||
|
return fmt.Errorf("failed to optimize image: %w", err) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
func optimizeAndPushImage(desc *remote.Descriptor, dstRef name.Reference, prioritize stringSet, o Options) error { |
||||||
|
img, err := desc.Image() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
missing, oimg, err := optimizeImage(img, prioritize) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
if len(missing) > 0 { |
||||||
|
return fmt.Errorf("the following prioritized files were missing from image: %v", missing.List()) |
||||||
|
} |
||||||
|
|
||||||
|
return remote.Write(dstRef, oimg, o.Remote...) |
||||||
|
} |
||||||
|
|
||||||
|
func optimizeImage(img v1.Image, prioritize stringSet) (stringSet, v1.Image, error) { |
||||||
|
cfg, err := img.ConfigFile() |
||||||
|
if err != nil { |
||||||
|
return nil, nil, err |
||||||
|
} |
||||||
|
ocfg := cfg.DeepCopy() |
||||||
|
ocfg.History = nil |
||||||
|
ocfg.RootFS.DiffIDs = nil |
||||||
|
|
||||||
|
oimg, err := mutate.ConfigFile(empty.Image, ocfg) |
||||||
|
if err != nil { |
||||||
|
return nil, nil, err |
||||||
|
} |
||||||
|
|
||||||
|
layers, err := img.Layers() |
||||||
|
if err != nil { |
||||||
|
return nil, nil, err |
||||||
|
} |
||||||
|
|
||||||
|
missingFromImage := newStringSet(prioritize.List()) |
||||||
|
olayers := make([]mutate.Addendum, 0, len(layers)) |
||||||
|
for _, layer := range layers { |
||||||
|
missingFromLayer := []string{} |
||||||
|
olayer, err := tarball.LayerFromOpener(layer.Uncompressed, |
||||||
|
tarball.WithEstargz, |
||||||
|
tarball.WithEstargzOptions( |
||||||
|
estargz.WithPrioritizedFiles(prioritize.List()), |
||||||
|
estargz.WithAllowPrioritizeNotFound(&missingFromLayer), |
||||||
|
)) |
||||||
|
if err != nil { |
||||||
|
return nil, nil, err |
||||||
|
} |
||||||
|
missingFromImage = missingFromImage.Intersection(newStringSet(missingFromLayer)) |
||||||
|
|
||||||
|
olayers = append(olayers, mutate.Addendum{ |
||||||
|
Layer: olayer, |
||||||
|
MediaType: types.DockerLayer, |
||||||
|
}) |
||||||
|
} |
||||||
|
|
||||||
|
oimg, err = mutate.Append(oimg, olayers...) |
||||||
|
if err != nil { |
||||||
|
return nil, nil, err |
||||||
|
} |
||||||
|
return missingFromImage, oimg, nil |
||||||
|
} |
||||||
|
|
||||||
|
func optimizeAndPushIndex(desc *remote.Descriptor, dstRef name.Reference, prioritize stringSet, o Options) error { |
||||||
|
idx, err := desc.ImageIndex() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
missing, oidx, err := optimizeIndex(idx, prioritize) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
if len(missing) > 0 { |
||||||
|
return fmt.Errorf("the following prioritized files were missing from all images: %v", missing.List()) |
||||||
|
} |
||||||
|
|
||||||
|
return remote.WriteIndex(dstRef, oidx, o.Remote...) |
||||||
|
} |
||||||
|
|
||||||
|
func optimizeIndex(idx v1.ImageIndex, prioritize stringSet) (stringSet, v1.ImageIndex, error) { |
||||||
|
im, err := idx.IndexManifest() |
||||||
|
if err != nil { |
||||||
|
return nil, nil, err |
||||||
|
} |
||||||
|
|
||||||
|
missingFromIndex := newStringSet(prioritize.List()) |
||||||
|
|
||||||
|
// Build an image for each child from the base and append it to a new index to produce the result.
|
||||||
|
adds := make([]mutate.IndexAddendum, 0, len(im.Manifests)) |
||||||
|
for _, desc := range im.Manifests { |
||||||
|
img, err := idx.Image(desc.Digest) |
||||||
|
if err != nil { |
||||||
|
return nil, nil, err |
||||||
|
} |
||||||
|
|
||||||
|
missingFromImage, oimg, err := optimizeImage(img, prioritize) |
||||||
|
if err != nil { |
||||||
|
return nil, nil, err |
||||||
|
} |
||||||
|
missingFromIndex = missingFromIndex.Intersection(missingFromImage) |
||||||
|
adds = append(adds, mutate.IndexAddendum{ |
||||||
|
Add: oimg, |
||||||
|
Descriptor: v1.Descriptor{ |
||||||
|
URLs: desc.URLs, |
||||||
|
MediaType: desc.MediaType, |
||||||
|
Annotations: desc.Annotations, |
||||||
|
Platform: desc.Platform, |
||||||
|
}, |
||||||
|
}) |
||||||
|
} |
||||||
|
|
||||||
|
idxType, err := idx.MediaType() |
||||||
|
if err != nil { |
||||||
|
return nil, nil, err |
||||||
|
} |
||||||
|
|
||||||
|
return missingFromIndex, mutate.IndexMediaType(mutate.AppendManifests(empty.Index, adds...), idxType), nil |
||||||
|
} |
||||||
|
|
||||||
|
// stringSet is a set of strings, represented as a map to empty structs.
type stringSet map[string]struct{}

// newStringSet builds a stringSet from the given slice, dropping duplicates.
func newStringSet(in []string) stringSet {
	set := make(stringSet, len(in))
	for _, v := range in {
		set[v] = struct{}{}
	}
	return set
}

// List returns the members of the set as a slice, in no particular order.
func (s stringSet) List() []string {
	out := make([]string, 0, len(s))
	for member := range s {
		out = append(out, member)
	}
	return out
}

// Intersection returns the set of strings present in both s and rhs.
func (s stringSet) Intersection(rhs stringSet) stringSet {
	// To appease ST1016
	lhs := s

	// Make sure lhs is the larger of the two sets.
	if len(rhs) > len(lhs) {
		lhs, rhs = rhs, lhs
	}

	both := stringSet{}
	for k := range lhs {
		if _, ok := rhs[k]; ok {
			both[k] = struct{}{}
		}
	}
	return both
}
@ -0,0 +1,127 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"net/http" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/authn" |
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||||
|
) |
||||||
|
|
||||||
|
// Options hold the options that crane uses when calling other packages.
type Options struct {
	// Name holds options forwarded to name.ParseReference and friends.
	Name []name.Option
	// Remote holds options forwarded to the remote package; index 0 always
	// carries the auth option (see makeOptions / WithAuth*).
	Remote []remote.Option
	// Platform, when non-nil, selects a single image out of an index.
	Platform *v1.Platform
	// Keychain is the credential source; defaults to authn.DefaultKeychain.
	Keychain authn.Keychain
}
||||||
|
|
||||||
|
// GetOptions exposes the underlying []remote.Option, []name.Option, and
// platform, based on the passed Option. Generally, you shouldn't need to use
// this unless you've painted yourself into a dependency corner as we have
// with the crane and gcrane cli packages.
func GetOptions(opts ...Option) Options {
	// Thin exported wrapper so makeOptions itself can stay unexported.
	return makeOptions(opts...)
}
||||||
|
|
||||||
|
func makeOptions(opts ...Option) Options { |
||||||
|
opt := Options{ |
||||||
|
Remote: []remote.Option{ |
||||||
|
remote.WithAuthFromKeychain(authn.DefaultKeychain), |
||||||
|
}, |
||||||
|
Keychain: authn.DefaultKeychain, |
||||||
|
} |
||||||
|
for _, o := range opts { |
||||||
|
o(&opt) |
||||||
|
} |
||||||
|
return opt |
||||||
|
} |
||||||
|
|
||||||
|
// Option is a functional option for crane.
// Options are applied in order by makeOptions, mutating a shared Options value.
type Option func(*Options)
||||||
|
|
||||||
|
// WithTransport is a functional option for overriding the default transport
// for remote operations.
func WithTransport(t http.RoundTripper) Option {
	return func(o *Options) {
		// Forwarded to the remote package, which performs the actual HTTP calls.
		o.Remote = append(o.Remote, remote.WithTransport(t))
	}
}
||||||
|
|
||||||
|
// Insecure is an Option that allows image references to be fetched without TLS.
// Note: unlike the other constructors here, Insecure itself matches the Option
// signature, so it is used as crane.Insecure (no call parentheses).
func Insecure(o *Options) {
	o.Name = append(o.Name, name.Insecure)
}
||||||
|
|
||||||
|
// WithPlatform is an Option to specify the platform.
|
||||||
|
func WithPlatform(platform *v1.Platform) Option { |
||||||
|
return func(o *Options) { |
||||||
|
if platform != nil { |
||||||
|
o.Remote = append(o.Remote, remote.WithPlatform(*platform)) |
||||||
|
} |
||||||
|
o.Platform = platform |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// WithAuthFromKeychain is a functional option for overriding the default
// authenticator for remote operations, using an authn.Keychain to find
// credentials.
//
// By default, crane will use authn.DefaultKeychain.
func WithAuthFromKeychain(keys authn.Keychain) Option {
	return func(o *Options) {
		// Replace the default keychain at position 0.
		// (makeOptions guarantees o.Remote[0] holds the auth option.)
		o.Remote[0] = remote.WithAuthFromKeychain(keys)
		o.Keychain = keys
	}
}
||||||
|
|
||||||
|
// WithAuth is a functional option for overriding the default authenticator
// for remote operations.
//
// By default, crane will use authn.DefaultKeychain.
func WithAuth(auth authn.Authenticator) Option {
	return func(o *Options) {
		// Replace the default keychain at position 0.
		// Unlike WithAuthFromKeychain, o.Keychain is left untouched here,
		// since an Authenticator is not a Keychain.
		o.Remote[0] = remote.WithAuth(auth)
	}
}
||||||
|
|
||||||
|
// WithUserAgent adds the given string to the User-Agent header for any HTTP
// requests.
func WithUserAgent(ua string) Option {
	return func(o *Options) {
		o.Remote = append(o.Remote, remote.WithUserAgent(ua))
	}
}
||||||
|
|
||||||
|
// WithNondistributable is an option that allows pushing non-distributable
// (foreign) layers.
func WithNondistributable() Option {
	return func(o *Options) {
		// remote.WithNondistributable is a ready-made option value, not a
		// constructor, hence no call parentheses.
		o.Remote = append(o.Remote, remote.WithNondistributable)
	}
}
||||||
|
|
||||||
|
// WithContext is a functional option for setting the context used by all
// remote operations.
func WithContext(ctx context.Context) Option {
	return func(o *Options) {
		o.Remote = append(o.Remote, remote.WithContext(ctx))
	}
}
@ -0,0 +1,142 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"os" |
||||||
|
|
||||||
|
legacy "github.com/google/go-containerregistry/pkg/legacy/tarball" |
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/empty" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/layout" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/tarball" |
||||||
|
) |
||||||
|
|
||||||
|
// Tag applied to images that were pulled by digest. This denotes that the
// image was (probably) never tagged with this, but lets us avoid applying the
// ":latest" tag which might be misleading.
// Used by MultiSave when a reference turns out to be a digest rather than a tag.
const iWasADigestTag = "i-was-a-digest"
||||||
|
|
||||||
|
// Pull returns a v1.Image of the remote image src.
|
||||||
|
func Pull(src string, opt ...Option) (v1.Image, error) { |
||||||
|
o := makeOptions(opt...) |
||||||
|
ref, err := name.ParseReference(src, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("parsing reference %q: %w", src, err) |
||||||
|
} |
||||||
|
|
||||||
|
return remote.Image(ref, o.Remote...) |
||||||
|
} |
||||||
|
|
||||||
|
// Save writes the v1.Image img as a tarball at path with tag src.
|
||||||
|
func Save(img v1.Image, src, path string) error { |
||||||
|
imgMap := map[string]v1.Image{src: img} |
||||||
|
return MultiSave(imgMap, path) |
||||||
|
} |
||||||
|
|
||||||
|
// MultiSave writes collection of v1.Image img with tag as a tarball.
|
||||||
|
func MultiSave(imgMap map[string]v1.Image, path string, opt ...Option) error { |
||||||
|
o := makeOptions(opt...) |
||||||
|
tagToImage := map[name.Tag]v1.Image{} |
||||||
|
|
||||||
|
for src, img := range imgMap { |
||||||
|
ref, err := name.ParseReference(src, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("parsing ref %q: %w", src, err) |
||||||
|
} |
||||||
|
|
||||||
|
// WriteToFile wants a tag to write to the tarball, but we might have
|
||||||
|
// been given a digest.
|
||||||
|
// If the original ref was a tag, use that. Otherwise, if it was a
|
||||||
|
// digest, tag the image with :i-was-a-digest instead.
|
||||||
|
tag, ok := ref.(name.Tag) |
||||||
|
if !ok { |
||||||
|
d, ok := ref.(name.Digest) |
||||||
|
if !ok { |
||||||
|
return fmt.Errorf("ref wasn't a tag or digest") |
||||||
|
} |
||||||
|
tag = d.Repository.Tag(iWasADigestTag) |
||||||
|
} |
||||||
|
tagToImage[tag] = img |
||||||
|
} |
||||||
|
// no progress channel (for now)
|
||||||
|
return tarball.MultiWriteToFile(path, tagToImage) |
||||||
|
} |
||||||
|
|
||||||
|
// PullLayer returns the given layer from a registry.
|
||||||
|
func PullLayer(ref string, opt ...Option) (v1.Layer, error) { |
||||||
|
o := makeOptions(opt...) |
||||||
|
digest, err := name.NewDigest(ref, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
return remote.Layer(digest, o.Remote...) |
||||||
|
} |
||||||
|
|
||||||
|
// SaveLegacy writes the v1.Image img as a legacy tarball at path with tag src.
|
||||||
|
func SaveLegacy(img v1.Image, src, path string) error { |
||||||
|
imgMap := map[string]v1.Image{src: img} |
||||||
|
return MultiSave(imgMap, path) |
||||||
|
} |
||||||
|
|
||||||
|
// MultiSaveLegacy writes collection of v1.Image img with tag as a legacy tarball.
|
||||||
|
func MultiSaveLegacy(imgMap map[string]v1.Image, path string) error { |
||||||
|
refToImage := map[name.Reference]v1.Image{} |
||||||
|
|
||||||
|
for src, img := range imgMap { |
||||||
|
ref, err := name.ParseReference(src) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("parsing ref %q: %w", src, err) |
||||||
|
} |
||||||
|
refToImage[ref] = img |
||||||
|
} |
||||||
|
|
||||||
|
w, err := os.Create(path) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
defer w.Close() |
||||||
|
|
||||||
|
return legacy.MultiWrite(refToImage, w) |
||||||
|
} |
||||||
|
|
||||||
|
// SaveOCI writes the v1.Image img as an OCI Image Layout at path. If a layout
|
||||||
|
// already exists at that path, it will add the image to the index.
|
||||||
|
func SaveOCI(img v1.Image, path string) error { |
||||||
|
imgMap := map[string]v1.Image{"": img} |
||||||
|
return MultiSaveOCI(imgMap, path) |
||||||
|
} |
||||||
|
|
||||||
|
// MultiSaveOCI writes collection of v1.Image img as an OCI Image Layout at path. If a layout
|
||||||
|
// already exists at that path, it will add the image to the index.
|
||||||
|
func MultiSaveOCI(imgMap map[string]v1.Image, path string) error { |
||||||
|
p, err := layout.FromPath(path) |
||||||
|
if err != nil { |
||||||
|
p, err = layout.Write(path, empty.Index) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
} |
||||||
|
for _, img := range imgMap { |
||||||
|
if err = p.AppendImage(img); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
@ -0,0 +1,65 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/tarball" |
||||||
|
) |
||||||
|
|
||||||
|
// Load reads the tarball at path as a v1.Image.
|
||||||
|
func Load(path string, opt ...Option) (v1.Image, error) { |
||||||
|
return LoadTag(path, "") |
||||||
|
} |
||||||
|
|
||||||
|
// LoadTag reads a tag from the tarball at path as a v1.Image.
|
||||||
|
// If tag is "", will attempt to read the tarball as a single image.
|
||||||
|
func LoadTag(path, tag string, opt ...Option) (v1.Image, error) { |
||||||
|
if tag == "" { |
||||||
|
return tarball.ImageFromPath(path, nil) |
||||||
|
} |
||||||
|
|
||||||
|
o := makeOptions(opt...) |
||||||
|
t, err := name.NewTag(tag, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("parsing tag %q: %w", tag, err) |
||||||
|
} |
||||||
|
return tarball.ImageFromPath(path, &t) |
||||||
|
} |
||||||
|
|
||||||
|
// Push pushes the v1.Image img to a registry as dst.
|
||||||
|
func Push(img v1.Image, dst string, opt ...Option) error { |
||||||
|
o := makeOptions(opt...) |
||||||
|
tag, err := name.ParseReference(dst, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("parsing reference %q: %w", dst, err) |
||||||
|
} |
||||||
|
return remote.Write(tag, img, o.Remote...) |
||||||
|
} |
||||||
|
|
||||||
|
// Upload pushes the v1.Layer to a given repo.
|
||||||
|
func Upload(layer v1.Layer, repo string, opt ...Option) error { |
||||||
|
o := makeOptions(opt...) |
||||||
|
ref, err := name.NewRepository(repo, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("parsing repo %q: %w", repo, err) |
||||||
|
} |
||||||
|
|
||||||
|
return remote.WriteLayer(ref, layer, o.Remote...) |
||||||
|
} |
@ -0,0 +1,39 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package crane |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||||
|
) |
||||||
|
|
||||||
|
// Tag adds tag to the remote img.
|
||||||
|
func Tag(img, tag string, opt ...Option) error { |
||||||
|
o := makeOptions(opt...) |
||||||
|
ref, err := name.ParseReference(img, o.Name...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("parsing reference %q: %w", img, err) |
||||||
|
} |
||||||
|
desc, err := remote.Get(ref, o.Remote...) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("fetching %q: %w", img, err) |
||||||
|
} |
||||||
|
|
||||||
|
dst := ref.Context().Tag(tag) |
||||||
|
|
||||||
|
return remote.Tag(dst, desc, o.Remote...) |
||||||
|
} |
@ -0,0 +1,33 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package legacy |
||||||
|
|
||||||
|
import ( |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
) |
||||||
|
|
||||||
|
// LayerConfigFile is the configuration file that holds the metadata describing
// a v1 layer. See:
// https://github.com/moby/moby/blob/master/image/spec/v1.md
type LayerConfigFile struct {
	v1.ConfigFile

	// ContainerConfig is serialized as "container_config" in the v1 format.
	ContainerConfig v1.Config `json:"container_config,omitempty"`

	// ID is the v1 layer ID.
	ID string `json:"id,omitempty"`
	// Parent is the ID of this layer's parent layer, if any.
	Parent string `json:"parent,omitempty"`
	// Throwaway is set from the corresponding history entry's EmptyLayer flag.
	Throwaway bool `json:"throwaway,omitempty"`
	// Comment is a free-form comment about the layer.
	Comment string `json:"comment,omitempty"`
}
@ -0,0 +1,18 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package legacy provides functionality to work with docker images in the v1
|
||||||
|
// format.
|
||||||
|
// See: https://github.com/moby/moby/blob/master/image/spec/v1.md
|
||||||
|
package legacy |
@ -0,0 +1,6 @@ |
|||||||
|
# `legacy/tarball` |
||||||
|
|
||||||
|
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/legacy/tarball?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/legacy/tarball) |
||||||
|
|
||||||
|
This package implements support for writing legacy tarballs, as described |
||||||
|
[here](https://github.com/moby/moby/blob/749d90e10f989802638ae542daf54257f3bf71f2/image/spec/v1.2.md#combined-image-json--filesystem-changeset-format). |
@ -0,0 +1,18 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package tarball provides facilities for writing v1 docker images
|
||||||
|
// (https://github.com/moby/moby/blob/master/image/spec/v1.md) from/to a tarball
|
||||||
|
// on-disk.
|
||||||
|
package tarball |
@ -0,0 +1,375 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package tarball |
||||||
|
|
||||||
|
import ( |
||||||
|
"archive/tar" |
||||||
|
"bytes" |
||||||
|
"crypto/sha256" |
||||||
|
"encoding/hex" |
||||||
|
"encoding/json" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"sort" |
||||||
|
"strings" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/legacy" |
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/tarball" |
||||||
|
) |
||||||
|
|
||||||
|
// repositoriesTarDescriptor represents the repositories file inside a `docker save` tarball.
// It maps registry/repository -> tag -> top layer ID (populated by addTags).
type repositoriesTarDescriptor map[string]map[string]string
||||||
|
|
||||||
|
// v1Layer represents a layer with metadata needed by the v1 image spec https://github.com/moby/moby/blob/master/image/spec/v1.md.
type v1Layer struct {
	// config is the layer metadata.
	config *legacy.LayerConfigFile
	// layer is the v1.Layer object this v1Layer represents.
	layer v1.Layer
}

// json returns the raw bytes of the json metadata of the given v1Layer.
func (l *v1Layer) json() ([]byte, error) {
	return json.Marshal(l.config)
}

// version returns the raw bytes of the "VERSION" file of the given v1Layer.
func (l *v1Layer) version() []byte {
	// Always "1.0", per the v1 image format.
	return []byte("1.0")
}
||||||
|
|
||||||
|
// v1LayerID computes the v1 image format layer id for the given v1.Layer with the given v1 parent ID and raw image config.
|
||||||
|
func v1LayerID(layer v1.Layer, parentID string, rawConfig []byte) (string, error) { |
||||||
|
d, err := layer.Digest() |
||||||
|
if err != nil { |
||||||
|
return "", fmt.Errorf("unable to get layer digest to generate v1 layer ID: %w", err) |
||||||
|
} |
||||||
|
s := fmt.Sprintf("%s %s", d.Hex, parentID) |
||||||
|
if len(rawConfig) != 0 { |
||||||
|
s = fmt.Sprintf("%s %s", s, string(rawConfig)) |
||||||
|
} |
||||||
|
rawDigest := sha256.Sum256([]byte(s)) |
||||||
|
return hex.EncodeToString(rawDigest[:]), nil |
||||||
|
} |
||||||
|
|
||||||
|
// newV1Layer creates a new v1Layer for a layer other than the top layer in a
// v1 image tarball. (The original comment misnamed this as newTopV1Layer.)
func newV1Layer(layer v1.Layer, parent *v1Layer, history v1.History) (*v1Layer, error) {
	parentID := ""
	if parent != nil {
		parentID = parent.config.ID
	}
	// Non-top layers hash only the digest + parent chain (rawConfig is nil);
	// the top layer additionally mixes in the raw config via newTopV1Layer.
	id, err := v1LayerID(layer, parentID, nil)
	if err != nil {
		return nil, fmt.Errorf("unable to generate v1 layer ID: %w", err)
	}
	result := &v1Layer{
		layer: layer,
		config: &legacy.LayerConfigFile{
			ConfigFile: v1.ConfigFile{
				Created: history.Created,
				Author:  history.Author,
			},
			ContainerConfig: v1.Config{
				Cmd: []string{history.CreatedBy},
			},
			ID:        id,
			Parent:    parentID,
			Throwaway: history.EmptyLayer,
			Comment:   history.Comment,
		},
	}
	return result, nil
}
||||||
|
|
||||||
|
// newTopV1Layer creates a new v1Layer for the top layer in a v1 image tarball.
func newTopV1Layer(layer v1.Layer, parent *v1Layer, history v1.History, imgConfig *v1.ConfigFile, rawConfig []byte) (*v1Layer, error) {
	result, err := newV1Layer(layer, parent, history)
	if err != nil {
		return nil, err
	}
	// The top layer's ID additionally incorporates the raw image config, so
	// recompute it, overriding the ID newV1Layer assigned.
	id, err := v1LayerID(layer, result.config.Parent, rawConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to generate v1 layer ID for top layer: %w", err)
	}
	result.config.ID = id
	// The top layer also carries the image-level configuration fields.
	result.config.Architecture = imgConfig.Architecture
	result.config.Container = imgConfig.Container
	result.config.DockerVersion = imgConfig.DockerVersion
	result.config.OS = imgConfig.OS
	result.config.Config = imgConfig.Config
	result.config.Created = imgConfig.Created
	return result, nil
}
||||||
|
|
||||||
|
// splitTag splits the given tagged image name <registry>/<repository>:<tag>
// into <registry>/<repository> and <tag>.
//
// If the text after the final ":" contains a "/", the colon belongs to a
// registry host:port rather than a tag, and the whole name is returned with an
// empty tag.
func splitTag(name string) (string, string) {
	i := strings.LastIndex(name, ":")
	// Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation.
	if i >= 0 && !strings.Contains(name[i+1:], "/") {
		return name[:i], name[i+1:]
	}
	return name, ""
}
||||||
|
|
||||||
|
// addTags adds the given image tags to the given "repositories" file descriptor in a v1 image tarball.
|
||||||
|
func addTags(repos repositoriesTarDescriptor, tags []string, topLayerID string) { |
||||||
|
for _, t := range tags { |
||||||
|
base, tag := splitTag(t) |
||||||
|
tagToID, ok := repos[base] |
||||||
|
if !ok { |
||||||
|
tagToID = make(map[string]string) |
||||||
|
repos[base] = tagToID |
||||||
|
} |
||||||
|
tagToID[tag] = topLayerID |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// updateLayerSources updates the given layer digest to descriptor map with the descriptor of the given layer in the given image if it's an undistributable layer.
|
||||||
|
func updateLayerSources(layerSources map[v1.Hash]v1.Descriptor, layer v1.Layer, img v1.Image) error { |
||||||
|
d, err := layer.Digest() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
// Add to LayerSources if it's a foreign layer.
|
||||||
|
desc, err := partial.BlobDescriptor(img, d) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
if !desc.MediaType.IsDistributable() { |
||||||
|
diffid, err := partial.BlobToDiffID(img, d) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
layerSources[diffid] = *desc |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// Write is a wrapper to write a single image in V1 format and tag to a tarball.
|
||||||
|
func Write(ref name.Reference, img v1.Image, w io.Writer) error { |
||||||
|
return MultiWrite(map[name.Reference]v1.Image{ref: img}, w) |
||||||
|
} |
||||||
|
|
||||||
|
// filterEmpty filters out the history corresponding to empty layers from the
|
||||||
|
// given history.
|
||||||
|
func filterEmpty(h []v1.History) []v1.History { |
||||||
|
result := []v1.History{} |
||||||
|
for _, i := range h { |
||||||
|
if i.EmptyLayer { |
||||||
|
continue |
||||||
|
} |
||||||
|
result = append(result, i) |
||||||
|
} |
||||||
|
return result |
||||||
|
} |
||||||
|
|
||||||
|
// MultiWrite writes the contents of each image to the provided reader, in the V1 image tarball format.
// The contents are written in the following format:
// One manifest.json file at the top level containing information about several images.
// One repositories file mapping from the image <registry>/<repo name> to <tag> to the id of the top most layer.
// For every layer, a directory named with the layer ID is created with the following contents:
//
//	layer.tar - The uncompressed layer tarball.
//	<layer id>.json- Layer metadata json.
//	VERSION- Schema version string. Always set to "1.0".
//
// One file for the config blob, named after its SHA.
func MultiWrite(refToImage map[name.Reference]v1.Image, w io.Writer) error {
	tf := tar.NewWriter(w)
	defer tf.Close()

	// Deduplicate images and obtain a deterministic iteration order.
	sortedImages, imageToTags := dedupRefToImage(refToImage)
	var m tarball.Manifest
	repos := make(repositoriesTarDescriptor)

	// Layer IDs already written to the tarball; shared layers across images
	// are only emitted once.
	seenLayerIDs := make(map[string]struct{})
	for _, img := range sortedImages {
		tags := imageToTags[img]

		// Write the config.
		cfgName, err := img.ConfigName()
		if err != nil {
			return err
		}
		cfgFileName := fmt.Sprintf("%s.json", cfgName.Hex)
		cfgBlob, err := img.RawConfigFile()
		if err != nil {
			return err
		}
		if err := writeTarEntry(tf, cfgFileName, bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil {
			return err
		}
		cfg, err := img.ConfigFile()
		if err != nil {
			return err
		}

		// Store foreign layer info.
		layerSources := make(map[v1.Hash]v1.Descriptor)

		// Write the layers.
		layers, err := img.Layers()
		if err != nil {
			return err
		}
		history := filterEmpty(cfg.History)
		// Create a blank config history if the config didn't have a history.
		if len(history) == 0 && len(layers) != 0 {
			history = make([]v1.History, len(layers))
		} else if len(layers) != len(history) {
			return fmt.Errorf("image config had layer history which did not match the number of layers, got len(history)=%d, len(layers)=%d, want len(history)=len(layers)", len(history), len(layers))
		}
		layerFiles := make([]string, len(layers))
		var prev *v1Layer
		for i, l := range layers {
			if err := updateLayerSources(layerSources, l, img); err != nil {
				return fmt.Errorf("unable to update image metadata to include undistributable layer source information: %w", err)
			}
			// The top (last) layer's ID additionally folds in the raw config.
			var cur *v1Layer
			if i < (len(layers) - 1) {
				cur, err = newV1Layer(l, prev, history[i])
			} else {
				cur, err = newTopV1Layer(l, prev, history[i], cfg, cfgBlob)
			}
			if err != nil {
				return err
			}
			layerFiles[i] = fmt.Sprintf("%s/layer.tar", cur.config.ID)
			// Skip layers already written by a previous image; still advance
			// prev so parent chaining stays correct.
			if _, ok := seenLayerIDs[cur.config.ID]; ok {
				prev = cur
				continue
			}
			seenLayerIDs[cur.config.ID] = struct{}{}

			// If the v1.Layer implements UncompressedSize efficiently, use that
			// for the tar header. Otherwise, this iterates over Uncompressed().
			// NOTE: If using a streaming layer, this may consume the layer.
			size, err := partial.UncompressedSize(l)
			if err != nil {
				return err
			}
			u, err := l.Uncompressed()
			if err != nil {
				return err
			}
			// NOTE(review): this defer runs at function return, not per
			// iteration, so readers stay open until MultiWrite finishes.
			defer u.Close()
			if err := writeTarEntry(tf, layerFiles[i], u, size); err != nil {
				return err
			}

			j, err := cur.json()
			if err != nil {
				return err
			}
			if err := writeTarEntry(tf, fmt.Sprintf("%s/json", cur.config.ID), bytes.NewReader(j), int64(len(j))); err != nil {
				return err
			}
			v := cur.version()
			if err := writeTarEntry(tf, fmt.Sprintf("%s/VERSION", cur.config.ID), bytes.NewReader(v), int64(len(v))); err != nil {
				return err
			}
			prev = cur
		}

		// Generate the tar descriptor and write it.
		m = append(m, tarball.Descriptor{
			Config:       cfgFileName,
			RepoTags:     tags,
			Layers:       layerFiles,
			LayerSources: layerSources,
		})
		// prev should be the top layer here. Use it to add the image tags
		// to the tarball repositories file.
		addTags(repos, tags, prev.config.ID)
	}

	mBytes, err := json.Marshal(m)
	if err != nil {
		return err
	}

	if err := writeTarEntry(tf, "manifest.json", bytes.NewReader(mBytes), int64(len(mBytes))); err != nil {
		return err
	}
	reposBytes, err := json.Marshal(&repos)
	if err != nil {
		return err
	}
	if err := writeTarEntry(tf, "repositories", bytes.NewReader(reposBytes), int64(len(reposBytes))); err != nil {
		return err
	}
	return nil
}
||||||
|
|
||||||
|
func dedupRefToImage(refToImage map[name.Reference]v1.Image) ([]v1.Image, map[v1.Image][]string) { |
||||||
|
imageToTags := make(map[v1.Image][]string) |
||||||
|
|
||||||
|
for ref, img := range refToImage { |
||||||
|
if tag, ok := ref.(name.Tag); ok { |
||||||
|
if tags, ok := imageToTags[img]; ok && tags != nil { |
||||||
|
imageToTags[img] = append(tags, tag.String()) |
||||||
|
} else { |
||||||
|
imageToTags[img] = []string{tag.String()} |
||||||
|
} |
||||||
|
} else { |
||||||
|
if _, ok := imageToTags[img]; !ok { |
||||||
|
imageToTags[img] = nil |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Force specific order on tags
|
||||||
|
imgs := []v1.Image{} |
||||||
|
for img, tags := range imageToTags { |
||||||
|
sort.Strings(tags) |
||||||
|
imgs = append(imgs, img) |
||||||
|
} |
||||||
|
|
||||||
|
sort.Slice(imgs, func(i, j int) bool { |
||||||
|
cfI, err := imgs[i].ConfigName() |
||||||
|
if err != nil { |
||||||
|
return false |
||||||
|
} |
||||||
|
cfJ, err := imgs[j].ConfigName() |
||||||
|
if err != nil { |
||||||
|
return false |
||||||
|
} |
||||||
|
return cfI.Hex < cfJ.Hex |
||||||
|
}) |
||||||
|
|
||||||
|
return imgs, imageToTags |
||||||
|
} |
||||||
|
|
||||||
|
// Writes a file to the provided writer with a corresponding tar header
|
||||||
|
func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { |
||||||
|
hdr := &tar.Header{ |
||||||
|
Mode: 0644, |
||||||
|
Typeflag: tar.TypeReg, |
||||||
|
Size: size, |
||||||
|
Name: path, |
||||||
|
} |
||||||
|
if err := tf.WriteHeader(hdr); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
_, err := io.Copy(tf, r) |
||||||
|
return err |
||||||
|
} |
@ -0,0 +1,8 @@ |
|||||||
|
# `empty` |
||||||
|
|
||||||
|
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty) |
||||||
|
|
||||||
|
The `empty` package provides an empty base for constructing a `v1.Image` or `v1.ImageIndex`.
||||||
|
This is especially useful when paired with the [`mutate`](/pkg/v1/mutate) package, |
||||||
|
see [`mutate.Append`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#Append) |
||||||
|
and [`mutate.AppendManifests`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#AppendManifests). |
@ -0,0 +1,16 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package empty provides an implementation of v1.Image equivalent to "FROM scratch".
|
||||||
|
package empty |
@ -0,0 +1,52 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package empty |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
|
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
// Image is a singleton empty image, think: FROM scratch.
|
||||||
|
var Image, _ = partial.UncompressedToImage(emptyImage{}) |
||||||
|
|
||||||
|
type emptyImage struct{} |
||||||
|
|
||||||
|
// MediaType implements partial.UncompressedImageCore.
|
||||||
|
func (i emptyImage) MediaType() (types.MediaType, error) { |
||||||
|
return types.DockerManifestSchema2, nil |
||||||
|
} |
||||||
|
|
||||||
|
// RawConfigFile implements partial.UncompressedImageCore.
|
||||||
|
func (i emptyImage) RawConfigFile() ([]byte, error) { |
||||||
|
return partial.RawConfigFile(i) |
||||||
|
} |
||||||
|
|
||||||
|
// ConfigFile implements v1.Image.
|
||||||
|
func (i emptyImage) ConfigFile() (*v1.ConfigFile, error) { |
||||||
|
return &v1.ConfigFile{ |
||||||
|
RootFS: v1.RootFS{ |
||||||
|
// Some clients check this.
|
||||||
|
Type: "layers", |
||||||
|
}, |
||||||
|
}, nil |
||||||
|
} |
||||||
|
|
||||||
|
func (i emptyImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { |
||||||
|
return nil, fmt.Errorf("LayerByDiffID(%s): empty image", h) |
||||||
|
} |
@ -0,0 +1,63 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package empty |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
|
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
// Index is a singleton empty index, think: FROM scratch.
|
||||||
|
var Index = emptyIndex{} |
||||||
|
|
||||||
|
type emptyIndex struct{} |
||||||
|
|
||||||
|
func (i emptyIndex) MediaType() (types.MediaType, error) { |
||||||
|
return types.OCIImageIndex, nil |
||||||
|
} |
||||||
|
|
||||||
|
func (i emptyIndex) Digest() (v1.Hash, error) { |
||||||
|
return partial.Digest(i) |
||||||
|
} |
||||||
|
|
||||||
|
func (i emptyIndex) Size() (int64, error) { |
||||||
|
return partial.Size(i) |
||||||
|
} |
||||||
|
|
||||||
|
func (i emptyIndex) IndexManifest() (*v1.IndexManifest, error) { |
||||||
|
return base(), nil |
||||||
|
} |
||||||
|
|
||||||
|
func (i emptyIndex) RawManifest() ([]byte, error) { |
||||||
|
return json.Marshal(base()) |
||||||
|
} |
||||||
|
|
||||||
|
func (i emptyIndex) Image(v1.Hash) (v1.Image, error) { |
||||||
|
return nil, errors.New("empty index") |
||||||
|
} |
||||||
|
|
||||||
|
func (i emptyIndex) ImageIndex(v1.Hash) (v1.ImageIndex, error) { |
||||||
|
return nil, errors.New("empty index") |
||||||
|
} |
||||||
|
|
||||||
|
func base() *v1.IndexManifest { |
||||||
|
return &v1.IndexManifest{ |
||||||
|
SchemaVersion: 2, |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,5 @@ |
|||||||
|
# `layout` |
||||||
|
|
||||||
|
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/layout?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/layout) |
||||||
|
|
||||||
|
The `layout` package implements support for interacting with an [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/master/image-layout.md). |
@ -0,0 +1,37 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package layout |
||||||
|
|
||||||
|
import ( |
||||||
|
"io" |
||||||
|
"os" |
||||||
|
|
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
) |
||||||
|
|
||||||
|
// Blob returns a blob with the given hash from the Path.
|
||||||
|
func (l Path) Blob(h v1.Hash) (io.ReadCloser, error) { |
||||||
|
return os.Open(l.blobPath(h)) |
||||||
|
} |
||||||
|
|
||||||
|
// Bytes is a convenience function to return a blob from the Path as
|
||||||
|
// a byte slice.
|
||||||
|
func (l Path) Bytes(h v1.Hash) ([]byte, error) { |
||||||
|
return os.ReadFile(l.blobPath(h)) |
||||||
|
} |
||||||
|
|
||||||
|
func (l Path) blobPath(h v1.Hash) string { |
||||||
|
return l.path("blobs", h.Algorithm, h.Hex) |
||||||
|
} |
@ -0,0 +1,19 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package layout provides facilities for reading/writing artifacts from/to
|
||||||
|
// an OCI image layout on disk, see:
|
||||||
|
//
|
||||||
|
// https://github.com/opencontainers/image-spec/blob/master/image-layout.md
|
||||||
|
package layout |
@ -0,0 +1,139 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package layout |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"os" |
||||||
|
"sync" |
||||||
|
|
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
// layoutImage is a v1.Image backed by blobs stored in an OCI image layout on
// disk. The raw manifest is read lazily and cached.
type layoutImage struct {
	path         Path
	desc         v1.Descriptor
	manifestLock sync.Mutex // Protects rawManifest
	rawManifest  []byte
}

var _ partial.CompressedImageCore = (*layoutImage)(nil)

// Image reads a v1.Image with digest h from the Path.
func (l Path) Image(h v1.Hash) (v1.Image, error) {
	ii, err := l.ImageIndex()
	if err != nil {
		return nil, err
	}

	return ii.Image(h)
}

// MediaType returns the media type recorded in the image's descriptor.
func (li *layoutImage) MediaType() (types.MediaType, error) {
	return li.desc.MediaType, nil
}

// Implements WithManifest for partial.Blobset.
func (li *layoutImage) Manifest() (*v1.Manifest, error) {
	return partial.Manifest(li)
}

// RawManifest returns the raw manifest bytes, reading them from disk on first
// use and caching them under manifestLock thereafter.
func (li *layoutImage) RawManifest() ([]byte, error) {
	li.manifestLock.Lock()
	defer li.manifestLock.Unlock()
	// Cached from a previous call.
	if li.rawManifest != nil {
		return li.rawManifest, nil
	}

	b, err := li.path.Bytes(li.desc.Digest)
	if err != nil {
		return nil, err
	}

	li.rawManifest = b
	return li.rawManifest, nil
}

// RawConfigFile returns the raw bytes of the config blob referenced by the
// manifest.
func (li *layoutImage) RawConfigFile() ([]byte, error) {
	manifest, err := li.Manifest()
	if err != nil {
		return nil, err
	}

	return li.path.Bytes(manifest.Config.Digest)
}

// LayerByDigest returns the layer (or the config blob, which is also
// addressable by digest) matching h, wrapped as a compressed blob.
func (li *layoutImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) {
	manifest, err := li.Manifest()
	if err != nil {
		return nil, err
	}

	// The config blob is addressable by digest too.
	if h == manifest.Config.Digest {
		return &compressedBlob{
			path: li.path,
			desc: manifest.Config,
		}, nil
	}

	for _, desc := range manifest.Layers {
		if h == desc.Digest {
			return &compressedBlob{
				path: li.path,
				desc: desc,
			}, nil
		}
	}

	return nil, fmt.Errorf("could not find layer in image: %s", h)
}
||||||
|
|
||||||
|
type compressedBlob struct { |
||||||
|
path Path |
||||||
|
desc v1.Descriptor |
||||||
|
} |
||||||
|
|
||||||
|
func (b *compressedBlob) Digest() (v1.Hash, error) { |
||||||
|
return b.desc.Digest, nil |
||||||
|
} |
||||||
|
|
||||||
|
func (b *compressedBlob) Compressed() (io.ReadCloser, error) { |
||||||
|
return b.path.Blob(b.desc.Digest) |
||||||
|
} |
||||||
|
|
||||||
|
func (b *compressedBlob) Size() (int64, error) { |
||||||
|
return b.desc.Size, nil |
||||||
|
} |
||||||
|
|
||||||
|
func (b *compressedBlob) MediaType() (types.MediaType, error) { |
||||||
|
return b.desc.MediaType, nil |
||||||
|
} |
||||||
|
|
||||||
|
// Descriptor implements partial.withDescriptor.
|
||||||
|
func (b *compressedBlob) Descriptor() (*v1.Descriptor, error) { |
||||||
|
return &b.desc, nil |
||||||
|
} |
||||||
|
|
||||||
|
// See partial.Exists.
|
||||||
|
func (b *compressedBlob) Exists() (bool, error) { |
||||||
|
_, err := os.Stat(b.path.blobPath(b.desc.Digest)) |
||||||
|
if os.IsNotExist(err) { |
||||||
|
return false, nil |
||||||
|
} |
||||||
|
return err == nil, err |
||||||
|
} |
@ -0,0 +1,161 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package layout |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"os" |
||||||
|
|
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
var _ v1.ImageIndex = (*layoutIndex)(nil)

// layoutIndex is a v1.ImageIndex backed by an OCI image layout on disk; the
// raw index bytes are read once and held in memory.
type layoutIndex struct {
	mediaType types.MediaType
	path      Path
	rawIndex  []byte
}

// ImageIndexFromPath is a convenience function which constructs a Path and returns its v1.ImageIndex.
func ImageIndexFromPath(path string) (v1.ImageIndex, error) {
	lp, err := FromPath(path)
	if err != nil {
		return nil, err
	}
	return lp.ImageIndex()
}

// ImageIndex returns a v1.ImageIndex for the Path.
func (l Path) ImageIndex() (v1.ImageIndex, error) {
	rawIndex, err := os.ReadFile(l.path("index.json"))
	if err != nil {
		return nil, err
	}

	idx := &layoutIndex{
		mediaType: types.OCIImageIndex,
		path:      l,
		rawIndex:  rawIndex,
	}

	return idx, nil
}

// MediaType returns the media type this index was constructed with.
func (i *layoutIndex) MediaType() (types.MediaType, error) {
	return i.mediaType, nil
}

// Digest derives the digest from the raw index bytes.
func (i *layoutIndex) Digest() (v1.Hash, error) {
	return partial.Digest(i)
}

// Size derives the size from the raw index bytes.
func (i *layoutIndex) Size() (int64, error) {
	return partial.Size(i)
}

// IndexManifest parses the raw index bytes into a v1.IndexManifest.
func (i *layoutIndex) IndexManifest() (*v1.IndexManifest, error) {
	var index v1.IndexManifest
	err := json.Unmarshal(i.rawIndex, &index)
	return &index, err
}

// RawManifest returns the raw index bytes as read from disk.
func (i *layoutIndex) RawManifest() ([]byte, error) {
	return i.rawIndex, nil
}

// Image returns the image with digest h, validating that the descriptor's
// media type is an image manifest.
func (i *layoutIndex) Image(h v1.Hash) (v1.Image, error) {
	// Look up the digest in our manifest first to return a better error.
	desc, err := i.findDescriptor(h)
	if err != nil {
		return nil, err
	}

	if !isExpectedMediaType(desc.MediaType, types.OCIManifestSchema1, types.DockerManifestSchema2) {
		return nil, fmt.Errorf("unexpected media type for %v: %s", h, desc.MediaType)
	}

	img := &layoutImage{
		path: i.path,
		desc: *desc,
	}
	return partial.CompressedToImage(img)
}

// ImageIndex returns the child index with digest h, validating that the
// descriptor's media type is an index/manifest list.
func (i *layoutIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) {
	// Look up the digest in our manifest first to return a better error.
	desc, err := i.findDescriptor(h)
	if err != nil {
		return nil, err
	}

	if !isExpectedMediaType(desc.MediaType, types.OCIImageIndex, types.DockerManifestList) {
		return nil, fmt.Errorf("unexpected media type for %v: %s", h, desc.MediaType)
	}

	rawIndex, err := i.path.Bytes(h)
	if err != nil {
		return nil, err
	}

	return &layoutIndex{
		mediaType: desc.MediaType,
		path:      i.path,
		rawIndex:  rawIndex,
	}, nil
}

// Blob opens the blob with digest h from the layout.
func (i *layoutIndex) Blob(h v1.Hash) (io.ReadCloser, error) {
	return i.path.Blob(h)
}

// findDescriptor locates the descriptor for h in the index manifest. A zero
// hash selects the sole entry of a single-image layout (and errors otherwise).
func (i *layoutIndex) findDescriptor(h v1.Hash) (*v1.Descriptor, error) {
	im, err := i.IndexManifest()
	if err != nil {
		return nil, err
	}

	// Zero hash: caller wants "the" image; only valid for a one-entry layout.
	if h == (v1.Hash{}) {
		if len(im.Manifests) != 1 {
			return nil, errors.New("oci layout must contain only a single image to be used with layout.Image")
		}
		return &(im.Manifests)[0], nil
	}

	for _, desc := range im.Manifests {
		if desc.Digest == h {
			return &desc, nil
		}
	}

	return nil, fmt.Errorf("could not find descriptor in index: %s", h)
}
||||||
|
|
||||||
|
// TODO: Pull this out into methods on types.MediaType? e.g. instead, have:
|
||||||
|
// * mt.IsIndex()
|
||||||
|
// * mt.IsImage()
|
||||||
|
func isExpectedMediaType(mt types.MediaType, expected ...types.MediaType) bool { |
||||||
|
for _, allowed := range expected { |
||||||
|
if mt == allowed { |
||||||
|
return true |
||||||
|
} |
||||||
|
} |
||||||
|
return false |
||||||
|
} |
@ -0,0 +1,25 @@ |
|||||||
|
// Copyright 2019 The original author or authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package layout |
||||||
|
|
||||||
|
import "path/filepath" |
||||||
|
|
||||||
|
// Path represents an OCI image layout rooted in a file system path
|
||||||
|
type Path string |
||||||
|
|
||||||
|
func (l Path) path(elem ...string) string { |
||||||
|
complete := []string{string(l)} |
||||||
|
return filepath.Join(append(complete, elem...)...) |
||||||
|
} |
@ -0,0 +1,71 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package layout |
||||||
|
|
||||||
|
import v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
|
||||||
|
// Option is a functional option for Layout.
|
||||||
|
type Option func(*options) |
||||||
|
|
||||||
|
type options struct { |
||||||
|
descOpts []descriptorOption |
||||||
|
} |
||||||
|
|
||||||
|
func makeOptions(opts ...Option) *options { |
||||||
|
o := &options{ |
||||||
|
descOpts: []descriptorOption{}, |
||||||
|
} |
||||||
|
for _, apply := range opts { |
||||||
|
apply(o) |
||||||
|
} |
||||||
|
return o |
||||||
|
} |
||||||
|
|
||||||
|
type descriptorOption func(*v1.Descriptor) |
||||||
|
|
||||||
|
// WithAnnotations adds annotations to the artifact descriptor.
|
||||||
|
func WithAnnotations(annotations map[string]string) Option { |
||||||
|
return func(o *options) { |
||||||
|
o.descOpts = append(o.descOpts, func(desc *v1.Descriptor) { |
||||||
|
if desc.Annotations == nil { |
||||||
|
desc.Annotations = make(map[string]string) |
||||||
|
} |
||||||
|
for k, v := range annotations { |
||||||
|
desc.Annotations[k] = v |
||||||
|
} |
||||||
|
}) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// WithURLs adds urls to the artifact descriptor.
|
||||||
|
func WithURLs(urls []string) Option { |
||||||
|
return func(o *options) { |
||||||
|
o.descOpts = append(o.descOpts, func(desc *v1.Descriptor) { |
||||||
|
if desc.URLs == nil { |
||||||
|
desc.URLs = []string{} |
||||||
|
} |
||||||
|
desc.URLs = append(desc.URLs, urls...) |
||||||
|
}) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// WithPlatform sets the platform of the artifact descriptor.
|
||||||
|
func WithPlatform(platform v1.Platform) Option { |
||||||
|
return func(o *options) { |
||||||
|
o.descOpts = append(o.descOpts, func(desc *v1.Descriptor) { |
||||||
|
desc.Platform = &platform |
||||||
|
}) |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,32 @@ |
|||||||
|
// Copyright 2019 The original author or authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package layout |
||||||
|
|
||||||
|
import ( |
||||||
|
"os" |
||||||
|
"path/filepath" |
||||||
|
) |
||||||
|
|
||||||
|
// FromPath reads an OCI image layout at path and constructs a layout.Path.
|
||||||
|
func FromPath(path string) (Path, error) { |
||||||
|
// TODO: check oci-layout exists
|
||||||
|
|
||||||
|
_, err := os.Stat(filepath.Join(path, "index.json")) |
||||||
|
if err != nil { |
||||||
|
return "", err |
||||||
|
} |
||||||
|
|
||||||
|
return Path(path), nil |
||||||
|
} |
@ -0,0 +1,481 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package layout |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"os" |
||||||
|
"path/filepath" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/logs" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/match" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/mutate" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/stream" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
"golang.org/x/sync/errgroup" |
||||||
|
) |
||||||
|
|
||||||
|
// layoutFile is the fixed contents of the oci-layout marker file, declaring
// the image-layout version implemented by this package.
var layoutFile = `{
    "imageLayoutVersion": "1.0.0"
}`
||||||
|
|
||||||
|
// AppendImage writes a v1.Image to the Path and updates
|
||||||
|
// the index.json to reference it.
|
||||||
|
func (l Path) AppendImage(img v1.Image, options ...Option) error { |
||||||
|
if err := l.WriteImage(img); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
desc, err := partial.Descriptor(img) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
o := makeOptions(options...) |
||||||
|
for _, opt := range o.descOpts { |
||||||
|
opt(desc) |
||||||
|
} |
||||||
|
|
||||||
|
return l.AppendDescriptor(*desc) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendIndex writes a v1.ImageIndex to the Path and updates
|
||||||
|
// the index.json to reference it.
|
||||||
|
func (l Path) AppendIndex(ii v1.ImageIndex, options ...Option) error { |
||||||
|
if err := l.WriteIndex(ii); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
desc, err := partial.Descriptor(ii) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
o := makeOptions(options...) |
||||||
|
for _, opt := range o.descOpts { |
||||||
|
opt(desc) |
||||||
|
} |
||||||
|
|
||||||
|
return l.AppendDescriptor(*desc) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendDescriptor adds a descriptor to the index.json of the Path.
|
||||||
|
func (l Path) AppendDescriptor(desc v1.Descriptor) error { |
||||||
|
ii, err := l.ImageIndex() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
index, err := ii.IndexManifest() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
index.Manifests = append(index.Manifests, desc) |
||||||
|
|
||||||
|
rawIndex, err := json.MarshalIndent(index, "", " ") |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
return l.WriteFile("index.json", rawIndex, os.ModePerm) |
||||||
|
} |
||||||
|
|
||||||
|
// ReplaceImage writes a v1.Image to the Path and updates
|
||||||
|
// the index.json to reference it, replacing any existing one that matches matcher, if found.
|
||||||
|
func (l Path) ReplaceImage(img v1.Image, matcher match.Matcher, options ...Option) error { |
||||||
|
if err := l.WriteImage(img); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
return l.replaceDescriptor(img, matcher, options...) |
||||||
|
} |
||||||
|
|
||||||
|
// ReplaceIndex writes a v1.ImageIndex to the Path and updates
|
||||||
|
// the index.json to reference it, replacing any existing one that matches matcher, if found.
|
||||||
|
func (l Path) ReplaceIndex(ii v1.ImageIndex, matcher match.Matcher, options ...Option) error { |
||||||
|
if err := l.WriteIndex(ii); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
return l.replaceDescriptor(ii, matcher, options...) |
||||||
|
} |
||||||
|
|
||||||
|
// replaceDescriptor adds a descriptor to the index.json of the Path, replacing
|
||||||
|
// any one matching matcher, if found.
|
||||||
|
func (l Path) replaceDescriptor(append mutate.Appendable, matcher match.Matcher, options ...Option) error { |
||||||
|
ii, err := l.ImageIndex() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
desc, err := partial.Descriptor(append) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
o := makeOptions(options...) |
||||||
|
for _, opt := range o.descOpts { |
||||||
|
opt(desc) |
||||||
|
} |
||||||
|
|
||||||
|
add := mutate.IndexAddendum{ |
||||||
|
Add: append, |
||||||
|
Descriptor: *desc, |
||||||
|
} |
||||||
|
ii = mutate.AppendManifests(mutate.RemoveManifests(ii, matcher), add) |
||||||
|
|
||||||
|
index, err := ii.IndexManifest() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
rawIndex, err := json.MarshalIndent(index, "", " ") |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
return l.WriteFile("index.json", rawIndex, os.ModePerm) |
||||||
|
} |
||||||
|
|
||||||
|
// RemoveDescriptors removes any descriptors that match the match.Matcher from the index.json of the Path.
|
||||||
|
func (l Path) RemoveDescriptors(matcher match.Matcher) error { |
||||||
|
ii, err := l.ImageIndex() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
ii = mutate.RemoveManifests(ii, matcher) |
||||||
|
|
||||||
|
index, err := ii.IndexManifest() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
rawIndex, err := json.MarshalIndent(index, "", " ") |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
return l.WriteFile("index.json", rawIndex, os.ModePerm) |
||||||
|
} |
||||||
|
|
||||||
|
// WriteFile write a file with arbitrary data at an arbitrary location in a v1
|
||||||
|
// layout. Used mostly internally to write files like "oci-layout" and
|
||||||
|
// "index.json", also can be used to write other arbitrary files. Do *not* use
|
||||||
|
// this to write blobs. Use only WriteBlob() for that.
|
||||||
|
func (l Path) WriteFile(name string, data []byte, perm os.FileMode) error { |
||||||
|
if err := os.MkdirAll(l.path(), os.ModePerm); err != nil && !os.IsExist(err) { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
return os.WriteFile(l.path(name), data, perm) |
||||||
|
} |
||||||
|
|
||||||
|
// WriteBlob copies a file to the blobs/ directory in the Path from the given ReadCloser at
// blobs/{hash.Algorithm}/{hash.Hex}.
//
// The size of -1 disables size validation; no renamer is needed because the
// final file name is already determined by hash.
func (l Path) WriteBlob(hash v1.Hash, r io.ReadCloser) error {
	return l.writeBlob(hash, -1, r, nil)
}
||||||
|
|
||||||
|
func (l Path) writeBlob(hash v1.Hash, size int64, rc io.ReadCloser, renamer func() (v1.Hash, error)) error { |
||||||
|
if hash.Hex == "" && renamer == nil { |
||||||
|
panic("writeBlob called an invalid hash and no renamer") |
||||||
|
} |
||||||
|
|
||||||
|
dir := l.path("blobs", hash.Algorithm) |
||||||
|
if err := os.MkdirAll(dir, os.ModePerm); err != nil && !os.IsExist(err) { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
// Check if blob already exists and is the correct size
|
||||||
|
file := filepath.Join(dir, hash.Hex) |
||||||
|
if s, err := os.Stat(file); err == nil && !s.IsDir() && (s.Size() == size || size == -1) { |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// If a renamer func was provided write to a temporary file
|
||||||
|
open := func() (*os.File, error) { return os.Create(file) } |
||||||
|
if renamer != nil { |
||||||
|
open = func() (*os.File, error) { return os.CreateTemp(dir, hash.Hex) } |
||||||
|
} |
||||||
|
w, err := open() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
if renamer != nil { |
||||||
|
// Delete temp file if an error is encountered before renaming
|
||||||
|
defer func() { |
||||||
|
if err := os.Remove(w.Name()); err != nil && !errors.Is(err, os.ErrNotExist) { |
||||||
|
logs.Warn.Printf("error removing temporary file after encountering an error while writing blob: %v", err) |
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
defer w.Close() |
||||||
|
|
||||||
|
// Write to file and exit if not renaming
|
||||||
|
if n, err := io.Copy(w, rc); err != nil || renamer == nil { |
||||||
|
return err |
||||||
|
} else if size != -1 && n != size { |
||||||
|
return fmt.Errorf("expected blob size %d, but only wrote %d", size, n) |
||||||
|
} |
||||||
|
|
||||||
|
// Always close reader before renaming, since Close computes the digest in
|
||||||
|
// the case of streaming layers. If Close is not called explicitly, it will
|
||||||
|
// occur in a goroutine that is not guaranteed to succeed before renamer is
|
||||||
|
// called. When renamer is the layer's Digest method, it can return
|
||||||
|
// ErrNotComputed.
|
||||||
|
if err := rc.Close(); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
// Always close file before renaming
|
||||||
|
if err := w.Close(); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
// Rename file based on the final hash
|
||||||
|
finalHash, err := renamer() |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("error getting final digest of layer: %w", err) |
||||||
|
} |
||||||
|
|
||||||
|
renamePath := l.path("blobs", finalHash.Algorithm, finalHash.Hex) |
||||||
|
return os.Rename(w.Name(), renamePath) |
||||||
|
} |
||||||
|
|
||||||
|
// writeLayer writes the compressed layer to a blob. Unlike WriteBlob it will
// write to a temporary file (suffixed with .tmp) within the layout until the
// compressed reader is fully consumed and written to disk. Also unlike
// WriteBlob, it will not skip writing and exit without error when a blob file
// exists, but does not have the correct size. (The blob hash is not
// considered, because it may be expensive to compute.)
func (l Path) writeLayer(layer v1.Layer) error {
	d, err := layer.Digest()
	if errors.Is(err, stream.ErrNotComputed) {
		// Allow digest errors, since streams may not have calculated the hash
		// yet. Instead, use an empty value, which will be transformed into a
		// random file name with `os.CreateTemp` and the final digest will be
		// calculated after writing to a temp file and before renaming to the
		// final path.
		d = v1.Hash{Algorithm: "sha256", Hex: ""}
	} else if err != nil {
		return err
	}

	s, err := layer.Size()
	if errors.Is(err, stream.ErrNotComputed) {
		// Allow size errors, since streams may not have calculated the size
		// yet. Instead, use zero as a sentinel value meaning that no size
		// comparison can be done and any sized blob file should be considered
		// valid and not overwritten.
		//
		// TODO: Provide an option to always overwrite blobs.
		s = -1
	} else if err != nil {
		return err
	}

	r, err := layer.Compressed()
	if err != nil {
		return err
	}

	// writeBlob consumes and closes r; when d.Hex is empty, layer.Digest is
	// used as the renamer to pick the final blob path after consumption.
	if err := l.writeBlob(d, s, r, layer.Digest); err != nil {
		return fmt.Errorf("error writing layer: %w", err)
	}
	return nil
}
||||||
|
|
||||||
|
// RemoveBlob removes a file from the blobs directory in the Path
|
||||||
|
// at blobs/{hash.Algorithm}/{hash.Hex}
|
||||||
|
// It does *not* remove any reference to it from other manifests or indexes, or
|
||||||
|
// from the root index.json.
|
||||||
|
func (l Path) RemoveBlob(hash v1.Hash) error { |
||||||
|
dir := l.path("blobs", hash.Algorithm) |
||||||
|
err := os.Remove(filepath.Join(dir, hash.Hex)) |
||||||
|
if err != nil && !os.IsNotExist(err) { |
||||||
|
return err |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// WriteImage writes an image, including its manifest, config and all of its
// layers, to the blobs directory. If any blob already exists, as determined by
// the hash filename, does not write it.
// This function does *not* update the `index.json` file. If you want to write the
// image and also update the `index.json`, call AppendImage(), which wraps this
// and also updates the `index.json`.
func (l Path) WriteImage(img v1.Image) error {
	layers, err := img.Layers()
	if err != nil {
		return err
	}

	// Write the layers concurrently.
	var g errgroup.Group
	for _, layer := range layers {
		layer := layer // capture loop variable for the goroutine (pre-Go 1.22 semantics)
		g.Go(func() error {
			return l.writeLayer(layer)
		})
	}
	// Wait for all layer writes; the first error (if any) is returned.
	if err := g.Wait(); err != nil {
		return err
	}

	// Write the config.
	cfgName, err := img.ConfigName()
	if err != nil {
		return err
	}
	cfgBlob, err := img.RawConfigFile()
	if err != nil {
		return err
	}
	if err := l.WriteBlob(cfgName, io.NopCloser(bytes.NewReader(cfgBlob))); err != nil {
		return err
	}

	// Write the img manifest.
	d, err := img.Digest()
	if err != nil {
		return err
	}
	manifest, err := img.RawManifest()
	if err != nil {
		return err
	}

	return l.WriteBlob(d, io.NopCloser(bytes.NewReader(manifest)))
}
||||||
|
|
||||||
|
// withLayer is satisfied by indexes that can surface arbitrary blobs as
// layers by digest; used by writeIndexToFile as a workaround for #819.
type withLayer interface {
	Layer(v1.Hash) (v1.Layer, error)
}

// withBlob is satisfied by indexes that can surface raw blobs by digest;
// used by writeIndexToFile to pass through unrecognized media types.
type withBlob interface {
	Blob(v1.Hash) (io.ReadCloser, error)
}
||||||
|
|
||||||
|
// writeIndexToFile writes all manifests referenced by ii into the blobs
// directory (recursing into child indexes), then writes the raw index
// manifest itself to indexFile within the layout.
func (l Path) writeIndexToFile(indexFile string, ii v1.ImageIndex) error {
	index, err := ii.IndexManifest()
	if err != nil {
		return err
	}

	// Walk the descriptors and write any v1.Image or v1.ImageIndex that we find.
	// If we come across something we don't expect, just write it as a blob.
	for _, desc := range index.Manifests {
		switch desc.MediaType {
		case types.OCIImageIndex, types.DockerManifestList:
			ii, err := ii.ImageIndex(desc.Digest)
			if err != nil {
				return err
			}
			if err := l.WriteIndex(ii); err != nil {
				return err
			}
		case types.OCIManifestSchema1, types.DockerManifestSchema2:
			img, err := ii.Image(desc.Digest)
			if err != nil {
				return err
			}
			if err := l.WriteImage(img); err != nil {
				return err
			}
		default:
			// TODO: The layout could reference arbitrary things, which we should
			// probably just pass through.

			var blob io.ReadCloser
			// Workaround for #819.
			if wl, ok := ii.(withLayer); ok {
				layer, lerr := wl.Layer(desc.Digest)
				if lerr != nil {
					return lerr
				}
				blob, err = layer.Compressed()
			} else if wb, ok := ii.(withBlob); ok {
				blob, err = wb.Blob(desc.Digest)
			}
			// NOTE(review): if ii implements neither interface, blob stays
			// nil and is handed to WriteBlob — presumably the index always
			// implements one of them; verify against callers.
			if err != nil {
				return err
			}
			if err := l.WriteBlob(desc.Digest, blob); err != nil {
				return err
			}
		}
	}

	rawIndex, err := ii.RawManifest()
	if err != nil {
		return err
	}

	return l.WriteFile(indexFile, rawIndex, os.ModePerm)
}
||||||
|
|
||||||
|
// WriteIndex writes an index to the blobs directory. Walks down the children,
|
||||||
|
// including its children manifests and/or indexes, and down the tree until all of
|
||||||
|
// config and all layers, have been written. If any blob already exists, as determined by
|
||||||
|
// the hash filename, does not write it.
|
||||||
|
// This function does *not* update the `index.json` file. If you want to write the
|
||||||
|
// index and also update the `index.json`, call AppendIndex(), which wraps this
|
||||||
|
// and also updates the `index.json`.
|
||||||
|
func (l Path) WriteIndex(ii v1.ImageIndex) error { |
||||||
|
// Always just write oci-layout file, since it's small.
|
||||||
|
if err := l.WriteFile("oci-layout", []byte(layoutFile), os.ModePerm); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
h, err := ii.Digest() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
indexFile := filepath.Join("blobs", h.Algorithm, h.Hex) |
||||||
|
return l.writeIndexToFile(indexFile, ii) |
||||||
|
} |
||||||
|
|
||||||
|
// Write constructs a Path at path from an ImageIndex.
|
||||||
|
//
|
||||||
|
// The contents are written in the following format:
|
||||||
|
// At the top level, there is:
|
||||||
|
//
|
||||||
|
// One oci-layout file containing the version of this image-layout.
|
||||||
|
// One index.json file listing descriptors for the contained images.
|
||||||
|
//
|
||||||
|
// Under blobs/, there is, for each image:
|
||||||
|
//
|
||||||
|
// One file for each layer, named after the layer's SHA.
|
||||||
|
// One file for each config blob, named after its SHA.
|
||||||
|
// One file for each manifest blob, named after its SHA.
|
||||||
|
func Write(path string, ii v1.ImageIndex) (Path, error) { |
||||||
|
lp := Path(path) |
||||||
|
// Always just write oci-layout file, since it's small.
|
||||||
|
if err := lp.WriteFile("oci-layout", []byte(layoutFile), os.ModePerm); err != nil { |
||||||
|
return "", err |
||||||
|
} |
||||||
|
|
||||||
|
// TODO create blobs/ in case there is a blobs file which would prevent the directory from being created
|
||||||
|
|
||||||
|
return lp, lp.writeIndexToFile("index.json", ii) |
||||||
|
} |
@ -0,0 +1,56 @@ |
|||||||
|
# `mutate` |
||||||
|
|
||||||
|
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate) |
||||||
|
|
||||||
|
The `v1.Image`, `v1.ImageIndex`, and `v1.Layer` interfaces provide only |
||||||
|
accessor methods, so they are essentially immutable. If you want to change |
||||||
|
something about them, you need to produce a new instance of that interface. |
||||||
|
|
||||||
|
A common use case for this library is to read an image from somewhere (a source), |
||||||
|
change something about it, and write the image somewhere else (a sink). |
||||||
|
|
||||||
|
Graphically, this looks something like: |
||||||
|
|
||||||
|
<p align="center"> |
||||||
|
<img src="/images/mutate.dot.svg" /> |
||||||
|
</p> |
||||||
|
|
||||||
|
## Mutations |
||||||
|
|
||||||
|
This is obviously not a comprehensive set of useful transformations (PRs welcome!), |
||||||
|
but a rough summary of what the `mutate` package currently does: |
||||||
|
|
||||||
|
### `Config` and `ConfigFile` |
||||||
|
|
||||||
|
These allow you to change the [image configuration](https://github.com/opencontainers/image-spec/blob/master/config.md#properties), |
||||||
|
e.g. to change the entrypoint, environment, author, etc. |
||||||
|
|
||||||
|
### `Time`, `Canonical`, and `CreatedAt` |
||||||
|
|
||||||
|
These are useful in the context of [reproducible builds](https://reproducible-builds.org/), |
||||||
|
where you may want to strip timestamps and other non-reproducible information. |
||||||
|
|
||||||
|
### `Append`, `AppendLayers`, and `AppendManifests` |
||||||
|
|
||||||
|
These functions allow the extension of a `v1.Image` or `v1.ImageIndex` with |
||||||
|
new layers or manifests. |
||||||
|
|
||||||
|
For constructing an image `FROM scratch`, see the [`empty`](/pkg/v1/empty) package. |
||||||
|
|
||||||
|
### `MediaType` and `IndexMediaType` |
||||||
|
|
||||||
|
Sometimes, it is necessary to change the media type of an image or index, |
||||||
|
e.g. to appease a registry with strict validation of images (_looking at you, GCR_). |
||||||
|
|
||||||
|
### `Rebase` |
||||||
|
|
||||||
|
Rebase has [its own README](/cmd/crane/rebase.md). |
||||||
|
|
||||||
|
This is the underlying implementation of [`crane rebase`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_rebase.md). |
||||||
|
|
||||||
|
### `Extract` |
||||||
|
|
||||||
|
Extract will flatten an image filesystem into a single tar stream, |
||||||
|
respecting whiteout files. |
||||||
|
|
||||||
|
This is the underlying implementation of [`crane export`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_export.md). |
@ -0,0 +1,16 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package mutate provides facilities for mutating v1.Images of any kind.
|
||||||
|
package mutate |
@ -0,0 +1,285 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package mutate |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
|
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/stream" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
// image lazily applies a set of Addendum mutations on top of a base v1.Image.
// All derived state is produced once by compute() and cached on the struct.
type image struct {
	base v1.Image
	adds []Addendum

	// computed guards the cached fields below; set true once compute() ran.
	computed   bool
	configFile *v1.ConfigFile
	manifest   *v1.Manifest
	// annotations are merged into the manifest's annotations by compute().
	annotations map[string]string
	// mediaType / configMediaType, when non-nil, override the manifest's and
	// config descriptor's media types.
	mediaType       *types.MediaType
	configMediaType *types.MediaType
	// diffIDMap / digestMap index added layers by uncompressed and
	// compressed hash respectively, for LayerByDiffID / LayerByDigest.
	diffIDMap map[v1.Hash]v1.Layer
	digestMap map[v1.Hash]v1.Layer
}
||||||
|
|
||||||
|
var _ v1.Image = (*image)(nil) |
||||||
|
|
||||||
|
func (i *image) MediaType() (types.MediaType, error) { |
||||||
|
if i.mediaType != nil { |
||||||
|
return *i.mediaType, nil |
||||||
|
} |
||||||
|
return i.base.MediaType() |
||||||
|
} |
||||||
|
|
||||||
|
// compute materializes the mutated config file, manifest, and layer lookup
// maps from the base image plus the accumulated addenda, caching the results
// on i. It is idempotent: subsequent calls return nil immediately.
func (i *image) compute() error {
	// Don't re-compute if already computed.
	if i.computed {
		return nil
	}
	var configFile *v1.ConfigFile
	if i.configFile != nil {
		configFile = i.configFile
	} else {
		cf, err := i.base.ConfigFile()
		if err != nil {
			return err
		}
		// Deep-copy so mutations below never touch the base image's config.
		configFile = cf.DeepCopy()
	}
	diffIDs := configFile.RootFS.DiffIDs
	history := configFile.History

	diffIDMap := make(map[v1.Hash]v1.Layer)
	digestMap := make(map[v1.Hash]v1.Layer)

	// Every addendum contributes history; only those with a layer also
	// contribute a diff ID.
	for _, add := range i.adds {
		history = append(history, add.History)
		if add.Layer != nil {
			diffID, err := add.Layer.DiffID()
			if err != nil {
				return err
			}
			diffIDs = append(diffIDs, diffID)
			diffIDMap[diffID] = add.Layer
		}
	}

	m, err := i.base.Manifest()
	if err != nil {
		return err
	}
	manifest := m.DeepCopy()
	manifestLayers := manifest.Layers
	for _, add := range i.adds {
		if add.Layer == nil {
			// Empty layers include only history in manifest.
			continue
		}

		desc, err := partial.Descriptor(add.Layer)
		if err != nil {
			return err
		}

		// Fields in the addendum override the original descriptor.
		if len(add.Annotations) != 0 {
			desc.Annotations = add.Annotations
		}
		if len(add.URLs) != 0 {
			desc.URLs = add.URLs
		}

		if add.MediaType != "" {
			desc.MediaType = add.MediaType
		}

		manifestLayers = append(manifestLayers, *desc)
		digestMap[desc.Digest] = add.Layer
	}

	configFile.RootFS.DiffIDs = diffIDs
	configFile.History = history

	manifest.Layers = manifestLayers

	// Re-serialize the mutated config and point the manifest's config
	// descriptor at its new digest and size.
	rcfg, err := json.Marshal(configFile)
	if err != nil {
		return err
	}
	d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg))
	if err != nil {
		return err
	}
	manifest.Config.Digest = d
	manifest.Config.Size = sz

	// If Data was set in the base image, we need to update it in the mutated image.
	if m.Config.Data != nil {
		manifest.Config.Data = rcfg
	}

	// If the user wants to mutate the media type of the config
	if i.configMediaType != nil {
		manifest.Config.MediaType = *i.configMediaType
	}

	if i.mediaType != nil {
		manifest.MediaType = *i.mediaType
	}

	// Merge user-supplied annotations into the manifest's annotations.
	if i.annotations != nil {
		if manifest.Annotations == nil {
			manifest.Annotations = map[string]string{}
		}

		for k, v := range i.annotations {
			manifest.Annotations[k] = v
		}
	}

	// Cache everything and mark computation complete.
	i.configFile = configFile
	i.manifest = manifest
	i.diffIDMap = diffIDMap
	i.digestMap = digestMap
	i.computed = true
	return nil
}
||||||
|
|
||||||
|
// Layers returns the ordered collection of filesystem layers that comprise this image.
// The order of the list is oldest/base layer first, and most-recent/top layer last.
func (i *image) Layers() ([]v1.Layer, error) {
	if err := i.compute(); errors.Is(err, stream.ErrNotComputed) {
		// Image contains a streamable layer which has not yet been
		// consumed. Just return the layers we have in case the caller
		// is going to consume the layers.
		layers, err := i.base.Layers()
		if err != nil {
			return nil, err
		}
		for _, add := range i.adds {
			layers = append(layers, add.Layer)
		}
		return layers, nil
	} else if err != nil {
		return nil, err
	}

	// Computed path: resolve each diff ID back to its layer so the result
	// reflects the mutated config's ordering.
	diffIDs, err := partial.DiffIDs(i)
	if err != nil {
		return nil, err
	}
	ls := make([]v1.Layer, 0, len(diffIDs))
	for _, h := range diffIDs {
		l, err := i.LayerByDiffID(h)
		if err != nil {
			return nil, err
		}
		ls = append(ls, l)
	}
	return ls, nil
}
||||||
|
|
||||||
|
// ConfigName returns the hash of the image's config file.
|
||||||
|
func (i *image) ConfigName() (v1.Hash, error) { |
||||||
|
if err := i.compute(); err != nil { |
||||||
|
return v1.Hash{}, err |
||||||
|
} |
||||||
|
return partial.ConfigName(i) |
||||||
|
} |
||||||
|
|
||||||
|
// ConfigFile returns this image's config file.
|
||||||
|
func (i *image) ConfigFile() (*v1.ConfigFile, error) { |
||||||
|
if err := i.compute(); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return i.configFile.DeepCopy(), nil |
||||||
|
} |
||||||
|
|
||||||
|
// RawConfigFile returns the serialized bytes of ConfigFile()
|
||||||
|
func (i *image) RawConfigFile() ([]byte, error) { |
||||||
|
if err := i.compute(); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return json.Marshal(i.configFile) |
||||||
|
} |
||||||
|
|
||||||
|
// Digest returns the sha256 of this image's manifest.
|
||||||
|
func (i *image) Digest() (v1.Hash, error) { |
||||||
|
if err := i.compute(); err != nil { |
||||||
|
return v1.Hash{}, err |
||||||
|
} |
||||||
|
return partial.Digest(i) |
||||||
|
} |
||||||
|
|
||||||
|
// Size implements v1.Image.
|
||||||
|
func (i *image) Size() (int64, error) { |
||||||
|
if err := i.compute(); err != nil { |
||||||
|
return -1, err |
||||||
|
} |
||||||
|
return partial.Size(i) |
||||||
|
} |
||||||
|
|
||||||
|
// Manifest returns this image's Manifest object.
|
||||||
|
func (i *image) Manifest() (*v1.Manifest, error) { |
||||||
|
if err := i.compute(); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return i.manifest.DeepCopy(), nil |
||||||
|
} |
||||||
|
|
||||||
|
// RawManifest returns the serialized bytes of Manifest()
|
||||||
|
func (i *image) RawManifest() ([]byte, error) { |
||||||
|
if err := i.compute(); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return json.Marshal(i.manifest) |
||||||
|
} |
||||||
|
|
||||||
|
// LayerByDigest returns a Layer for interacting with a particular layer of
|
||||||
|
// the image, looking it up by "digest" (the compressed hash).
|
||||||
|
func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) { |
||||||
|
if cn, err := i.ConfigName(); err != nil { |
||||||
|
return nil, err |
||||||
|
} else if h == cn { |
||||||
|
return partial.ConfigLayer(i) |
||||||
|
} |
||||||
|
if layer, ok := i.digestMap[h]; ok { |
||||||
|
return layer, nil |
||||||
|
} |
||||||
|
return i.base.LayerByDigest(h) |
||||||
|
} |
||||||
|
|
||||||
|
// LayerByDiffID is an analog to LayerByDigest, looking up by "diff id"
|
||||||
|
// (the uncompressed hash).
|
||||||
|
func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { |
||||||
|
if layer, ok := i.diffIDMap[h]; ok { |
||||||
|
return layer, nil |
||||||
|
} |
||||||
|
return i.base.LayerByDiffID(h) |
||||||
|
} |
||||||
|
|
||||||
|
func validate(adds []Addendum) error { |
||||||
|
for _, add := range adds { |
||||||
|
if add.Layer == nil && !add.History.EmptyLayer { |
||||||
|
return errors.New("unable to add a nil layer to the image") |
||||||
|
} |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
@ -0,0 +1,202 @@ |
|||||||
|
// Copyright 2019 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package mutate |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
"fmt" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/logs" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/match" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
func computeDescriptor(ia IndexAddendum) (*v1.Descriptor, error) { |
||||||
|
desc, err := partial.Descriptor(ia.Add) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
// The IndexAddendum allows overriding Descriptor values.
|
||||||
|
if ia.Descriptor.Size != 0 { |
||||||
|
desc.Size = ia.Descriptor.Size |
||||||
|
} |
||||||
|
if string(ia.Descriptor.MediaType) != "" { |
||||||
|
desc.MediaType = ia.Descriptor.MediaType |
||||||
|
} |
||||||
|
if ia.Descriptor.Digest != (v1.Hash{}) { |
||||||
|
desc.Digest = ia.Descriptor.Digest |
||||||
|
} |
||||||
|
if ia.Descriptor.Platform != nil { |
||||||
|
desc.Platform = ia.Descriptor.Platform |
||||||
|
} |
||||||
|
if len(ia.Descriptor.URLs) != 0 { |
||||||
|
desc.URLs = ia.Descriptor.URLs |
||||||
|
} |
||||||
|
if len(ia.Descriptor.Annotations) != 0 { |
||||||
|
desc.Annotations = ia.Descriptor.Annotations |
||||||
|
} |
||||||
|
if ia.Descriptor.Data != nil { |
||||||
|
desc.Data = ia.Descriptor.Data |
||||||
|
} |
||||||
|
|
||||||
|
return desc, nil |
||||||
|
} |
||||||
|
|
||||||
|
// index lazily applies additions, removals, and media-type/annotation
// overrides on top of a base v1.ImageIndex. The mutated manifest and the
// digest-keyed lookup maps are built once by compute().
type index struct {
	base v1.ImageIndex
	adds []IndexAddendum
	// remove is removed before adds
	remove match.Matcher

	computed    bool              // set once compute() has run
	manifest    *v1.IndexManifest // mutated manifest built by compute()
	annotations map[string]string // merged into the manifest's annotations
	mediaType   *types.MediaType  // optional media-type override
	imageMap    map[v1.Hash]v1.Image      // added images, keyed by digest
	indexMap    map[v1.Hash]v1.ImageIndex // added indexes, keyed by digest
	layerMap    map[v1.Hash]v1.Layer      // added layers, keyed by digest
}

// Compile-time check that *index satisfies v1.ImageIndex.
var _ v1.ImageIndex = (*index)(nil)
||||||
|
|
||||||
|
func (i *index) MediaType() (types.MediaType, error) { |
||||||
|
if i.mediaType != nil { |
||||||
|
return *i.mediaType, nil |
||||||
|
} |
||||||
|
return i.base.MediaType() |
||||||
|
} |
||||||
|
|
||||||
|
// Size implements v1.ImageIndex by measuring the serialized manifest.
func (i *index) Size() (int64, error) { return partial.Size(i) }
||||||
|
|
||||||
|
// compute builds the mutated index manifest exactly once and populates the
// digest-keyed lookup maps consulted by Image/ImageIndex/Layer. The order
// matters: removals are applied before additions, then media-type and
// annotation overrides.
func (i *index) compute() error {
	// Don't re-compute if already computed.
	if i.computed {
		return nil
	}

	i.imageMap = make(map[v1.Hash]v1.Image)
	i.indexMap = make(map[v1.Hash]v1.ImageIndex)
	i.layerMap = make(map[v1.Hash]v1.Layer)

	m, err := i.base.IndexManifest()
	if err != nil {
		return err
	}
	// Deep-copy so mutations below never leak into the base index.
	manifest := m.DeepCopy()
	manifests := manifest.Manifests

	// Removal happens before additions (see the index struct comment).
	if i.remove != nil {
		var cleanedManifests []v1.Descriptor
		for _, m := range manifests {
			if !i.remove(m) {
				cleanedManifests = append(cleanedManifests, m)
			}
		}
		manifests = cleanedManifests
	}

	for _, add := range i.adds {
		desc, err := computeDescriptor(add)
		if err != nil {
			return err
		}

		manifests = append(manifests, *desc)
		// Record each addendum by digest so lookups can serve it directly
		// instead of delegating to the base.
		if idx, ok := add.Add.(v1.ImageIndex); ok {
			i.indexMap[desc.Digest] = idx
		} else if img, ok := add.Add.(v1.Image); ok {
			i.imageMap[desc.Digest] = img
		} else if l, ok := add.Add.(v1.Layer); ok {
			i.layerMap[desc.Digest] = l
		} else {
			// Unknown Appendable: it still appears in the manifest, but
			// cannot be served by the lookup methods.
			logs.Warn.Printf("Unexpected index addendum: %T", add.Add)
		}
	}

	manifest.Manifests = manifests

	if i.mediaType != nil {
		manifest.MediaType = *i.mediaType
	}

	// Merge (not replace) annotations into the manifest.
	if i.annotations != nil {
		if manifest.Annotations == nil {
			manifest.Annotations = map[string]string{}
		}
		for k, v := range i.annotations {
			manifest.Annotations[k] = v
		}
	}

	i.manifest = manifest
	i.computed = true
	return nil
}
||||||
|
|
||||||
|
func (i *index) Image(h v1.Hash) (v1.Image, error) { |
||||||
|
if img, ok := i.imageMap[h]; ok { |
||||||
|
return img, nil |
||||||
|
} |
||||||
|
return i.base.Image(h) |
||||||
|
} |
||||||
|
|
||||||
|
func (i *index) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { |
||||||
|
if idx, ok := i.indexMap[h]; ok { |
||||||
|
return idx, nil |
||||||
|
} |
||||||
|
return i.base.ImageIndex(h) |
||||||
|
} |
||||||
|
|
||||||
|
// withLayer is satisfied by indexes that can look up arbitrary layer
// blobs by digest; see (*index).Layer below (workaround for #819).
type withLayer interface {
	Layer(v1.Hash) (v1.Layer, error)
}
||||||
|
|
||||||
|
// Workaround for #819.
|
||||||
|
func (i *index) Layer(h v1.Hash) (v1.Layer, error) { |
||||||
|
if layer, ok := i.layerMap[h]; ok { |
||||||
|
return layer, nil |
||||||
|
} |
||||||
|
if wl, ok := i.base.(withLayer); ok { |
||||||
|
return wl.Layer(h) |
||||||
|
} |
||||||
|
return nil, fmt.Errorf("layer not found: %s", h) |
||||||
|
} |
||||||
|
|
||||||
|
// Digest returns the sha256 of this image's manifest.
|
||||||
|
func (i *index) Digest() (v1.Hash, error) { |
||||||
|
if err := i.compute(); err != nil { |
||||||
|
return v1.Hash{}, err |
||||||
|
} |
||||||
|
return partial.Digest(i) |
||||||
|
} |
||||||
|
|
||||||
|
// Manifest returns this image's Manifest object.
|
||||||
|
func (i *index) IndexManifest() (*v1.IndexManifest, error) { |
||||||
|
if err := i.compute(); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return i.manifest.DeepCopy(), nil |
||||||
|
} |
||||||
|
|
||||||
|
// RawManifest returns the serialized bytes of Manifest()
|
||||||
|
func (i *index) RawManifest() ([]byte, error) { |
||||||
|
if err := i.compute(); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return json.Marshal(i.manifest) |
||||||
|
} |
@ -0,0 +1,516 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package mutate |
||||||
|
|
||||||
|
import ( |
||||||
|
"archive/tar" |
||||||
|
"bytes" |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"path/filepath" |
||||||
|
"strings" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/internal/gzip" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/empty" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/match" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/tarball" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
// whiteoutPrefix marks tar entries that tombstone ("white out") a path
// from lower layers; see the handling in extract().
const whiteoutPrefix = ".wh."
||||||
|
|
||||||
|
// Addendum contains layers and history to be appended
// to a base image
type Addendum struct {
	// Layer is the layer blob to append; may be nil when History.EmptyLayer
	// is set (see validate()).
	Layer v1.Layer
	// History is the history entry recorded for this addendum.
	History v1.History
	// URLs, Annotations and MediaType presumably override the resulting
	// layer descriptor — confirm against the append implementation, which
	// is outside this view.
	URLs        []string
	Annotations map[string]string
	MediaType   types.MediaType
}
||||||
|
|
||||||
|
// AppendLayers applies layers to a base image.
|
||||||
|
func AppendLayers(base v1.Image, layers ...v1.Layer) (v1.Image, error) { |
||||||
|
additions := make([]Addendum, 0, len(layers)) |
||||||
|
for _, layer := range layers { |
||||||
|
additions = append(additions, Addendum{Layer: layer}) |
||||||
|
} |
||||||
|
|
||||||
|
return Append(base, additions...) |
||||||
|
} |
||||||
|
|
||||||
|
// Append will apply the list of addendums to the base image
|
||||||
|
func Append(base v1.Image, adds ...Addendum) (v1.Image, error) { |
||||||
|
if len(adds) == 0 { |
||||||
|
return base, nil |
||||||
|
} |
||||||
|
if err := validate(adds); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
return &image{ |
||||||
|
base: base, |
||||||
|
adds: adds, |
||||||
|
}, nil |
||||||
|
} |
||||||
|
|
||||||
|
// Appendable is an interface that represents something that can be appended
// to an ImageIndex. We need to be able to construct a v1.Descriptor in order
// to append something, and this is the minimum required information for that.
type Appendable interface {
	// MediaType reports the manifest media type of the appendable.
	MediaType() (types.MediaType, error)
	// Digest reports the content digest of the appendable.
	Digest() (v1.Hash, error)
	// Size reports the serialized size in bytes.
	Size() (int64, error)
}
||||||
|
|
||||||
|
// IndexAddendum represents an appendable thing and all the properties that
// we may want to override in the resulting v1.Descriptor.
type IndexAddendum struct {
	// Add is the image, index, or layer being appended.
	Add Appendable
	// Embedded Descriptor fields, when non-zero, override the computed
	// descriptor values (see computeDescriptor).
	v1.Descriptor
}
||||||
|
|
||||||
|
// AppendManifests appends a manifest to the ImageIndex.
|
||||||
|
func AppendManifests(base v1.ImageIndex, adds ...IndexAddendum) v1.ImageIndex { |
||||||
|
return &index{ |
||||||
|
base: base, |
||||||
|
adds: adds, |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// RemoveManifests removes any descriptors that match the match.Matcher.
|
||||||
|
func RemoveManifests(base v1.ImageIndex, matcher match.Matcher) v1.ImageIndex { |
||||||
|
return &index{ |
||||||
|
base: base, |
||||||
|
remove: matcher, |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Config mutates the provided v1.Image to have the provided v1.Config
|
||||||
|
func Config(base v1.Image, cfg v1.Config) (v1.Image, error) { |
||||||
|
cf, err := base.ConfigFile() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
cf.Config = cfg |
||||||
|
|
||||||
|
return ConfigFile(base, cf) |
||||||
|
} |
||||||
|
|
||||||
|
// Annotatable represents a manifest that can carry annotations.
// Anything that can produce its raw manifest bytes qualifies.
type Annotatable interface {
	partial.WithRawManifest
}
||||||
|
|
||||||
|
// Annotations mutates the annotations on an annotatable image or index manifest.
|
||||||
|
//
|
||||||
|
// The annotatable input is expected to be a v1.Image or v1.ImageIndex, and
|
||||||
|
// returns the same type. You can type-assert the result like so:
|
||||||
|
//
|
||||||
|
// img := Annotations(empty.Image, map[string]string{
|
||||||
|
// "foo": "bar",
|
||||||
|
// }).(v1.Image)
|
||||||
|
//
|
||||||
|
// Or for an index:
|
||||||
|
//
|
||||||
|
// idx := Annotations(empty.Index, map[string]string{
|
||||||
|
// "foo": "bar",
|
||||||
|
// }).(v1.ImageIndex)
|
||||||
|
//
|
||||||
|
// If the input Annotatable is not an Image or ImageIndex, the result will
|
||||||
|
// attempt to lazily annotate the raw manifest.
|
||||||
|
func Annotations(f Annotatable, anns map[string]string) Annotatable { |
||||||
|
if img, ok := f.(v1.Image); ok { |
||||||
|
return &image{ |
||||||
|
base: img, |
||||||
|
annotations: anns, |
||||||
|
} |
||||||
|
} |
||||||
|
if idx, ok := f.(v1.ImageIndex); ok { |
||||||
|
return &index{ |
||||||
|
base: idx, |
||||||
|
annotations: anns, |
||||||
|
} |
||||||
|
} |
||||||
|
return arbitraryRawManifest{f, anns} |
||||||
|
} |
||||||
|
|
||||||
|
// arbitraryRawManifest wraps an Annotatable that is neither a v1.Image
// nor a v1.ImageIndex, merging anns into its raw manifest JSON on demand.
type arbitraryRawManifest struct {
	a    Annotatable       // underlying manifest source
	anns map[string]string // annotations to merge in
}
||||||
|
|
||||||
|
func (a arbitraryRawManifest) RawManifest() ([]byte, error) { |
||||||
|
b, err := a.a.RawManifest() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
var m map[string]any |
||||||
|
if err := json.Unmarshal(b, &m); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
if ann, ok := m["annotations"]; ok { |
||||||
|
if annm, ok := ann.(map[string]string); ok { |
||||||
|
for k, v := range a.anns { |
||||||
|
annm[k] = v |
||||||
|
} |
||||||
|
} else { |
||||||
|
return nil, fmt.Errorf(".annotations is not a map: %T", ann) |
||||||
|
} |
||||||
|
} else { |
||||||
|
m["annotations"] = a.anns |
||||||
|
} |
||||||
|
return json.Marshal(m) |
||||||
|
} |
||||||
|
|
||||||
|
// ConfigFile mutates the provided v1.Image to have the provided v1.ConfigFile
|
||||||
|
func ConfigFile(base v1.Image, cfg *v1.ConfigFile) (v1.Image, error) { |
||||||
|
m, err := base.Manifest() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
image := &image{ |
||||||
|
base: base, |
||||||
|
manifest: m.DeepCopy(), |
||||||
|
configFile: cfg, |
||||||
|
} |
||||||
|
|
||||||
|
return image, nil |
||||||
|
} |
||||||
|
|
||||||
|
// CreatedAt mutates the provided v1.Image to have the provided v1.Time
|
||||||
|
func CreatedAt(base v1.Image, created v1.Time) (v1.Image, error) { |
||||||
|
cf, err := base.ConfigFile() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
cfg := cf.DeepCopy() |
||||||
|
cfg.Created = created |
||||||
|
|
||||||
|
return ConfigFile(base, cfg) |
||||||
|
} |
||||||
|
|
||||||
|
// Extract takes an image and returns an io.ReadCloser containing the image's
// flattened filesystem.
//
// Callers can read the filesystem contents by passing the reader to
// tar.NewReader, or io.Copy it directly to some output.
//
// If a caller doesn't read the full contents, they should Close it to free up
// resources used during extraction.
func Extract(img v1.Image) io.ReadCloser {
	pr, pw := io.Pipe()

	// Extraction runs in its own goroutine, streaming through the pipe.
	go func() {
		// Close the writer with any errors encountered during
		// extraction. These errors will be returned by the reader end
		// on subsequent reads. If err == nil, the reader will return
		// EOF.
		pw.CloseWithError(extract(img, pw))
	}()

	return pr
}
||||||
|
|
||||||
|
// Adapted from https://github.com/google/containerregistry/blob/da03b395ccdc4e149e34fbb540483efce962dc64/client/v2_2/docker_image_.py#L816
|
||||||
|
func extract(img v1.Image, w io.Writer) error { |
||||||
|
tarWriter := tar.NewWriter(w) |
||||||
|
defer tarWriter.Close() |
||||||
|
|
||||||
|
fileMap := map[string]bool{} |
||||||
|
|
||||||
|
layers, err := img.Layers() |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("retrieving image layers: %w", err) |
||||||
|
} |
||||||
|
|
||||||
|
// we iterate through the layers in reverse order because it makes handling
|
||||||
|
// whiteout layers more efficient, since we can just keep track of the removed
|
||||||
|
// files as we see .wh. layers and ignore those in previous layers.
|
||||||
|
for i := len(layers) - 1; i >= 0; i-- { |
||||||
|
layer := layers[i] |
||||||
|
layerReader, err := layer.Uncompressed() |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("reading layer contents: %w", err) |
||||||
|
} |
||||||
|
defer layerReader.Close() |
||||||
|
tarReader := tar.NewReader(layerReader) |
||||||
|
for { |
||||||
|
header, err := tarReader.Next() |
||||||
|
if errors.Is(err, io.EOF) { |
||||||
|
break |
||||||
|
} |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("reading tar: %w", err) |
||||||
|
} |
||||||
|
|
||||||
|
// Some tools prepend everything with "./", so if we don't Clean the
|
||||||
|
// name, we may have duplicate entries, which angers tar-split.
|
||||||
|
header.Name = filepath.Clean(header.Name) |
||||||
|
// force PAX format to remove Name/Linkname length limit of 100 characters
|
||||||
|
// required by USTAR and to not depend on internal tar package guess which
|
||||||
|
// prefers USTAR over PAX
|
||||||
|
header.Format = tar.FormatPAX |
||||||
|
|
||||||
|
basename := filepath.Base(header.Name) |
||||||
|
dirname := filepath.Dir(header.Name) |
||||||
|
tombstone := strings.HasPrefix(basename, whiteoutPrefix) |
||||||
|
if tombstone { |
||||||
|
basename = basename[len(whiteoutPrefix):] |
||||||
|
} |
||||||
|
|
||||||
|
// check if we have seen value before
|
||||||
|
// if we're checking a directory, don't filepath.Join names
|
||||||
|
var name string |
||||||
|
if header.Typeflag == tar.TypeDir { |
||||||
|
name = header.Name |
||||||
|
} else { |
||||||
|
name = filepath.Join(dirname, basename) |
||||||
|
} |
||||||
|
|
||||||
|
if _, ok := fileMap[name]; ok { |
||||||
|
continue |
||||||
|
} |
||||||
|
|
||||||
|
// check for a whited out parent directory
|
||||||
|
if inWhiteoutDir(fileMap, name) { |
||||||
|
continue |
||||||
|
} |
||||||
|
|
||||||
|
// mark file as handled. non-directory implicitly tombstones
|
||||||
|
// any entries with a matching (or child) name
|
||||||
|
fileMap[name] = tombstone || !(header.Typeflag == tar.TypeDir) |
||||||
|
if !tombstone { |
||||||
|
if err := tarWriter.WriteHeader(header); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
if header.Size > 0 { |
||||||
|
if _, err := io.CopyN(tarWriter, tarReader, header.Size); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// inWhiteoutDir reports whether any ancestor directory of file has been
// tombstoned (mapped to true) in fileMap.
func inWhiteoutDir(fileMap map[string]bool, file string) bool {
	for file != "" {
		parent := filepath.Dir(file)
		if parent == file {
			// Reached the root; no more ancestors to check.
			return false
		}
		if fileMap[parent] {
			return true
		}
		file = parent
	}
	return false
}
||||||
|
|
||||||
|
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
||||||
|
|
||||||
|
// Time sets all timestamps in an image to the given timestamp.
//
// History is normally a superset of layers (empty-layer history entries
// have no blob), so the two are walked with independent cursors: the layer
// cursor only advances when a non-empty history entry is consumed.
func Time(img v1.Image, t time.Time) (v1.Image, error) {
	newImage := empty.Image

	layers, err := img.Layers()
	if err != nil {
		return nil, fmt.Errorf("getting image layers: %w", err)
	}

	ocf, err := img.ConfigFile()
	if err != nil {
		return nil, fmt.Errorf("getting original config file: %w", err)
	}

	// One addendum per history entry or per layer, whichever is larger.
	addendums := make([]Addendum, max(len(ocf.History), len(layers)))
	var historyIdx, addendumIdx int
	for layerIdx := 0; layerIdx < len(layers); addendumIdx, layerIdx = addendumIdx+1, layerIdx+1 {
		newLayer, err := layerTime(layers[layerIdx], t)
		if err != nil {
			return nil, fmt.Errorf("setting layer times: %w", err)
		}

		// try to search for the history entry that corresponds to this layer
		for ; historyIdx < len(ocf.History); historyIdx++ {
			addendums[addendumIdx].History = ocf.History[historyIdx]
			// if it's an EmptyLayer, do not set the Layer and have the Addendum with just the History
			// and move on to the next History entry
			if ocf.History[historyIdx].EmptyLayer {
				addendumIdx++
				continue
			}
			// otherwise, we can exit from the cycle
			historyIdx++
			break
		}
		addendums[addendumIdx].Layer = newLayer
	}

	// add all leftover History entries
	for ; historyIdx < len(ocf.History); historyIdx, addendumIdx = historyIdx+1, addendumIdx+1 {
		addendums[addendumIdx].History = ocf.History[historyIdx]
	}

	newImage, err = Append(newImage, addendums...)
	if err != nil {
		return nil, fmt.Errorf("appending layers: %w", err)
	}

	cf, err := newImage.ConfigFile()
	if err != nil {
		return nil, fmt.Errorf("setting config file: %w", err)
	}

	cfg := cf.DeepCopy()

	// Copy basic config over
	cfg.Architecture = ocf.Architecture
	cfg.OS = ocf.OS
	cfg.OSVersion = ocf.OSVersion
	cfg.Config = ocf.Config

	// Strip away timestamps from the config file
	cfg.Created = v1.Time{Time: t}

	// Rewrite every history entry with the fixed timestamp, carrying over
	// the original metadata fields.
	for i, h := range cfg.History {
		h.Created = v1.Time{Time: t}
		h.CreatedBy = ocf.History[i].CreatedBy
		h.Comment = ocf.History[i].Comment
		h.EmptyLayer = ocf.History[i].EmptyLayer
		// Explicitly ignore Author field; which hinders reproducibility
		h.Author = ""
		cfg.History[i] = h
	}

	return ConfigFile(newImage, cfg)
}
||||||
|
|
||||||
|
// layerTime rewrites every tar entry of layer so its ModTime is t,
// re-gzips the result, and returns the new layer.
//
// The entire uncompressed layer is buffered in memory (see the TODO
// below), so very large layers are expensive here.
func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) {
	layerReader, err := layer.Uncompressed()
	if err != nil {
		return nil, fmt.Errorf("getting layer: %w", err)
	}
	defer layerReader.Close()
	w := new(bytes.Buffer)
	tarWriter := tar.NewWriter(w)
	// The deferred Close is a safety net for early returns; the success
	// path closes explicitly below to surface flush errors.
	defer tarWriter.Close()

	tarReader := tar.NewReader(layerReader)
	for {
		header, err := tarReader.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("reading layer: %w", err)
		}

		// Normalize the modification time; everything else is preserved.
		header.ModTime = t
		if err := tarWriter.WriteHeader(header); err != nil {
			return nil, fmt.Errorf("writing tar header: %w", err)
		}

		if header.Typeflag == tar.TypeReg {
			// TODO(#1168): This should be lazy, and not buffer the entire layer contents.
			if _, err = io.CopyN(tarWriter, tarReader, header.Size); err != nil {
				return nil, fmt.Errorf("writing layer file: %w", err)
			}
		}
	}

	if err := tarWriter.Close(); err != nil {
		return nil, err
	}

	b := w.Bytes()
	// gzip the contents, then create the layer
	opener := func() (io.ReadCloser, error) {
		return gzip.ReadCloser(io.NopCloser(bytes.NewReader(b))), nil
	}
	layer, err = tarball.LayerFromOpener(opener)
	if err != nil {
		return nil, fmt.Errorf("creating layer: %w", err)
	}

	return layer, nil
}
||||||
|
|
||||||
|
// Canonical is a helper function to combine Time and configFile
|
||||||
|
// to remove any randomness during a docker build.
|
||||||
|
func Canonical(img v1.Image) (v1.Image, error) { |
||||||
|
// Set all timestamps to 0
|
||||||
|
created := time.Time{} |
||||||
|
img, err := Time(img, created) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
cf, err := img.ConfigFile() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
// Get rid of host-dependent random config
|
||||||
|
cfg := cf.DeepCopy() |
||||||
|
|
||||||
|
cfg.Container = "" |
||||||
|
cfg.Config.Hostname = "" |
||||||
|
cfg.DockerVersion = "" |
||||||
|
|
||||||
|
return ConfigFile(img, cfg) |
||||||
|
} |
||||||
|
|
||||||
|
// MediaType modifies the MediaType() of the given image.
|
||||||
|
func MediaType(img v1.Image, mt types.MediaType) v1.Image { |
||||||
|
return &image{ |
||||||
|
base: img, |
||||||
|
mediaType: &mt, |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ConfigMediaType modifies the MediaType() of the given image's Config.
|
||||||
|
func ConfigMediaType(img v1.Image, mt types.MediaType) v1.Image { |
||||||
|
return &image{ |
||||||
|
base: img, |
||||||
|
configMediaType: &mt, |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// IndexMediaType modifies the MediaType() of the given index.
|
||||||
|
func IndexMediaType(idx v1.ImageIndex, mt types.MediaType) v1.ImageIndex { |
||||||
|
return &index{ |
||||||
|
base: idx, |
||||||
|
mediaType: &mt, |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,144 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package mutate |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
|
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/empty" |
||||||
|
) |
||||||
|
|
||||||
|
// Rebase returns a new v1.Image where the oldBase in orig is replaced by newBase.
func Rebase(orig, oldBase, newBase v1.Image) (v1.Image, error) {
	// Verify that oldBase's layers are present in orig, otherwise orig is
	// not based on oldBase at all.
	origLayers, err := orig.Layers()
	if err != nil {
		return nil, fmt.Errorf("failed to get layers for original: %w", err)
	}
	oldBaseLayers, err := oldBase.Layers()
	if err != nil {
		return nil, err
	}
	if len(oldBaseLayers) > len(origLayers) {
		return nil, fmt.Errorf("image %q is not based on %q (too few layers)", orig, oldBase)
	}
	// Layer-by-layer digest comparison of the shared prefix.
	for i, l := range oldBaseLayers {
		oldLayerDigest, err := l.Digest()
		if err != nil {
			return nil, fmt.Errorf("failed to get digest of layer %d of %q: %w", i, oldBase, err)
		}
		origLayerDigest, err := origLayers[i].Digest()
		if err != nil {
			return nil, fmt.Errorf("failed to get digest of layer %d of %q: %w", i, orig, err)
		}
		if oldLayerDigest != origLayerDigest {
			return nil, fmt.Errorf("image %q is not based on %q (layer %d mismatch)", orig, oldBase, i)
		}
	}

	oldConfig, err := oldBase.ConfigFile()
	if err != nil {
		return nil, fmt.Errorf("failed to get config for old base: %w", err)
	}

	origConfig, err := orig.ConfigFile()
	if err != nil {
		return nil, fmt.Errorf("failed to get config for original: %w", err)
	}

	newConfig, err := newBase.ConfigFile()
	if err != nil {
		return nil, fmt.Errorf("could not get config for new base: %w", err)
	}

	// Stitch together an image that contains:
	// - original image's config
	// - new base image's os/arch properties
	// - new base image's layers + top of original image's layers
	// - new base image's history + top of original image's history
	rebasedImage, err := Config(empty.Image, *origConfig.Config.DeepCopy())
	if err != nil {
		return nil, fmt.Errorf("failed to create empty image with original config: %w", err)
	}

	// Add new config properties from existing images.
	rebasedConfig, err := rebasedImage.ConfigFile()
	if err != nil {
		return nil, fmt.Errorf("could not get config for rebased image: %w", err)
	}
	// OS/Arch properties from new base
	rebasedConfig.Architecture = newConfig.Architecture
	rebasedConfig.OS = newConfig.OS
	rebasedConfig.OSVersion = newConfig.OSVersion

	// Apply config properties to rebased.
	rebasedImage, err = ConfigFile(rebasedImage, rebasedConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to replace config for rebased image: %w", err)
	}

	// Get new base layers and config for history.
	newBaseLayers, err := newBase.Layers()
	if err != nil {
		return nil, fmt.Errorf("could not get new base layers for new base: %w", err)
	}
	// Add new base layers.
	rebasedImage, err = Append(rebasedImage, createAddendums(0, 0, newConfig.History, newBaseLayers)...)
	if err != nil {
		return nil, fmt.Errorf("failed to append new base image: %w", err)
	}

	// Add original layers above the old base.
	// NOTE(review): startLayer here is len(oldBaseLayers)+1, which looks
	// off-by-one; it appears intentional because createAddendums compares
	// its layer index after incrementing — confirm against createAddendums
	// before changing.
	rebasedImage, err = Append(rebasedImage, createAddendums(len(oldConfig.History), len(oldBaseLayers)+1, origConfig.History, origLayers)...)
	if err != nil {
		return nil, fmt.Errorf("failed to append original image: %w", err)
	}

	return rebasedImage, nil
}
||||||
|
|
||||||
|
// createAddendums makes a list of addendums from a history and layers starting from a specific history and layer
|
||||||
|
// indexes.
|
||||||
|
func createAddendums(startHistory, startLayer int, history []v1.History, layers []v1.Layer) []Addendum { |
||||||
|
var adds []Addendum |
||||||
|
// History should be a superset of layers; empty layers (e.g. ENV statements) only exist in history.
|
||||||
|
// They cannot be iterated identically but must be walked independently, only advancing the iterator for layers
|
||||||
|
// when a history entry for a non-empty layer is seen.
|
||||||
|
layerIndex := 0 |
||||||
|
for historyIndex := range history { |
||||||
|
var layer v1.Layer |
||||||
|
emptyLayer := history[historyIndex].EmptyLayer |
||||||
|
if !emptyLayer { |
||||||
|
layer = layers[layerIndex] |
||||||
|
layerIndex++ |
||||||
|
} |
||||||
|
if historyIndex >= startHistory || layerIndex >= startLayer { |
||||||
|
adds = append(adds, Addendum{ |
||||||
|
Layer: layer, |
||||||
|
History: history[historyIndex], |
||||||
|
}) |
||||||
|
} |
||||||
|
} |
||||||
|
// In the event history was malformed or non-existent, append the remaining layers.
|
||||||
|
for i := layerIndex; i < len(layers); i++ { |
||||||
|
if i >= startLayer { |
||||||
|
adds = append(adds, Addendum{Layer: layers[layerIndex]}) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
return adds |
||||||
|
} |
@ -0,0 +1,280 @@ |
|||||||
|
# `tarball` |
||||||
|
|
||||||
|
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball) |
||||||
|
|
||||||
|
This package produces tarballs that can be consumed via `docker load`. Note |
||||||
|
that this is a _different_ format from the [`legacy`](/pkg/legacy/tarball) |
||||||
|
tarballs that are produced by `docker save`, but this package is still able to |
||||||
|
read the legacy tarballs produced by `docker save`. |
||||||
|
|
||||||
|
## Usage |
||||||
|
|
||||||
|
```go |
||||||
|
package main |
||||||
|
|
||||||
|
import ( |
||||||
|
"os" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/tarball" |
||||||
|
) |
||||||
|
|
||||||
|
func main() { |
||||||
|
// Read a tarball from os.Args[1] that contains ubuntu. |
||||||
|
tag, err := name.NewTag("ubuntu") |
||||||
|
if err != nil { |
||||||
|
panic(err) |
||||||
|
} |
||||||
|
img, err := tarball.ImageFromPath(os.Args[1], &tag) |
||||||
|
if err != nil { |
||||||
|
panic(err) |
||||||
|
} |
||||||
|
|
||||||
|
// Write that tarball to os.Args[2] with a different tag. |
||||||
|
newTag, err := name.NewTag("ubuntu:newest") |
||||||
|
if err != nil { |
||||||
|
panic(err) |
||||||
|
} |
||||||
|
f, err := os.Create(os.Args[2]) |
||||||
|
if err != nil { |
||||||
|
panic(err) |
||||||
|
} |
||||||
|
defer f.Close() |
||||||
|
|
||||||
|
if err := tarball.Write(newTag, img, f); err != nil { |
||||||
|
panic(err) |
||||||
|
} |
||||||
|
} |
||||||
|
``` |
||||||
|
|
||||||
|
## Structure |
||||||
|
|
||||||
|
<p align="center"> |
||||||
|
<img src="/images/tarball.dot.svg" /> |
||||||
|
</p> |
||||||
|
|
||||||
|
Let's look at what happens when we write out a tarball: |
||||||
|
|
||||||
|
|
||||||
|
### `ubuntu:latest` |
||||||
|
|
||||||
|
``` |
||||||
|
$ crane pull ubuntu ubuntu.tar && mkdir ubuntu && tar xf ubuntu.tar -C ubuntu && rm ubuntu.tar |
||||||
|
$ tree ubuntu/ |
||||||
|
ubuntu/ |
||||||
|
├── 423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz |
||||||
|
├── b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz |
||||||
|
├── de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz |
||||||
|
├── f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz |
||||||
|
├── manifest.json |
||||||
|
└── sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c |
||||||
|
|
||||||
|
0 directories, 6 files |
||||||
|
``` |
||||||
|
|
||||||
|
There are a couple interesting files here. |
||||||
|
|
||||||
|
`manifest.json` is the entrypoint: a list of [`tarball.Descriptor`s](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball#Descriptor) |
||||||
|
that describe the images contained in this tarball. |
||||||
|
|
||||||
|
For each image, this has the `RepoTags` (how it was pulled), a `Config` file |
||||||
|
that points to the image's config file, a list of `Layers`, and (optionally) |
||||||
|
`LayerSources`. |
||||||
|
|
||||||
|
``` |
||||||
|
$ jq < ubuntu/manifest.json |
||||||
|
[ |
||||||
|
{ |
||||||
|
"Config": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c", |
||||||
|
"RepoTags": [ |
||||||
|
"ubuntu" |
||||||
|
], |
||||||
|
"Layers": [ |
||||||
|
"423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz", |
||||||
|
"de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz", |
||||||
|
"f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz", |
||||||
|
"b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz" |
||||||
|
] |
||||||
|
} |
||||||
|
] |
||||||
|
``` |
||||||
|
|
||||||
|
The config file and layers are exactly what you would expect, and match the |
||||||
|
registry representations of the same artifacts. You'll notice that the |
||||||
|
`manifest.json` contains similar information as the registry manifest, but isn't |
||||||
|
quite the same: |
||||||
|
|
||||||
|
``` |
||||||
|
$ crane manifest ubuntu@sha256:0925d086715714114c1988f7c947db94064fd385e171a63c07730f1fa014e6f9 |
||||||
|
{ |
||||||
|
"schemaVersion": 2, |
||||||
|
"mediaType": "application/vnd.docker.distribution.manifest.v2+json", |
||||||
|
"config": { |
||||||
|
"mediaType": "application/vnd.docker.container.image.v1+json", |
||||||
|
"size": 3408, |
||||||
|
"digest": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c" |
||||||
|
}, |
||||||
|
"layers": [ |
||||||
|
{ |
||||||
|
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", |
||||||
|
"size": 26692096, |
||||||
|
"digest": "sha256:423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954" |
||||||
|
}, |
||||||
|
{ |
||||||
|
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", |
||||||
|
"size": 35365, |
||||||
|
"digest": "sha256:de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d" |
||||||
|
}, |
||||||
|
{ |
||||||
|
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", |
||||||
|
"size": 852, |
||||||
|
"digest": "sha256:f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b" |
||||||
|
}, |
||||||
|
{ |
||||||
|
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", |
||||||
|
"size": 163, |
||||||
|
"digest": "sha256:b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7" |
||||||
|
} |
||||||
|
] |
||||||
|
} |
||||||
|
``` |
||||||
|
|
||||||
|
This makes it difficult to maintain image digests when roundtripping images |
||||||
|
through the tarball format, so it's not a great format if you care about |
||||||
|
provenance. |
||||||
|
|
||||||
|
The ubuntu example didn't have any `LayerSources` -- let's look at another image |
||||||
|
that does. |
||||||
|
|
||||||
|
### `hello-world:nanoserver` |
||||||
|
|
||||||
|
``` |
||||||
|
$ crane pull hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b nanoserver.tar |
||||||
|
$ mkdir nanoserver && tar xf nanoserver.tar -C nanoserver && rm nanoserver.tar |
||||||
|
$ tree nanoserver/ |
||||||
|
nanoserver/ |
||||||
|
├── 10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz |
||||||
|
├── a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz |
||||||
|
├── be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz |
||||||
|
├── manifest.json |
||||||
|
└── sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6 |
||||||
|
|
||||||
|
0 directories, 5 files |
||||||
|
|
||||||
|
$ jq < nanoserver/manifest.json |
||||||
|
[ |
||||||
|
{ |
||||||
|
"Config": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6", |
||||||
|
"RepoTags": [ |
||||||
|
"index.docker.io/library/hello-world:i-was-a-digest" |
||||||
|
], |
||||||
|
"Layers": [ |
||||||
|
"a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz", |
||||||
|
"be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz", |
||||||
|
"10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz" |
||||||
|
], |
||||||
|
"LayerSources": { |
||||||
|
"sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": { |
||||||
|
"mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", |
||||||
|
"size": 101145811, |
||||||
|
"digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", |
||||||
|
"urls": [ |
||||||
|
"https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" |
||||||
|
] |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
] |
||||||
|
``` |
||||||
|
|
||||||
|
A couple things to note about this `manifest.json` versus the other: |
||||||
|
* The `RepoTags` field is a bit weird here. `hello-world` is a multi-platform |
||||||
|
image, so we had to pull this image by digest, since we're (I'm) on |
||||||
|
amd64/linux and wanted to grab a windows image. Since the tarball format |
||||||
|
expects a tag under `RepoTags`, and we didn't pull by tag, we replace the |
||||||
|
digest with a sentinel `i-was-a-digest` "tag" to appease docker. |
||||||
|
* The `LayerSources` has enough information to reconstruct the foreign layers |
||||||
|
pointer when pushing/pulling from the registry. For legal reasons, microsoft |
||||||
|
doesn't want anyone but them to serve windows base images, so the mediaType |
||||||
|
here indicates a "foreign" or "non-distributable" layer with a URL for where |
||||||
|
you can download it from microsoft (see the [OCI |
||||||
|
image-spec](https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers)). |
||||||
|
|
||||||
|
We can look at what's in the registry to explain both of these things: |
||||||
|
``` |
||||||
|
$ crane manifest hello-world:nanoserver | jq . |
||||||
|
{ |
||||||
|
"manifests": [ |
||||||
|
{ |
||||||
|
"digest": "sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b", |
||||||
|
"mediaType": "application/vnd.docker.distribution.manifest.v2+json", |
||||||
|
"platform": { |
||||||
|
"architecture": "amd64", |
||||||
|
"os": "windows", |
||||||
|
"os.version": "10.0.17763.1040" |
||||||
|
}, |
||||||
|
"size": 1124 |
||||||
|
} |
||||||
|
], |
||||||
|
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", |
||||||
|
"schemaVersion": 2 |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
# Note the media type and "urls" field. |
||||||
|
$ crane manifest hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b | jq . |
||||||
|
{ |
||||||
|
"schemaVersion": 2, |
||||||
|
"mediaType": "application/vnd.docker.distribution.manifest.v2+json", |
||||||
|
"config": { |
||||||
|
"mediaType": "application/vnd.docker.container.image.v1+json", |
||||||
|
"size": 1721, |
||||||
|
"digest": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6" |
||||||
|
}, |
||||||
|
"layers": [ |
||||||
|
{ |
||||||
|
"mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", |
||||||
|
"size": 101145811, |
||||||
|
"digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", |
||||||
|
"urls": [ |
||||||
|
"https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", |
||||||
|
"size": 1669, |
||||||
|
"digest": "sha256:be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a" |
||||||
|
}, |
||||||
|
{ |
||||||
|
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", |
||||||
|
"size": 949, |
||||||
|
"digest": "sha256:10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0" |
||||||
|
} |
||||||
|
] |
||||||
|
} |
||||||
|
``` |
||||||
|
|
||||||
|
The `LayerSources` map is keyed by the diffid. Note that `sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e` matches the first layer in the config file: |
||||||
|
``` |
||||||
|
$ jq '.[0].LayerSources' < nanoserver/manifest.json |
||||||
|
{ |
||||||
|
"sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": { |
||||||
|
"mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", |
||||||
|
"size": 101145811, |
||||||
|
"digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", |
||||||
|
"urls": [ |
||||||
|
"https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" |
||||||
|
] |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
$ jq < nanoserver/sha256\:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6 | jq .rootfs |
||||||
|
{ |
||||||
|
"type": "layers", |
||||||
|
"diff_ids": [ |
||||||
|
"sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e", |
||||||
|
"sha256:601cf7d78c62e4b4d32a7bbf96a17606a9cea5bd9d22ffa6f34aa431d056b0e8", |
||||||
|
"sha256:a1e1a3bf6529adcce4d91dce2cad86c2604a66b507ccbc4d2239f3da0ec5aab9" |
||||||
|
] |
||||||
|
} |
||||||
|
``` |
@ -0,0 +1,17 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package tarball provides facilities for reading/writing v1.Images from/to
|
||||||
|
// a tarball on-disk.
|
||||||
|
package tarball |
@ -0,0 +1,429 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package tarball |
||||||
|
|
||||||
|
import ( |
||||||
|
"archive/tar" |
||||||
|
"bytes" |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"os" |
||||||
|
"path" |
||||||
|
"path/filepath" |
||||||
|
"sync" |
||||||
|
|
||||||
|
comp "github.com/google/go-containerregistry/internal/compression" |
||||||
|
"github.com/google/go-containerregistry/pkg/compression" |
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
// image is the shared state for images loaded from a tarball: the opener used
// to (re-)read the tar, the decoded manifest.json, the raw config bytes, and
// the descriptor selected by tag.
type image struct {
	opener        Opener
	manifest      *Manifest
	config        []byte
	imgDescriptor *Descriptor

	// tag selects which image in a multi-image tarball to expose; nil
	// requires the tarball to contain exactly one image (see findDescriptor).
	tag *name.Tag
}

// uncompressedImage exposes a tarball whose layer blobs are stored uncompressed.
type uncompressedImage struct {
	*image
}

// compressedImage exposes a tarball whose layer blobs are stored compressed.
type compressedImage struct {
	*image
	manifestLock sync.Mutex // Protects manifest
	// manifest is built lazily by Manifest() from the config and layer blobs.
	manifest *v1.Manifest
}

// Compile-time interface satisfaction checks.
var _ partial.UncompressedImageCore = (*uncompressedImage)(nil)
var _ partial.CompressedImageCore = (*compressedImage)(nil)

// Opener is a thunk for opening a tar file.
type Opener func() (io.ReadCloser, error)
||||||
|
|
||||||
|
func pathOpener(path string) Opener { |
||||||
|
return func() (io.ReadCloser, error) { |
||||||
|
return os.Open(path) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ImageFromPath returns a v1.Image from a tarball located on path.
// A nil tag is allowed only when the tarball contains a single image.
func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) {
	return Image(pathOpener(path), tag)
}
||||||
|
|
||||||
|
// LoadManifest load manifest
|
||||||
|
func LoadManifest(opener Opener) (Manifest, error) { |
||||||
|
m, err := extractFileFromTar(opener, "manifest.json") |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
defer m.Close() |
||||||
|
|
||||||
|
var manifest Manifest |
||||||
|
|
||||||
|
if err := json.NewDecoder(m).Decode(&manifest); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return manifest, nil |
||||||
|
} |
||||||
|
|
||||||
|
// Image exposes an image from the tarball at the provided path.
|
||||||
|
func Image(opener Opener, tag *name.Tag) (v1.Image, error) { |
||||||
|
img := &image{ |
||||||
|
opener: opener, |
||||||
|
tag: tag, |
||||||
|
} |
||||||
|
if err := img.loadTarDescriptorAndConfig(); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
// Peek at the first layer and see if it's compressed.
|
||||||
|
if len(img.imgDescriptor.Layers) > 0 { |
||||||
|
compressed, err := img.areLayersCompressed() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
if compressed { |
||||||
|
c := compressedImage{ |
||||||
|
image: img, |
||||||
|
} |
||||||
|
return partial.CompressedToImage(&c) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
uc := uncompressedImage{ |
||||||
|
image: img, |
||||||
|
} |
||||||
|
return partial.UncompressedToImage(&uc) |
||||||
|
} |
||||||
|
|
||||||
|
// MediaType reports the manifest media type; images loaded from a tarball
// are always exposed as Docker schema 2 manifests.
func (i *image) MediaType() (types.MediaType, error) {
	return types.DockerManifestSchema2, nil
}
||||||
|
|
||||||
|
// Descriptor stores the manifest data for a single image inside a `docker save` tarball.
type Descriptor struct {
	Config   string   // path of the image config file within the tar
	RepoTags []string // tags the image was saved under
	Layers   []string // paths of the layer blobs inside the tar, in order

	// Tracks foreign layer info. Key is DiffID.
	LayerSources map[v1.Hash]v1.Descriptor `json:",omitempty"`
}

// Manifest represents the manifests of all images as the `manifest.json` file in a `docker save` tarball.
type Manifest []Descriptor
||||||
|
|
||||||
|
func (m Manifest) findDescriptor(tag *name.Tag) (*Descriptor, error) { |
||||||
|
if tag == nil { |
||||||
|
if len(m) != 1 { |
||||||
|
return nil, errors.New("tarball must contain only a single image to be used with tarball.Image") |
||||||
|
} |
||||||
|
return &(m)[0], nil |
||||||
|
} |
||||||
|
for _, img := range m { |
||||||
|
for _, tagStr := range img.RepoTags { |
||||||
|
repoTag, err := name.NewTag(tagStr) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
// Compare the resolved names, since there are several ways to specify the same tag.
|
||||||
|
if repoTag.Name() == tag.Name() { |
||||||
|
return &img, nil |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
return nil, fmt.Errorf("tag %s not found in tarball", tag) |
||||||
|
} |
||||||
|
|
||||||
|
func (i *image) areLayersCompressed() (bool, error) { |
||||||
|
if len(i.imgDescriptor.Layers) == 0 { |
||||||
|
return false, errors.New("0 layers found in image") |
||||||
|
} |
||||||
|
layer := i.imgDescriptor.Layers[0] |
||||||
|
blob, err := extractFileFromTar(i.opener, layer) |
||||||
|
if err != nil { |
||||||
|
return false, err |
||||||
|
} |
||||||
|
defer blob.Close() |
||||||
|
|
||||||
|
cp, _, err := comp.PeekCompression(blob) |
||||||
|
if err != nil { |
||||||
|
return false, err |
||||||
|
} |
||||||
|
|
||||||
|
return cp != compression.None, nil |
||||||
|
} |
||||||
|
|
||||||
|
func (i *image) loadTarDescriptorAndConfig() error { |
||||||
|
m, err := extractFileFromTar(i.opener, "manifest.json") |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
defer m.Close() |
||||||
|
|
||||||
|
if err := json.NewDecoder(m).Decode(&i.manifest); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
if i.manifest == nil { |
||||||
|
return errors.New("no valid manifest.json in tarball") |
||||||
|
} |
||||||
|
|
||||||
|
i.imgDescriptor, err = i.manifest.findDescriptor(i.tag) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
cfg, err := extractFileFromTar(i.opener, i.imgDescriptor.Config) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
defer cfg.Close() |
||||||
|
|
||||||
|
i.config, err = io.ReadAll(cfg) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// RawConfigFile returns the raw bytes of the image config, as read from the
// tarball by loadTarDescriptorAndConfig.
func (i *image) RawConfigFile() ([]byte, error) {
	return i.config, nil
}
||||||
|
|
||||||
|
// tarFile represents a single file inside a tar. Closing it closes the tar itself.
type tarFile struct {
	io.Reader // positioned at the file's contents within the tar
	io.Closer // closes the underlying tar stream
}
||||||
|
|
||||||
|
func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { |
||||||
|
f, err := opener() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
close := true |
||||||
|
defer func() { |
||||||
|
if close { |
||||||
|
f.Close() |
||||||
|
} |
||||||
|
}() |
||||||
|
|
||||||
|
tf := tar.NewReader(f) |
||||||
|
for { |
||||||
|
hdr, err := tf.Next() |
||||||
|
if errors.Is(err, io.EOF) { |
||||||
|
break |
||||||
|
} |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
if hdr.Name == filePath { |
||||||
|
if hdr.Typeflag == tar.TypeSymlink || hdr.Typeflag == tar.TypeLink { |
||||||
|
currentDir := filepath.Dir(filePath) |
||||||
|
return extractFileFromTar(opener, path.Join(currentDir, path.Clean(hdr.Linkname))) |
||||||
|
} |
||||||
|
close = false |
||||||
|
return tarFile{ |
||||||
|
Reader: tf, |
||||||
|
Closer: f, |
||||||
|
}, nil |
||||||
|
} |
||||||
|
} |
||||||
|
return nil, fmt.Errorf("file %s not found in tar", filePath) |
||||||
|
} |
||||||
|
|
||||||
|
// uncompressedLayerFromTarball implements partial.UncompressedLayer
// by re-reading the named file from the tarball on each access.
type uncompressedLayerFromTarball struct {
	diffID    v1.Hash         // digest of the uncompressed layer contents
	mediaType types.MediaType // media type reported to consumers
	opener    Opener          // re-opens the enclosing tarball
	filePath  string          // layer blob path inside the tar
}
||||||
|
|
||||||
|
// foreignUncompressedLayer implements partial.UncompressedLayer but returns
// a custom descriptor. This allows the foreign layer URLs to be included in
// the generated image manifest for uncompressed layers.
type foreignUncompressedLayer struct {
	uncompressedLayerFromTarball
	// desc is the descriptor (including URLs) taken from the tarball's
	// LayerSources map.
	desc v1.Descriptor
}

// Descriptor returns the stored foreign-layer descriptor verbatim.
func (fl *foreignUncompressedLayer) Descriptor() (*v1.Descriptor, error) {
	return &fl.desc, nil
}
||||||
|
|
||||||
|
// DiffID implements partial.UncompressedLayer
func (ulft *uncompressedLayerFromTarball) DiffID() (v1.Hash, error) {
	return ulft.diffID, nil
}

// Uncompressed implements partial.UncompressedLayer
// Each call re-opens the tarball and seeks to the layer file.
func (ulft *uncompressedLayerFromTarball) Uncompressed() (io.ReadCloser, error) {
	return extractFileFromTar(ulft.opener, ulft.filePath)
}

// MediaType reports the layer's media type as stored in the struct.
func (ulft *uncompressedLayerFromTarball) MediaType() (types.MediaType, error) {
	return ulft.mediaType, nil
}
||||||
|
|
||||||
|
func (i *uncompressedImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { |
||||||
|
cfg, err := partial.ConfigFile(i) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
for idx, diffID := range cfg.RootFS.DiffIDs { |
||||||
|
if diffID == h { |
||||||
|
// Technically the media type should be 'application/tar' but given that our
|
||||||
|
// v1.Layer doesn't force consumers to care about whether the layer is compressed
|
||||||
|
// we should be fine returning the DockerLayer media type
|
||||||
|
mt := types.DockerLayer |
||||||
|
if bd, ok := i.imgDescriptor.LayerSources[h]; ok { |
||||||
|
// Overwrite the mediaType for foreign layers.
|
||||||
|
return &foreignUncompressedLayer{ |
||||||
|
uncompressedLayerFromTarball: uncompressedLayerFromTarball{ |
||||||
|
diffID: diffID, |
||||||
|
mediaType: bd.MediaType, |
||||||
|
opener: i.opener, |
||||||
|
filePath: i.imgDescriptor.Layers[idx], |
||||||
|
}, |
||||||
|
desc: bd, |
||||||
|
}, nil |
||||||
|
} |
||||||
|
return &uncompressedLayerFromTarball{ |
||||||
|
diffID: diffID, |
||||||
|
mediaType: mt, |
||||||
|
opener: i.opener, |
||||||
|
filePath: i.imgDescriptor.Layers[idx], |
||||||
|
}, nil |
||||||
|
} |
||||||
|
} |
||||||
|
return nil, fmt.Errorf("diff id %q not found", h) |
||||||
|
} |
||||||
|
|
||||||
|
func (c *compressedImage) Manifest() (*v1.Manifest, error) { |
||||||
|
c.manifestLock.Lock() |
||||||
|
defer c.manifestLock.Unlock() |
||||||
|
if c.manifest != nil { |
||||||
|
return c.manifest, nil |
||||||
|
} |
||||||
|
|
||||||
|
b, err := c.RawConfigFile() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
c.manifest = &v1.Manifest{ |
||||||
|
SchemaVersion: 2, |
||||||
|
MediaType: types.DockerManifestSchema2, |
||||||
|
Config: v1.Descriptor{ |
||||||
|
MediaType: types.DockerConfigJSON, |
||||||
|
Size: cfgSize, |
||||||
|
Digest: cfgHash, |
||||||
|
}, |
||||||
|
} |
||||||
|
|
||||||
|
for i, p := range c.imgDescriptor.Layers { |
||||||
|
cfg, err := partial.ConfigFile(c) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
diffid := cfg.RootFS.DiffIDs[i] |
||||||
|
if d, ok := c.imgDescriptor.LayerSources[diffid]; ok { |
||||||
|
// If it's a foreign layer, just append the descriptor so we can avoid
|
||||||
|
// reading the entire file.
|
||||||
|
c.manifest.Layers = append(c.manifest.Layers, d) |
||||||
|
} else { |
||||||
|
l, err := extractFileFromTar(c.opener, p) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
defer l.Close() |
||||||
|
sha, size, err := v1.SHA256(l) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
c.manifest.Layers = append(c.manifest.Layers, v1.Descriptor{ |
||||||
|
MediaType: types.DockerLayer, |
||||||
|
Size: size, |
||||||
|
Digest: sha, |
||||||
|
}) |
||||||
|
} |
||||||
|
} |
||||||
|
return c.manifest, nil |
||||||
|
} |
||||||
|
|
||||||
|
// RawManifest returns the JSON serialization of the lazily-built manifest.
func (c *compressedImage) RawManifest() ([]byte, error) {
	return partial.RawManifest(c)
}
||||||
|
|
||||||
|
// compressedLayerFromTarball implements partial.CompressedLayer
// by re-reading the named file from the tarball on each access.
type compressedLayerFromTarball struct {
	desc     v1.Descriptor // digest, size, and media type from the built manifest
	opener   Opener        // re-opens the enclosing tarball
	filePath string        // layer blob path inside the tar
}
||||||
|
|
||||||
|
// Digest implements partial.CompressedLayer
func (clft *compressedLayerFromTarball) Digest() (v1.Hash, error) {
	return clft.desc.Digest, nil
}

// Compressed implements partial.CompressedLayer
// Each call re-opens the tarball and seeks to the layer file.
func (clft *compressedLayerFromTarball) Compressed() (io.ReadCloser, error) {
	return extractFileFromTar(clft.opener, clft.filePath)
}

// MediaType implements partial.CompressedLayer
func (clft *compressedLayerFromTarball) MediaType() (types.MediaType, error) {
	return clft.desc.MediaType, nil
}

// Size implements partial.CompressedLayer
func (clft *compressedLayerFromTarball) Size() (int64, error) {
	return clft.desc.Size, nil
}
||||||
|
|
||||||
|
func (c *compressedImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { |
||||||
|
m, err := c.Manifest() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
for i, l := range m.Layers { |
||||||
|
if l.Digest == h { |
||||||
|
fp := c.imgDescriptor.Layers[i] |
||||||
|
return &compressedLayerFromTarball{ |
||||||
|
desc: l, |
||||||
|
opener: c.opener, |
||||||
|
filePath: fp, |
||||||
|
}, nil |
||||||
|
} |
||||||
|
} |
||||||
|
return nil, fmt.Errorf("blob %v not found", h) |
||||||
|
} |
@ -0,0 +1,349 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package tarball |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"compress/gzip" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"os" |
||||||
|
"sync" |
||||||
|
|
||||||
|
"github.com/containerd/stargz-snapshotter/estargz" |
||||||
|
"github.com/google/go-containerregistry/internal/and" |
||||||
|
comp "github.com/google/go-containerregistry/internal/compression" |
||||||
|
gestargz "github.com/google/go-containerregistry/internal/estargz" |
||||||
|
ggzip "github.com/google/go-containerregistry/internal/gzip" |
||||||
|
"github.com/google/go-containerregistry/internal/zstd" |
||||||
|
"github.com/google/go-containerregistry/pkg/compression" |
||||||
|
"github.com/google/go-containerregistry/pkg/logs" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/types" |
||||||
|
) |
||||||
|
|
||||||
|
// layer is a v1.Layer implementation backed by a pair of Opener functions,
// one yielding the compressed representation and one the uncompressed tarball.
// Digest, diffID and size are pre-computed at construction time.
type layer struct {
	digest             v1.Hash // digest of the compressed bytes
	diffID             v1.Hash // digest of the uncompressed tarball
	size               int64   // size of the compressed bytes
	compressedopener   Opener  // yields the compressed stream
	uncompressedopener Opener  // yields the uncompressed stream
	compression        compression.Compression
	compressionLevel   int
	annotations        map[string]string
	estgzopts          []estargz.Option // options forwarded when estargz is enabled
	mediaType          types.MediaType
}
||||||
|
|
||||||
|
// Descriptor implements partial.withDescriptor.
// It synthesizes a v1.Descriptor from the layer's pre-computed digest,
// size, annotations and media type.
func (l *layer) Descriptor() (*v1.Descriptor, error) {
	digest, err := l.Digest()
	if err != nil {
		return nil, err
	}
	return &v1.Descriptor{
		Size:        l.size,
		Digest:      digest,
		Annotations: l.annotations,
		MediaType:   l.mediaType,
	}, nil
}
||||||
|
|
||||||
|
// Digest implements v1.Layer.
// The digest is computed once at construction, so this never fails.
func (l *layer) Digest() (v1.Hash, error) {
	return l.digest, nil
}
||||||
|
|
||||||
|
// DiffID implements v1.Layer.
// The diffID is computed once at construction, so this never fails.
func (l *layer) DiffID() (v1.Hash, error) {
	return l.diffID, nil
}
||||||
|
|
||||||
|
// Compressed implements v1.Layer.
// Each call re-invokes the opener, so callers get a fresh stream.
func (l *layer) Compressed() (io.ReadCloser, error) {
	return l.compressedopener()
}
||||||
|
|
||||||
|
// Uncompressed implements v1.Layer.
// Each call re-invokes the opener, so callers get a fresh stream.
func (l *layer) Uncompressed() (io.ReadCloser, error) {
	return l.uncompressedopener()
}
||||||
|
|
||||||
|
// Size implements v1.Layer, returning the size of the compressed bytes.
func (l *layer) Size() (int64, error) {
	return l.size, nil
}
||||||
|
|
||||||
|
// MediaType implements v1.Layer.
func (l *layer) MediaType() (types.MediaType, error) {
	return l.mediaType, nil
}
||||||
|
|
||||||
|
// LayerOption applies options to layer.
// Options are applied in LayerFromOpener before digests are computed,
// so they may freely mutate the layer's configuration.
type LayerOption func(*layer)
||||||
|
|
||||||
|
// WithCompression is a functional option for overriding the default
|
||||||
|
// compression algorithm used for compressing uncompressed tarballs.
|
||||||
|
// Please note that WithCompression(compression.ZStd) should be used
|
||||||
|
// in conjunction with WithMediaType(types.OCILayerZStd)
|
||||||
|
func WithCompression(comp compression.Compression) LayerOption { |
||||||
|
return func(l *layer) { |
||||||
|
switch comp { |
||||||
|
case compression.ZStd: |
||||||
|
l.compression = compression.ZStd |
||||||
|
case compression.GZip: |
||||||
|
l.compression = compression.GZip |
||||||
|
case compression.None: |
||||||
|
logs.Warn.Printf("Compression type 'none' is not supported for tarball layers; using gzip compression.") |
||||||
|
l.compression = compression.GZip |
||||||
|
default: |
||||||
|
logs.Warn.Printf("Unexpected compression type for WithCompression(): %s; using gzip compression instead.", comp) |
||||||
|
l.compression = compression.GZip |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// WithCompressionLevel is a functional option for overriding the default
// compression level used for compressing uncompressed tarballs.
// The level is used for both the gzip and zstd code paths.
func WithCompressionLevel(level int) LayerOption {
	return func(l *layer) {
		l.compressionLevel = level
	}
}
||||||
|
|
||||||
|
// WithMediaType is a functional option for overriding the layer's media type.
// A warning is logged later (in LayerFromOpener) if the media type does not
// match the selected compression.
func WithMediaType(mt types.MediaType) LayerOption {
	return func(l *layer) {
		l.mediaType = mt
	}
}
||||||
|
|
||||||
|
// WithCompressedCaching is a functional option that overrides the
// logic for accessing the compressed bytes to memoize the result
// and avoid expensive repeated gzips.
//
// Note: the entire compressed layer is buffered in memory, and a failure
// from the first invocation is remembered and returned on every
// subsequent call.
func WithCompressedCaching(l *layer) {
	var once sync.Once
	var err error // captured by the closure; set at most once inside once.Do

	buf := bytes.NewBuffer(nil)
	og := l.compressedopener

	l.compressedopener = func() (io.ReadCloser, error) {
		// Run the original opener exactly once, copying its output into buf.
		once.Do(func() {
			var rc io.ReadCloser
			rc, err = og()
			if err == nil {
				defer rc.Close()
				_, err = io.Copy(buf, rc)
			}
		})
		if err != nil {
			return nil, err
		}

		// Replay the memoized bytes; each caller gets an independent reader.
		return io.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
	}
}
||||||
|
|
||||||
|
// WithEstargzOptions is a functional option that allow the caller to pass
// through estargz.Options to the underlying compression layer. This is
// only meaningful when estargz is enabled.
//
// Note: successive calls replace (not append to) previously-set options.
func WithEstargzOptions(opts ...estargz.Option) LayerOption {
	return func(l *layer) {
		l.estgzopts = opts
	}
}
||||||
|
|
||||||
|
// WithEstargz is a functional option that explicitly enables estargz support.
// It rewires the layer's openers so the compressed stream is estargz-encoded
// and the uncompressed stream is recovered by gunzipping that encoding.
func WithEstargz(l *layer) {
	oguncompressed := l.uncompressedopener
	// NOTE(review): this local closure shadows the estargz package for the
	// rest of the function; per Go scoping, references to
	// estargz.WithCompressionLevel / estargz.TOCJSONDigestAnnotation inside
	// the literal still resolve to the package.
	estargz := func() (io.ReadCloser, error) {
		crc, err := oguncompressed()
		if err != nil {
			return nil, err
		}
		eopts := append(l.estgzopts, estargz.WithCompressionLevel(l.compressionLevel))
		rc, h, err := gestargz.ReadCloser(crc, eopts...)
		if err != nil {
			return nil, err
		}
		// Record the TOC digest so consumers can locate the estargz index.
		l.annotations[estargz.TOCJSONDigestAnnotation] = h.String()
		return &and.ReadCloser{
			Reader: rc,
			CloseFunc: func() error {
				err := rc.Close()
				if err != nil {
					return err
				}
				// As an optimization, leverage the DiffID exposed by the estargz ReadCloser
				l.diffID, err = v1.NewHash(rc.DiffID().String())
				return err
			},
		}, nil
	}
	uncompressed := func() (io.ReadCloser, error) {
		urc, err := estargz()
		if err != nil {
			return nil, err
		}
		// estargz output is gzip-compatible, so gunzip yields the tarball.
		return ggzip.UnzipReadCloser(urc)
	}

	l.compressedopener = estargz
	l.uncompressedopener = uncompressed
}
||||||
|
|
||||||
|
// LayerFromFile returns a v1.Layer given a tarball
|
||||||
|
func LayerFromFile(path string, opts ...LayerOption) (v1.Layer, error) { |
||||||
|
opener := func() (io.ReadCloser, error) { |
||||||
|
return os.Open(path) |
||||||
|
} |
||||||
|
return LayerFromOpener(opener, opts...) |
||||||
|
} |
||||||
|
|
||||||
|
// LayerFromOpener returns a v1.Layer given an Opener function.
|
||||||
|
// The Opener may return either an uncompressed tarball (common),
|
||||||
|
// or a compressed tarball (uncommon).
|
||||||
|
//
|
||||||
|
// When using this in conjunction with something like remote.Write
|
||||||
|
// the uncompressed path may end up gzipping things multiple times:
|
||||||
|
// 1. Compute the layer SHA256
|
||||||
|
// 2. Upload the compressed layer.
|
||||||
|
//
|
||||||
|
// Since gzip can be expensive, we support an option to memoize the
|
||||||
|
// compression that can be passed here: tarball.WithCompressedCaching
|
||||||
|
func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) { |
||||||
|
comp, err := comp.GetCompression(opener) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
layer := &layer{ |
||||||
|
compression: compression.GZip, |
||||||
|
compressionLevel: gzip.BestSpeed, |
||||||
|
annotations: make(map[string]string, 1), |
||||||
|
mediaType: types.DockerLayer, |
||||||
|
} |
||||||
|
|
||||||
|
if estgz := os.Getenv("GGCR_EXPERIMENT_ESTARGZ"); estgz == "1" { |
||||||
|
opts = append([]LayerOption{WithEstargz}, opts...) |
||||||
|
} |
||||||
|
|
||||||
|
switch comp { |
||||||
|
case compression.GZip: |
||||||
|
layer.compressedopener = opener |
||||||
|
layer.uncompressedopener = func() (io.ReadCloser, error) { |
||||||
|
urc, err := opener() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return ggzip.UnzipReadCloser(urc) |
||||||
|
} |
||||||
|
case compression.ZStd: |
||||||
|
layer.compressedopener = opener |
||||||
|
layer.uncompressedopener = func() (io.ReadCloser, error) { |
||||||
|
urc, err := opener() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return zstd.UnzipReadCloser(urc) |
||||||
|
} |
||||||
|
default: |
||||||
|
layer.uncompressedopener = opener |
||||||
|
layer.compressedopener = func() (io.ReadCloser, error) { |
||||||
|
crc, err := opener() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
if layer.compression == compression.ZStd { |
||||||
|
return zstd.ReadCloserLevel(crc, layer.compressionLevel), nil |
||||||
|
} |
||||||
|
|
||||||
|
return ggzip.ReadCloserLevel(crc, layer.compressionLevel), nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
for _, opt := range opts { |
||||||
|
opt(layer) |
||||||
|
} |
||||||
|
|
||||||
|
// Warn if media type does not match compression
|
||||||
|
var mediaTypeMismatch = false |
||||||
|
switch layer.compression { |
||||||
|
case compression.GZip: |
||||||
|
mediaTypeMismatch = |
||||||
|
layer.mediaType != types.OCILayer && |
||||||
|
layer.mediaType != types.OCIRestrictedLayer && |
||||||
|
layer.mediaType != types.DockerLayer |
||||||
|
|
||||||
|
case compression.ZStd: |
||||||
|
mediaTypeMismatch = layer.mediaType != types.OCILayerZStd |
||||||
|
} |
||||||
|
|
||||||
|
if mediaTypeMismatch { |
||||||
|
logs.Warn.Printf("Unexpected mediaType (%s) for selected compression in %s in LayerFromOpener().", layer.mediaType, layer.compression) |
||||||
|
} |
||||||
|
|
||||||
|
if layer.digest, layer.size, err = computeDigest(layer.compressedopener); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
empty := v1.Hash{} |
||||||
|
if layer.diffID == empty { |
||||||
|
if layer.diffID, err = computeDiffID(layer.uncompressedopener); err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
return layer, nil |
||||||
|
} |
||||||
|
|
||||||
|
// LayerFromReader returns a v1.Layer given a io.Reader.
|
||||||
|
//
|
||||||
|
// The reader's contents are read and buffered to a temp file in the process.
|
||||||
|
//
|
||||||
|
// Deprecated: Use LayerFromOpener or stream.NewLayer instead, if possible.
|
||||||
|
func LayerFromReader(reader io.Reader, opts ...LayerOption) (v1.Layer, error) { |
||||||
|
tmp, err := os.CreateTemp("", "") |
||||||
|
if err != nil { |
||||||
|
return nil, fmt.Errorf("creating temp file to buffer reader: %w", err) |
||||||
|
} |
||||||
|
if _, err := io.Copy(tmp, reader); err != nil { |
||||||
|
return nil, fmt.Errorf("writing temp file to buffer reader: %w", err) |
||||||
|
} |
||||||
|
return LayerFromFile(tmp.Name(), opts...) |
||||||
|
} |
||||||
|
|
||||||
|
// computeDigest opens the (compressed) stream and returns its SHA256
// digest along with the number of bytes read.
func computeDigest(opener Opener) (v1.Hash, int64, error) {
	rc, err := opener()
	if err != nil {
		return v1.Hash{}, 0, err
	}
	defer rc.Close()

	return v1.SHA256(rc)
}
||||||
|
|
||||||
|
// computeDiffID opens the (uncompressed) stream and returns its SHA256
// digest, discarding the byte count.
func computeDiffID(opener Opener) (v1.Hash, error) {
	rc, err := opener()
	if err != nil {
		return v1.Hash{}, err
	}
	defer rc.Close()

	digest, _, err := v1.SHA256(rc)
	return digest, err
}
@ -0,0 +1,459 @@ |
|||||||
|
// Copyright 2018 Google LLC All Rights Reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package tarball |
||||||
|
|
||||||
|
import ( |
||||||
|
"archive/tar" |
||||||
|
"bytes" |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"os" |
||||||
|
"sort" |
||||||
|
"strings" |
||||||
|
|
||||||
|
"github.com/google/go-containerregistry/pkg/name" |
||||||
|
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||||
|
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||||
|
) |
||||||
|
|
||||||
|
// WriteToFile writes in the compressed format to a tarball, on disk.
|
||||||
|
// This is just syntactic sugar wrapping tarball.Write with a new file.
|
||||||
|
func WriteToFile(p string, ref name.Reference, img v1.Image, opts ...WriteOption) error { |
||||||
|
w, err := os.Create(p) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
defer w.Close() |
||||||
|
|
||||||
|
return Write(ref, img, w, opts...) |
||||||
|
} |
||||||
|
|
||||||
|
// MultiWriteToFile writes in the compressed format to a tarball, on disk.
|
||||||
|
// This is just syntactic sugar wrapping tarball.MultiWrite with a new file.
|
||||||
|
func MultiWriteToFile(p string, tagToImage map[name.Tag]v1.Image, opts ...WriteOption) error { |
||||||
|
refToImage := make(map[name.Reference]v1.Image, len(tagToImage)) |
||||||
|
for i, d := range tagToImage { |
||||||
|
refToImage[i] = d |
||||||
|
} |
||||||
|
return MultiRefWriteToFile(p, refToImage, opts...) |
||||||
|
} |
||||||
|
|
||||||
|
// MultiRefWriteToFile writes in the compressed format to a tarball, on disk.
|
||||||
|
// This is just syntactic sugar wrapping tarball.MultiRefWrite with a new file.
|
||||||
|
func MultiRefWriteToFile(p string, refToImage map[name.Reference]v1.Image, opts ...WriteOption) error { |
||||||
|
w, err := os.Create(p) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
defer w.Close() |
||||||
|
|
||||||
|
return MultiRefWrite(refToImage, w, opts...) |
||||||
|
} |
||||||
|
|
||||||
|
// Write is a wrapper to write a single image and tag to a tarball.
// It delegates to MultiRefWrite with a single-entry map.
func Write(ref name.Reference, img v1.Image, w io.Writer, opts ...WriteOption) error {
	return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w, opts...)
}
||||||
|
|
||||||
|
// MultiWrite writes the contents of each image to the provided reader, in the compressed format.
|
||||||
|
// The contents are written in the following format:
|
||||||
|
// One manifest.json file at the top level containing information about several images.
|
||||||
|
// One file for each layer, named after the layer's SHA.
|
||||||
|
// One file for the config blob, named after its SHA.
|
||||||
|
func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer, opts ...WriteOption) error { |
||||||
|
refToImage := make(map[name.Reference]v1.Image, len(tagToImage)) |
||||||
|
for i, d := range tagToImage { |
||||||
|
refToImage[i] = d |
||||||
|
} |
||||||
|
return MultiRefWrite(refToImage, w, opts...) |
||||||
|
} |
||||||
|
|
||||||
|
// MultiRefWrite writes the contents of each image to the provided reader, in the compressed format.
// The contents are written in the following format:
// One manifest.json file at the top level containing information about several images.
// One file for each layer, named after the layer's SHA.
// One file for the config blob, named after its SHA.
func MultiRefWrite(refToImage map[name.Reference]v1.Image, w io.Writer, opts ...WriteOption) error {
	// process options
	o := &writeOptions{
		updates: nil,
	}
	for _, option := range opts {
		if err := option(o); err != nil {
			return err
		}
	}

	// Pre-compute the manifest and total output size up front so progress
	// updates can report a meaningful Total.
	size, mBytes, err := getSizeAndManifest(refToImage)
	if err != nil {
		return sendUpdateReturn(o, err)
	}

	return writeImagesToTar(refToImage, mBytes, size, w, o)
}
||||||
|
|
||||||
|
// sendUpdateReturn returns the passed-in error, also sending it on the
// update channel if one is configured.
func sendUpdateReturn(o *writeOptions, err error) error {
	if o != nil && o.updates != nil {
		o.updates <- v1.Update{
			Error: err,
		}
	}
	return err
}
||||||
|
|
||||||
|
// sendProgressWriterReturn returns the passed-in error, also sending it on
// the progress writer's update channel (along with the bytes written so
// far) when a progress writer is in use.
func sendProgressWriterReturn(pw *progressWriter, err error) error {
	if pw != nil {
		return pw.Error(err)
	}
	return err
}
||||||
|
|
||||||
|
// writeImagesToTar writes the images to the tarball: each image's config
// blob, each distinct layer blob (written once even if shared between
// images), and finally the pre-serialized manifest m. Progress is reported
// on o.updates when configured; size is the pre-computed total used as the
// progress Total.
func writeImagesToTar(refToImage map[name.Reference]v1.Image, m []byte, size int64, w io.Writer, o *writeOptions) (err error) {
	if w == nil {
		return sendUpdateReturn(o, errors.New("must pass valid writer"))
	}
	imageToTags := dedupRefToImage(refToImage)

	tw := w
	var pw *progressWriter

	// we only calculate the sizes and use a progressWriter if we were provided
	// an option with a progress channel
	if o != nil && o.updates != nil {
		pw = &progressWriter{
			w:       w,
			updates: o.updates,
			size:    size,
		}
		tw = pw
	}

	tf := tar.NewWriter(tw)
	defer tf.Close()

	// Layers may be shared between images; write each blob only once.
	seenLayerDigests := make(map[string]struct{})

	for img := range imageToTags {
		// Write the config.
		cfgName, err := img.ConfigName()
		if err != nil {
			return sendProgressWriterReturn(pw, err)
		}
		cfgBlob, err := img.RawConfigFile()
		if err != nil {
			return sendProgressWriterReturn(pw, err)
		}
		if err := writeTarEntry(tf, cfgName.String(), bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil {
			return sendProgressWriterReturn(pw, err)
		}

		// Write the layers.
		layers, err := img.Layers()
		if err != nil {
			return sendProgressWriterReturn(pw, err)
		}
		layerFiles := make([]string, len(layers))
		for i, l := range layers {
			d, err := l.Digest()
			if err != nil {
				return sendProgressWriterReturn(pw, err)
			}
			// Munge the file name to appease ancient technology.
			//
			// tar assumes anything with a colon is a remote tape drive:
			// https://www.gnu.org/software/tar/manual/html_section/tar_45.html
			// Drop the algorithm prefix, e.g. "sha256:"
			hex := d.Hex

			// gunzip expects certain file extensions:
			// https://www.gnu.org/software/gzip/manual/html_node/Overview.html
			layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex)

			if _, ok := seenLayerDigests[hex]; ok {
				continue
			}
			seenLayerDigests[hex] = struct{}{}

			r, err := l.Compressed()
			if err != nil {
				return sendProgressWriterReturn(pw, err)
			}
			blobSize, err := l.Size()
			if err != nil {
				return sendProgressWriterReturn(pw, err)
			}

			if err := writeTarEntry(tf, layerFiles[i], r, blobSize); err != nil {
				return sendProgressWriterReturn(pw, err)
			}
		}
	}
	if err := writeTarEntry(tf, "manifest.json", bytes.NewReader(m), int64(len(m))); err != nil {
		return sendProgressWriterReturn(pw, err)
	}

	// be sure to close the tar writer so everything is flushed out before we send our EOF
	if err := tf.Close(); err != nil {
		return sendProgressWriterReturn(pw, err)
	}
	// send an EOF to indicate finished on the channel, but nil as our return error
	_ = sendProgressWriterReturn(pw, io.EOF)
	return nil
}
||||||
|
|
||||||
|
// calculateManifest calculates the tarball manifest: one Descriptor per
// distinct image, listing its config file name, repo tags, layer file
// names, and foreign-layer sources. The result is sorted by repo tags so
// the output is deterministic.
func calculateManifest(refToImage map[name.Reference]v1.Image) (m Manifest, err error) {
	imageToTags := dedupRefToImage(refToImage)

	if len(imageToTags) == 0 {
		return nil, errors.New("set of images is empty")
	}

	for img, tags := range imageToTags {
		cfgName, err := img.ConfigName()
		if err != nil {
			return nil, err
		}

		// Store foreign layer info.
		layerSources := make(map[v1.Hash]v1.Descriptor)

		// Write the layers.
		layers, err := img.Layers()
		if err != nil {
			return nil, err
		}
		layerFiles := make([]string, len(layers))
		for i, l := range layers {
			d, err := l.Digest()
			if err != nil {
				return nil, err
			}
			// Munge the file name to appease ancient technology.
			//
			// tar assumes anything with a colon is a remote tape drive:
			// https://www.gnu.org/software/tar/manual/html_section/tar_45.html
			// Drop the algorithm prefix, e.g. "sha256:"
			hex := d.Hex

			// gunzip expects certain file extensions:
			// https://www.gnu.org/software/gzip/manual/html_node/Overview.html
			layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex)

			// Add to LayerSources if it's a foreign layer.
			desc, err := partial.BlobDescriptor(img, d)
			if err != nil {
				return nil, err
			}
			if !desc.MediaType.IsDistributable() {
				// Foreign layers are keyed by diffID, not digest.
				diffid, err := partial.BlobToDiffID(img, d)
				if err != nil {
					return nil, err
				}
				layerSources[diffid] = *desc
			}
		}

		// Generate the tar descriptor and write it.
		m = append(m, Descriptor{
			Config:       cfgName.String(),
			RepoTags:     tags,
			Layers:       layerFiles,
			LayerSources: layerSources,
		})
	}
	// sort by name of the repotags so it is consistent. Alternatively, we could sort by hash of the
	// descriptor, but that would make it hard for humans to process
	sort.Slice(m, func(i, j int) bool {
		return strings.Join(m[i].RepoTags, ",") < strings.Join(m[j].RepoTags, ",")
	})

	return m, nil
}
||||||
|
|
||||||
|
// CalculateSize calculates the expected complete size of the output tar file,
// discarding the serialized manifest computed along the way.
func CalculateSize(refToImage map[name.Reference]v1.Image) (size int64, err error) {
	size, _, err = getSizeAndManifest(refToImage)
	return size, err
}
||||||
|
|
||||||
|
func getSizeAndManifest(refToImage map[name.Reference]v1.Image) (int64, []byte, error) { |
||||||
|
m, err := calculateManifest(refToImage) |
||||||
|
if err != nil { |
||||||
|
return 0, nil, fmt.Errorf("unable to calculate manifest: %w", err) |
||||||
|
} |
||||||
|
mBytes, err := json.Marshal(m) |
||||||
|
if err != nil { |
||||||
|
return 0, nil, fmt.Errorf("could not marshall manifest to bytes: %w", err) |
||||||
|
} |
||||||
|
|
||||||
|
size, err := calculateTarballSize(refToImage, mBytes) |
||||||
|
if err != nil { |
||||||
|
return 0, nil, fmt.Errorf("error calculating tarball size: %w", err) |
||||||
|
} |
||||||
|
return size, mBytes, nil |
||||||
|
} |
||||||
|
|
||||||
|
// calculateTarballSize calculates the size of the tar file
|
||||||
|
func calculateTarballSize(refToImage map[name.Reference]v1.Image, mBytes []byte) (size int64, err error) { |
||||||
|
imageToTags := dedupRefToImage(refToImage) |
||||||
|
|
||||||
|
seenLayerDigests := make(map[string]struct{}) |
||||||
|
for img, name := range imageToTags { |
||||||
|
manifest, err := img.Manifest() |
||||||
|
if err != nil { |
||||||
|
return size, fmt.Errorf("unable to get manifest for img %s: %w", name, err) |
||||||
|
} |
||||||
|
size += calculateSingleFileInTarSize(manifest.Config.Size) |
||||||
|
for _, l := range manifest.Layers { |
||||||
|
hex := l.Digest.Hex |
||||||
|
if _, ok := seenLayerDigests[hex]; ok { |
||||||
|
continue |
||||||
|
} |
||||||
|
seenLayerDigests[hex] = struct{}{} |
||||||
|
size += calculateSingleFileInTarSize(l.Size) |
||||||
|
} |
||||||
|
} |
||||||
|
// add the manifest
|
||||||
|
size += calculateSingleFileInTarSize(int64(len(mBytes))) |
||||||
|
|
||||||
|
// add the two padding blocks that indicate end of a tar file
|
||||||
|
size += 1024 |
||||||
|
return size, nil |
||||||
|
} |
||||||
|
|
||||||
|
// dedupRefToImage collapses the ref->image mapping into image->tags so each
// distinct image is processed once with all of its tag names. References
// that are not tags (e.g. digests) contribute the image with a nil tag list.
func dedupRefToImage(refToImage map[name.Reference]v1.Image) map[v1.Image][]string {
	imageToTags := make(map[v1.Image][]string)

	for ref, img := range refToImage {
		if tag, ok := ref.(name.Tag); ok {
			// Upgrade a nil (digest-only) entry to an empty tag list.
			if tags, ok := imageToTags[img]; !ok || tags == nil {
				imageToTags[img] = []string{}
			}
			// Docker cannot load tarballs without an explicit tag:
			// https://github.com/google/go-containerregistry/issues/890
			//
			// We can't use the fully qualified tag.Name() because of rules_docker:
			// https://github.com/google/go-containerregistry/issues/527
			//
			// If the tag is "latest", but tag.String() doesn't end in ":latest",
			// just append it. Kind of gross, but should work for now.
			ts := tag.String()
			if tag.Identifier() == name.DefaultTag && !strings.HasSuffix(ts, ":"+name.DefaultTag) {
				ts = fmt.Sprintf("%s:%s", ts, name.DefaultTag)
			}
			imageToTags[img] = append(imageToTags[img], ts)
		} else if _, ok := imageToTags[img]; !ok {
			imageToTags[img] = nil
		}
	}

	return imageToTags
}
||||||
|
|
||||||
|
// writeTarEntry writes a file to the provided writer with a corresponding tar header
|
||||||
|
func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { |
||||||
|
hdr := &tar.Header{ |
||||||
|
Mode: 0644, |
||||||
|
Typeflag: tar.TypeReg, |
||||||
|
Size: size, |
||||||
|
Name: path, |
||||||
|
} |
||||||
|
if err := tf.WriteHeader(hdr); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
_, err := io.Copy(tf, r) |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
// ComputeManifest get the manifest.json that will be written to the tarball
// for multiple references. It is the exported wrapper around
// calculateManifest.
func ComputeManifest(refToImage map[name.Reference]v1.Image) (Manifest, error) {
	return calculateManifest(refToImage)
}
||||||
|
|
||||||
|
// WriteOption a function option to pass to Write().
type WriteOption func(*writeOptions) error

// writeOptions holds the configurable behavior of Write and friends.
type writeOptions struct {
	// updates, when non-nil, receives progress notifications during writing.
	updates chan<- v1.Update
}
||||||
|
|
||||||
|
// WithProgress create a WriteOption for passing to Write() that enables
// a channel to receive updates as they are downloaded and written to disk.
func WithProgress(updates chan<- v1.Update) WriteOption {
	return func(o *writeOptions) error {
		o.updates = updates
		return nil
	}
}
||||||
|
|
||||||
|
// progressWriter is a writer which will send the download progress
// on its updates channel after every successful Write.
type progressWriter struct {
	w       io.Writer
	updates chan<- v1.Update
	// size is the expected total; complete is the bytes written so far.
	size, complete int64
}
||||||
|
|
||||||
|
func (pw *progressWriter) Write(p []byte) (int, error) { |
||||||
|
n, err := pw.w.Write(p) |
||||||
|
if err != nil { |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
pw.complete += int64(n) |
||||||
|
|
||||||
|
pw.updates <- v1.Update{ |
||||||
|
Total: pw.size, |
||||||
|
Complete: pw.complete, |
||||||
|
} |
||||||
|
|
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
// Error publishes the given error on the updates channel, together with the
// progress accumulated so far, and returns the same error.
func (pw *progressWriter) Error(err error) error {
	pw.updates <- v1.Update{
		Total:    pw.size,
		Complete: pw.complete,
		Error:    err,
	}
	return err
}
||||||
|
|
||||||
|
// Close signals completion by publishing io.EOF on the updates channel and
// returns io.EOF (it does not close the underlying writer).
func (pw *progressWriter) Close() error {
	pw.updates <- v1.Update{
		Total:    pw.size,
		Complete: pw.complete,
		Error:    io.EOF,
	}
	return io.EOF
}
||||||
|
|
||||||
|
// calculateSingleFileInTarSize calculate the size a file will take up in a
// tar archive, given the input data. Provided by rounding up to nearest
// whole block (512) and adding header 512.
func calculateSingleFileInTarSize(in int64) (out int64) {
	const blockSize = 512
	// Data occupies a whole number of 512-byte blocks.
	out = in
	if pad := in % blockSize; pad != 0 {
		out += blockSize - pad
	}
	// Plus one block for the entry's header.
	return out + blockSize
}
@ -0,0 +1,28 @@ |
|||||||
|
Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA |
||||||
|
|
||||||
|
All rights reserved. |
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without |
||||||
|
modification, are permitted provided that the following conditions are met: |
||||||
|
|
||||||
|
1. Redistributions of source code must retain the above copyright notice, this |
||||||
|
list of conditions and the following disclaimer. |
||||||
|
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright notice, |
||||||
|
this list of conditions and the following disclaimer in the documentation |
||||||
|
and/or other materials provided with the distribution. |
||||||
|
|
||||||
|
3. Neither the name of the copyright holder nor the names of its contributors |
||||||
|
may be used to endorse or promote products derived from this software without |
||||||
|
specific prior written permission. |
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND |
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
||||||
|
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE |
||||||
|
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
@ -0,0 +1,723 @@ |
|||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package tar implements access to tar archives.
|
||||||
|
//
|
||||||
|
// Tape archives (tar) are a file format for storing a sequence of files that
|
||||||
|
// can be read and written in a streaming manner.
|
||||||
|
// This package aims to cover most variations of the format,
|
||||||
|
// including those produced by GNU and BSD tar tools.
|
||||||
|
package tar |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"math" |
||||||
|
"os" |
||||||
|
"path" |
||||||
|
"reflect" |
||||||
|
"strconv" |
||||||
|
"strings" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit
|
||||||
|
// architectures. If a large value is encountered when decoding, the result
|
||||||
|
// stored in Header will be the truncated version.
|
||||||
|
|
||||||
|
// Sentinel errors returned by this package. Callers compare against the
// exported values; the unexported ones are internal to sparse-file handling.
var (
	ErrHeader          = errors.New("archive/tar: invalid tar header")
	ErrWriteTooLong    = errors.New("archive/tar: write too long")
	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
	ErrWriteAfterClose = errors.New("archive/tar: write after close")
	errMissData        = errors.New("archive/tar: sparse file references non-existent data")
	errUnrefData       = errors.New("archive/tar: sparse file contains unreferenced data")
	errWriteHole       = errors.New("archive/tar: write non-NUL byte in sparse hole")
)
||||||
|
|
||||||
|
// headerError collects the reasons why a Header could not be encoded.
// Empty strings are ignored so callers can pass conditionally-set reasons.
type headerError []string

// Error joins the non-empty reasons into a single diagnostic message.
func (he headerError) Error() string {
	const prefix = "archive/tar: cannot encode header"
	reasons := make([]string, 0, len(he))
	for _, reason := range he {
		if reason != "" {
			reasons = append(reasons, reason)
		}
	}
	if len(reasons) == 0 {
		return prefix
	}
	return fmt.Sprintf("%s: %v", prefix, strings.Join(reasons, "; and "))
}
||||||
|
|
||||||
|
// Type flags for Header.Typeflag.
const (
	// Type '0' indicates a regular file.
	TypeReg  = '0'
	TypeRegA = '\x00' // Deprecated: Use TypeReg instead.

	// Type '1' to '6' are header-only flags and may not have a data body.
	TypeLink    = '1' // Hard link
	TypeSymlink = '2' // Symbolic link
	TypeChar    = '3' // Character device node
	TypeBlock   = '4' // Block device node
	TypeDir     = '5' // Directory
	TypeFifo    = '6' // FIFO node

	// Type '7' is reserved.
	TypeCont = '7'

	// Type 'x' is used by the PAX format to store key-value records that
	// are only relevant to the next file.
	// This package transparently handles these types.
	TypeXHeader = 'x'

	// Type 'g' is used by the PAX format to store key-value records that
	// are relevant to all subsequent files.
	// This package only supports parsing and composing such headers,
	// but does not currently support persisting the global state across files.
	TypeXGlobalHeader = 'g'

	// Type 'S' indicates a sparse file in the GNU format.
	TypeGNUSparse = 'S'

	// Types 'L' and 'K' are used by the GNU format for a meta file
	// used to store the path or link name for the next file.
	// This package transparently handles these types.
	TypeGNULongName = 'L'
	TypeGNULongLink = 'K'
)
||||||
|
|
||||||
|
// Keywords for PAX extended header records.
const (
	paxNone     = "" // Indicates that no PAX key is suitable
	paxPath     = "path"
	paxLinkpath = "linkpath"
	paxSize     = "size"
	paxUid      = "uid"
	paxGid      = "gid"
	paxUname    = "uname"
	paxGname    = "gname"
	paxMtime    = "mtime"
	paxAtime    = "atime"
	paxCtime    = "ctime"   // Removed from later revision of PAX spec, but was valid
	paxCharset  = "charset" // Currently unused
	paxComment  = "comment" // Currently unused

	paxSchilyXattr = "SCHILY.xattr."

	// Keywords for GNU sparse files in a PAX extended header.
	paxGNUSparse          = "GNU.sparse."
	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
	paxGNUSparseOffset    = "GNU.sparse.offset"
	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
	paxGNUSparseMap       = "GNU.sparse.map"
	paxGNUSparseName      = "GNU.sparse.name"
	paxGNUSparseMajor     = "GNU.sparse.major"
	paxGNUSparseMinor     = "GNU.sparse.minor"
	paxGNUSparseSize      = "GNU.sparse.size"
	paxGNUSparseRealSize  = "GNU.sparse.realsize"
)
||||||
|
|
||||||
|
// basicKeys is a set of the PAX keys for which we have built-in support.
|
||||||
|
// This does not contain "charset" or "comment", which are both PAX-specific,
|
||||||
|
// so adding them as first-class features of Header is unlikely.
|
||||||
|
// Users can use the PAXRecords field to set it themselves.
|
||||||
|
var basicKeys = map[string]bool{ |
||||||
|
paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true, |
||||||
|
paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true, |
||||||
|
} |
||||||
|
|
||||||
|
// A Header represents a single header in a tar archive.
|
||||||
|
// Some fields may not be populated.
|
||||||
|
//
|
||||||
|
// For forward compatibility, users that retrieve a Header from Reader.Next,
|
||||||
|
// mutate it in some ways, and then pass it back to Writer.WriteHeader
|
||||||
|
// should do so by creating a new Header and copying the fields
|
||||||
|
// that they are interested in preserving.
|
||||||
|
type Header struct { |
||||||
|
// Typeflag is the type of header entry.
|
||||||
|
// The zero value is automatically promoted to either TypeReg or TypeDir
|
||||||
|
// depending on the presence of a trailing slash in Name.
|
||||||
|
Typeflag byte |
||||||
|
|
||||||
|
Name string // Name of file entry
|
||||||
|
Linkname string // Target name of link (valid for TypeLink or TypeSymlink)
|
||||||
|
|
||||||
|
Size int64 // Logical file size in bytes
|
||||||
|
Mode int64 // Permission and mode bits
|
||||||
|
Uid int // User ID of owner
|
||||||
|
Gid int // Group ID of owner
|
||||||
|
Uname string // User name of owner
|
||||||
|
Gname string // Group name of owner
|
||||||
|
|
||||||
|
// If the Format is unspecified, then Writer.WriteHeader rounds ModTime
|
||||||
|
// to the nearest second and ignores the AccessTime and ChangeTime fields.
|
||||||
|
//
|
||||||
|
// To use AccessTime or ChangeTime, specify the Format as PAX or GNU.
|
||||||
|
// To use sub-second resolution, specify the Format as PAX.
|
||||||
|
ModTime time.Time // Modification time
|
||||||
|
AccessTime time.Time // Access time (requires either PAX or GNU support)
|
||||||
|
ChangeTime time.Time // Change time (requires either PAX or GNU support)
|
||||||
|
|
||||||
|
Devmajor int64 // Major device number (valid for TypeChar or TypeBlock)
|
||||||
|
Devminor int64 // Minor device number (valid for TypeChar or TypeBlock)
|
||||||
|
|
||||||
|
// Xattrs stores extended attributes as PAX records under the
|
||||||
|
// "SCHILY.xattr." namespace.
|
||||||
|
//
|
||||||
|
// The following are semantically equivalent:
|
||||||
|
// h.Xattrs[key] = value
|
||||||
|
// h.PAXRecords["SCHILY.xattr."+key] = value
|
||||||
|
//
|
||||||
|
// When Writer.WriteHeader is called, the contents of Xattrs will take
|
||||||
|
// precedence over those in PAXRecords.
|
||||||
|
//
|
||||||
|
// Deprecated: Use PAXRecords instead.
|
||||||
|
Xattrs map[string]string |
||||||
|
|
||||||
|
// PAXRecords is a map of PAX extended header records.
|
||||||
|
//
|
||||||
|
// User-defined records should have keys of the following form:
|
||||||
|
// VENDOR.keyword
|
||||||
|
// Where VENDOR is some namespace in all uppercase, and keyword may
|
||||||
|
// not contain the '=' character (e.g., "GOLANG.pkg.version").
|
||||||
|
// The key and value should be non-empty UTF-8 strings.
|
||||||
|
//
|
||||||
|
// When Writer.WriteHeader is called, PAX records derived from the
|
||||||
|
// other fields in Header take precedence over PAXRecords.
|
||||||
|
PAXRecords map[string]string |
||||||
|
|
||||||
|
// Format specifies the format of the tar header.
|
||||||
|
//
|
||||||
|
// This is set by Reader.Next as a best-effort guess at the format.
|
||||||
|
// Since the Reader liberally reads some non-compliant files,
|
||||||
|
// it is possible for this to be FormatUnknown.
|
||||||
|
//
|
||||||
|
// If the format is unspecified when Writer.WriteHeader is called,
|
||||||
|
// then it uses the first format (in the order of USTAR, PAX, GNU)
|
||||||
|
// capable of encoding this Header (see Format).
|
||||||
|
Format Format |
||||||
|
} |
||||||
|
|
||||||
|
// sparseEntry represents a Length-sized fragment at Offset in the file.
type sparseEntry struct{ Offset, Length int64 }

// endOffset returns the offset of the first byte past this fragment.
func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
||||||
|
|
||||||
|
// A sparse file can be represented as either a sparseDatas or a sparseHoles.
|
||||||
|
// As long as the total size is known, they are equivalent and one can be
|
||||||
|
// converted to the other form and back. The various tar formats with sparse
|
||||||
|
// file support represent sparse files in the sparseDatas form. That is, they
|
||||||
|
// specify the fragments in the file that has data, and treat everything else as
|
||||||
|
// having zero bytes. As such, the encoding and decoding logic in this package
|
||||||
|
// deals with sparseDatas.
|
||||||
|
//
|
||||||
|
// However, the external API uses sparseHoles instead of sparseDatas because the
|
||||||
|
// zero value of sparseHoles logically represents a normal file (i.e., there are
|
||||||
|
// no holes in it). On the other hand, the zero value of sparseDatas implies
|
||||||
|
// that the file has no data in it, which is rather odd.
|
||||||
|
//
|
||||||
|
// As an example, if the underlying raw file contains the 10-byte data:
|
||||||
|
// var compactFile = "abcdefgh"
|
||||||
|
//
|
||||||
|
// And the sparse map has the following entries:
|
||||||
|
// var spd sparseDatas = []sparseEntry{
|
||||||
|
// {Offset: 2, Length: 5}, // Data fragment for 2..6
|
||||||
|
// {Offset: 18, Length: 3}, // Data fragment for 18..20
|
||||||
|
// }
|
||||||
|
// var sph sparseHoles = []sparseEntry{
|
||||||
|
// {Offset: 0, Length: 2}, // Hole fragment for 0..1
|
||||||
|
// {Offset: 7, Length: 11}, // Hole fragment for 7..17
|
||||||
|
// {Offset: 21, Length: 4}, // Hole fragment for 21..24
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Then the content of the resulting sparse file with a Header.Size of 25 is:
|
||||||
|
// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
|
||||||
|
type ( |
||||||
|
sparseDatas []sparseEntry |
||||||
|
sparseHoles []sparseEntry |
||||||
|
) |
||||||
|
|
||||||
|
// validateSparseEntries reports whether sp is a valid sparse map.
|
||||||
|
// It does not matter whether sp represents data fragments or hole fragments.
|
||||||
|
func validateSparseEntries(sp []sparseEntry, size int64) bool { |
||||||
|
// Validate all sparse entries. These are the same checks as performed by
|
||||||
|
// the BSD tar utility.
|
||||||
|
if size < 0 { |
||||||
|
return false |
||||||
|
} |
||||||
|
var pre sparseEntry |
||||||
|
for _, cur := range sp { |
||||||
|
switch { |
||||||
|
case cur.Offset < 0 || cur.Length < 0: |
||||||
|
return false // Negative values are never okay
|
||||||
|
case cur.Offset > math.MaxInt64-cur.Length: |
||||||
|
return false // Integer overflow with large length
|
||||||
|
case cur.endOffset() > size: |
||||||
|
return false // Region extends beyond the actual size
|
||||||
|
case pre.endOffset() > cur.Offset: |
||||||
|
return false // Regions cannot overlap and must be in order
|
||||||
|
} |
||||||
|
pre = cur |
||||||
|
} |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// alignSparseEntries mutates src and returns dst where each fragment's
|
||||||
|
// starting offset is aligned up to the nearest block edge, and each
|
||||||
|
// ending offset is aligned down to the nearest block edge.
|
||||||
|
//
|
||||||
|
// Even though the Go tar Reader and the BSD tar utility can handle entries
|
||||||
|
// with arbitrary offsets and lengths, the GNU tar utility can only handle
|
||||||
|
// offsets and lengths that are multiples of blockSize.
|
||||||
|
func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry { |
||||||
|
dst := src[:0] |
||||||
|
for _, s := range src { |
||||||
|
pos, end := s.Offset, s.endOffset() |
||||||
|
pos += blockPadding(+pos) // Round-up to nearest blockSize
|
||||||
|
if end != size { |
||||||
|
end -= blockPadding(-end) // Round-down to nearest blockSize
|
||||||
|
} |
||||||
|
if pos < end { |
||||||
|
dst = append(dst, sparseEntry{Offset: pos, Length: end - pos}) |
||||||
|
} |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// invertSparseEntries converts a sparse map from one form to the other.
|
||||||
|
// If the input is sparseHoles, then it will output sparseDatas and vice-versa.
|
||||||
|
// The input must have been already validated.
|
||||||
|
//
|
||||||
|
// This function mutates src and returns a normalized map where:
|
||||||
|
// * adjacent fragments are coalesced together
|
||||||
|
// * only the last fragment may be empty
|
||||||
|
// * the endOffset of the last fragment is the total size
|
||||||
|
func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry { |
||||||
|
dst := src[:0] |
||||||
|
var pre sparseEntry |
||||||
|
for _, cur := range src { |
||||||
|
if cur.Length == 0 { |
||||||
|
continue // Skip empty fragments
|
||||||
|
} |
||||||
|
pre.Length = cur.Offset - pre.Offset |
||||||
|
if pre.Length > 0 { |
||||||
|
dst = append(dst, pre) // Only add non-empty fragments
|
||||||
|
} |
||||||
|
pre.Offset = cur.endOffset() |
||||||
|
} |
||||||
|
pre.Length = size - pre.Offset // Possibly the only empty fragment
|
||||||
|
return append(dst, pre) |
||||||
|
} |
||||||
|
|
||||||
|
// fileState tracks the number of logical (includes sparse holes) and physical
// (actual in tar archive) bytes remaining for the current file.
//
// Invariant: LogicalRemaining >= PhysicalRemaining
type fileState interface {
	// LogicalRemaining reports the logical bytes left, counting sparse holes.
	LogicalRemaining() int64
	// PhysicalRemaining reports the bytes of real archive data left.
	PhysicalRemaining() int64
}
||||||
|
|
||||||
|
// allowedFormats determines which formats can be used.
|
||||||
|
// The value returned is the logical OR of multiple possible formats.
|
||||||
|
// If the value is FormatUnknown, then the input Header cannot be encoded
|
||||||
|
// and an error is returned explaining why.
|
||||||
|
//
|
||||||
|
// As a by-product of checking the fields, this function returns paxHdrs, which
|
||||||
|
// contain all fields that could not be directly encoded.
|
||||||
|
// A value receiver ensures that this method does not mutate the source Header.
|
||||||
|
func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) { |
||||||
|
format = FormatUSTAR | FormatPAX | FormatGNU |
||||||
|
paxHdrs = make(map[string]string) |
||||||
|
|
||||||
|
var whyNoUSTAR, whyNoPAX, whyNoGNU string |
||||||
|
var preferPAX bool // Prefer PAX over USTAR
|
||||||
|
verifyString := func(s string, size int, name, paxKey string) { |
||||||
|
// NUL-terminator is optional for path and linkpath.
|
||||||
|
// Technically, it is required for uname and gname,
|
||||||
|
// but neither GNU nor BSD tar checks for it.
|
||||||
|
tooLong := len(s) > size |
||||||
|
allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath |
||||||
|
if hasNUL(s) || (tooLong && !allowLongGNU) { |
||||||
|
whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s) |
||||||
|
format.mustNotBe(FormatGNU) |
||||||
|
} |
||||||
|
if !isASCII(s) || tooLong { |
||||||
|
canSplitUSTAR := paxKey == paxPath |
||||||
|
if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok { |
||||||
|
whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s) |
||||||
|
format.mustNotBe(FormatUSTAR) |
||||||
|
} |
||||||
|
if paxKey == paxNone { |
||||||
|
whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s) |
||||||
|
format.mustNotBe(FormatPAX) |
||||||
|
} else { |
||||||
|
paxHdrs[paxKey] = s |
||||||
|
} |
||||||
|
} |
||||||
|
if v, ok := h.PAXRecords[paxKey]; ok && v == s { |
||||||
|
paxHdrs[paxKey] = v |
||||||
|
} |
||||||
|
} |
||||||
|
verifyNumeric := func(n int64, size int, name, paxKey string) { |
||||||
|
if !fitsInBase256(size, n) { |
||||||
|
whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n) |
||||||
|
format.mustNotBe(FormatGNU) |
||||||
|
} |
||||||
|
if !fitsInOctal(size, n) { |
||||||
|
whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n) |
||||||
|
format.mustNotBe(FormatUSTAR) |
||||||
|
if paxKey == paxNone { |
||||||
|
whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n) |
||||||
|
format.mustNotBe(FormatPAX) |
||||||
|
} else { |
||||||
|
paxHdrs[paxKey] = strconv.FormatInt(n, 10) |
||||||
|
} |
||||||
|
} |
||||||
|
if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) { |
||||||
|
paxHdrs[paxKey] = v |
||||||
|
} |
||||||
|
} |
||||||
|
verifyTime := func(ts time.Time, size int, name, paxKey string) { |
||||||
|
if ts.IsZero() { |
||||||
|
return // Always okay
|
||||||
|
} |
||||||
|
if !fitsInBase256(size, ts.Unix()) { |
||||||
|
whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts) |
||||||
|
format.mustNotBe(FormatGNU) |
||||||
|
} |
||||||
|
isMtime := paxKey == paxMtime |
||||||
|
fitsOctal := fitsInOctal(size, ts.Unix()) |
||||||
|
if (isMtime && !fitsOctal) || !isMtime { |
||||||
|
whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts) |
||||||
|
format.mustNotBe(FormatUSTAR) |
||||||
|
} |
||||||
|
needsNano := ts.Nanosecond() != 0 |
||||||
|
if !isMtime || !fitsOctal || needsNano { |
||||||
|
preferPAX = true // USTAR may truncate sub-second measurements
|
||||||
|
if paxKey == paxNone { |
||||||
|
whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts) |
||||||
|
format.mustNotBe(FormatPAX) |
||||||
|
} else { |
||||||
|
paxHdrs[paxKey] = formatPAXTime(ts) |
||||||
|
} |
||||||
|
} |
||||||
|
if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) { |
||||||
|
paxHdrs[paxKey] = v |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Check basic fields.
|
||||||
|
var blk block |
||||||
|
v7 := blk.V7() |
||||||
|
ustar := blk.USTAR() |
||||||
|
gnu := blk.GNU() |
||||||
|
verifyString(h.Name, len(v7.Name()), "Name", paxPath) |
||||||
|
verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath) |
||||||
|
verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname) |
||||||
|
verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname) |
||||||
|
verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone) |
||||||
|
verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid) |
||||||
|
verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid) |
||||||
|
verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize) |
||||||
|
verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone) |
||||||
|
verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone) |
||||||
|
verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime) |
||||||
|
verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime) |
||||||
|
verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime) |
||||||
|
|
||||||
|
// Check for header-only types.
|
||||||
|
var whyOnlyPAX, whyOnlyGNU string |
||||||
|
switch h.Typeflag { |
||||||
|
case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse: |
||||||
|
// Exclude TypeLink and TypeSymlink, since they may reference directories.
|
||||||
|
if strings.HasSuffix(h.Name, "/") { |
||||||
|
return FormatUnknown, nil, headerError{"filename may not have trailing slash"} |
||||||
|
} |
||||||
|
case TypeXHeader, TypeGNULongName, TypeGNULongLink: |
||||||
|
return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"} |
||||||
|
case TypeXGlobalHeader: |
||||||
|
h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format} |
||||||
|
if !reflect.DeepEqual(h, h2) { |
||||||
|
return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"} |
||||||
|
} |
||||||
|
whyOnlyPAX = "only PAX supports TypeXGlobalHeader" |
||||||
|
format.mayOnlyBe(FormatPAX) |
||||||
|
} |
||||||
|
if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 { |
||||||
|
return FormatUnknown, nil, headerError{"negative size on header-only type"} |
||||||
|
} |
||||||
|
|
||||||
|
// Check PAX records.
|
||||||
|
if len(h.Xattrs) > 0 { |
||||||
|
for k, v := range h.Xattrs { |
||||||
|
paxHdrs[paxSchilyXattr+k] = v |
||||||
|
} |
||||||
|
whyOnlyPAX = "only PAX supports Xattrs" |
||||||
|
format.mayOnlyBe(FormatPAX) |
||||||
|
} |
||||||
|
if len(h.PAXRecords) > 0 { |
||||||
|
for k, v := range h.PAXRecords { |
||||||
|
switch _, exists := paxHdrs[k]; { |
||||||
|
case exists: |
||||||
|
continue // Do not overwrite existing records
|
||||||
|
case h.Typeflag == TypeXGlobalHeader: |
||||||
|
paxHdrs[k] = v // Copy all records
|
||||||
|
case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse): |
||||||
|
paxHdrs[k] = v // Ignore local records that may conflict
|
||||||
|
} |
||||||
|
} |
||||||
|
whyOnlyPAX = "only PAX supports PAXRecords" |
||||||
|
format.mayOnlyBe(FormatPAX) |
||||||
|
} |
||||||
|
for k, v := range paxHdrs { |
||||||
|
if !validPAXRecord(k, v) { |
||||||
|
return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// TODO(dsnet): Re-enable this when adding sparse support.
|
||||||
|
// See https://golang.org/issue/22735
|
||||||
|
/* |
||||||
|
// Check sparse files.
|
||||||
|
if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse { |
||||||
|
if isHeaderOnlyType(h.Typeflag) { |
||||||
|
return FormatUnknown, nil, headerError{"header-only type cannot be sparse"} |
||||||
|
} |
||||||
|
if !validateSparseEntries(h.SparseHoles, h.Size) { |
||||||
|
return FormatUnknown, nil, headerError{"invalid sparse holes"} |
||||||
|
} |
||||||
|
if h.Typeflag == TypeGNUSparse { |
||||||
|
whyOnlyGNU = "only GNU supports TypeGNUSparse" |
||||||
|
format.mayOnlyBe(FormatGNU) |
||||||
|
} else { |
||||||
|
whyNoGNU = "GNU supports sparse files only with TypeGNUSparse" |
||||||
|
format.mustNotBe(FormatGNU) |
||||||
|
} |
||||||
|
whyNoUSTAR = "USTAR does not support sparse files" |
||||||
|
format.mustNotBe(FormatUSTAR) |
||||||
|
} |
||||||
|
*/ |
||||||
|
|
||||||
|
// Check desired format.
|
||||||
|
if wantFormat := h.Format; wantFormat != FormatUnknown { |
||||||
|
if wantFormat.has(FormatPAX) && !preferPAX { |
||||||
|
wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too
|
||||||
|
} |
||||||
|
format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted
|
||||||
|
} |
||||||
|
if format == FormatUnknown { |
||||||
|
switch h.Format { |
||||||
|
case FormatUSTAR: |
||||||
|
err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU} |
||||||
|
case FormatPAX: |
||||||
|
err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU} |
||||||
|
case FormatGNU: |
||||||
|
err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX} |
||||||
|
default: |
||||||
|
err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU} |
||||||
|
} |
||||||
|
} |
||||||
|
return format, paxHdrs, err |
||||||
|
} |
||||||
|
|
||||||
|
// FileInfo returns an os.FileInfo for the Header.
|
||||||
|
func (h *Header) FileInfo() os.FileInfo { |
||||||
|
return headerFileInfo{h} |
||||||
|
} |
||||||
|
|
||||||
|
// headerFileInfo implements os.FileInfo.
|
||||||
|
type headerFileInfo struct { |
||||||
|
h *Header |
||||||
|
} |
||||||
|
|
||||||
|
func (fi headerFileInfo) Size() int64 { return fi.h.Size } |
||||||
|
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } |
||||||
|
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } |
||||||
|
func (fi headerFileInfo) Sys() interface{} { return fi.h } |
||||||
|
|
||||||
|
// Name returns the base name of the file.
|
||||||
|
func (fi headerFileInfo) Name() string { |
||||||
|
if fi.IsDir() { |
||||||
|
return path.Base(path.Clean(fi.h.Name)) |
||||||
|
} |
||||||
|
return path.Base(fi.h.Name) |
||||||
|
} |
||||||
|
|
||||||
|
// Mode returns the permission and mode bits for the headerFileInfo.
|
||||||
|
func (fi headerFileInfo) Mode() (mode os.FileMode) { |
||||||
|
// Set file permission bits.
|
||||||
|
mode = os.FileMode(fi.h.Mode).Perm() |
||||||
|
|
||||||
|
// Set setuid, setgid and sticky bits.
|
||||||
|
if fi.h.Mode&c_ISUID != 0 { |
||||||
|
mode |= os.ModeSetuid |
||||||
|
} |
||||||
|
if fi.h.Mode&c_ISGID != 0 { |
||||||
|
mode |= os.ModeSetgid |
||||||
|
} |
||||||
|
if fi.h.Mode&c_ISVTX != 0 { |
||||||
|
mode |= os.ModeSticky |
||||||
|
} |
||||||
|
|
||||||
|
// Set file mode bits; clear perm, setuid, setgid, and sticky bits.
|
||||||
|
switch m := os.FileMode(fi.h.Mode) &^ 07777; m { |
||||||
|
case c_ISDIR: |
||||||
|
mode |= os.ModeDir |
||||||
|
case c_ISFIFO: |
||||||
|
mode |= os.ModeNamedPipe |
||||||
|
case c_ISLNK: |
||||||
|
mode |= os.ModeSymlink |
||||||
|
case c_ISBLK: |
||||||
|
mode |= os.ModeDevice |
||||||
|
case c_ISCHR: |
||||||
|
mode |= os.ModeDevice |
||||||
|
mode |= os.ModeCharDevice |
||||||
|
case c_ISSOCK: |
||||||
|
mode |= os.ModeSocket |
||||||
|
} |
||||||
|
|
||||||
|
switch fi.h.Typeflag { |
||||||
|
case TypeSymlink: |
||||||
|
mode |= os.ModeSymlink |
||||||
|
case TypeChar: |
||||||
|
mode |= os.ModeDevice |
||||||
|
mode |= os.ModeCharDevice |
||||||
|
case TypeBlock: |
||||||
|
mode |= os.ModeDevice |
||||||
|
case TypeDir: |
||||||
|
mode |= os.ModeDir |
||||||
|
case TypeFifo: |
||||||
|
mode |= os.ModeNamedPipe |
||||||
|
} |
||||||
|
|
||||||
|
return mode |
||||||
|
} |
||||||
|
|
||||||
|
// sysStat, if non-nil, populates h from system-dependent fields of fi.
|
||||||
|
var sysStat func(fi os.FileInfo, h *Header) error |
||||||
|
|
||||||
|
const ( |
||||||
|
// Mode constants from the USTAR spec:
|
||||||
|
// See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
|
||||||
|
c_ISUID = 04000 // Set uid
|
||||||
|
c_ISGID = 02000 // Set gid
|
||||||
|
c_ISVTX = 01000 // Save text (sticky bit)
|
||||||
|
|
||||||
|
// Common Unix mode constants; these are not defined in any common tar standard.
|
||||||
|
// Header.FileInfo understands these, but FileInfoHeader will never produce these.
|
||||||
|
c_ISDIR = 040000 // Directory
|
||||||
|
c_ISFIFO = 010000 // FIFO
|
||||||
|
c_ISREG = 0100000 // Regular file
|
||||||
|
c_ISLNK = 0120000 // Symbolic link
|
||||||
|
c_ISBLK = 060000 // Block special file
|
||||||
|
c_ISCHR = 020000 // Character special file
|
||||||
|
c_ISSOCK = 0140000 // Socket
|
||||||
|
) |
||||||
|
|
||||||
|
// FileInfoHeader creates a partially-populated Header from fi.
|
||||||
|
// If fi describes a symlink, FileInfoHeader records link as the link target.
|
||||||
|
// If fi describes a directory, a slash is appended to the name.
|
||||||
|
//
|
||||||
|
// Since os.FileInfo's Name method only returns the base name of
|
||||||
|
// the file it describes, it may be necessary to modify Header.Name
|
||||||
|
// to provide the full path name of the file.
|
||||||
|
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { |
||||||
|
if fi == nil { |
||||||
|
return nil, errors.New("archive/tar: FileInfo is nil") |
||||||
|
} |
||||||
|
fm := fi.Mode() |
||||||
|
h := &Header{ |
||||||
|
Name: fi.Name(), |
||||||
|
ModTime: fi.ModTime(), |
||||||
|
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
|
||||||
|
} |
||||||
|
switch { |
||||||
|
case fm.IsRegular(): |
||||||
|
h.Typeflag = TypeReg |
||||||
|
h.Size = fi.Size() |
||||||
|
case fi.IsDir(): |
||||||
|
h.Typeflag = TypeDir |
||||||
|
h.Name += "/" |
||||||
|
case fm&os.ModeSymlink != 0: |
||||||
|
h.Typeflag = TypeSymlink |
||||||
|
h.Linkname = link |
||||||
|
case fm&os.ModeDevice != 0: |
||||||
|
if fm&os.ModeCharDevice != 0 { |
||||||
|
h.Typeflag = TypeChar |
||||||
|
} else { |
||||||
|
h.Typeflag = TypeBlock |
||||||
|
} |
||||||
|
case fm&os.ModeNamedPipe != 0: |
||||||
|
h.Typeflag = TypeFifo |
||||||
|
case fm&os.ModeSocket != 0: |
||||||
|
return nil, fmt.Errorf("archive/tar: sockets not supported") |
||||||
|
default: |
||||||
|
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) |
||||||
|
} |
||||||
|
if fm&os.ModeSetuid != 0 { |
||||||
|
h.Mode |= c_ISUID |
||||||
|
} |
||||||
|
if fm&os.ModeSetgid != 0 { |
||||||
|
h.Mode |= c_ISGID |
||||||
|
} |
||||||
|
if fm&os.ModeSticky != 0 { |
||||||
|
h.Mode |= c_ISVTX |
||||||
|
} |
||||||
|
// If possible, populate additional fields from OS-specific
|
||||||
|
// FileInfo fields.
|
||||||
|
if sys, ok := fi.Sys().(*Header); ok { |
||||||
|
// This FileInfo came from a Header (not the OS). Use the
|
||||||
|
// original Header to populate all remaining fields.
|
||||||
|
h.Uid = sys.Uid |
||||||
|
h.Gid = sys.Gid |
||||||
|
h.Uname = sys.Uname |
||||||
|
h.Gname = sys.Gname |
||||||
|
h.AccessTime = sys.AccessTime |
||||||
|
h.ChangeTime = sys.ChangeTime |
||||||
|
if sys.Xattrs != nil { |
||||||
|
h.Xattrs = make(map[string]string) |
||||||
|
for k, v := range sys.Xattrs { |
||||||
|
h.Xattrs[k] = v |
||||||
|
} |
||||||
|
} |
||||||
|
if sys.Typeflag == TypeLink { |
||||||
|
// hard link
|
||||||
|
h.Typeflag = TypeLink |
||||||
|
h.Size = 0 |
||||||
|
h.Linkname = sys.Linkname |
||||||
|
} |
||||||
|
if sys.PAXRecords != nil { |
||||||
|
h.PAXRecords = make(map[string]string) |
||||||
|
for k, v := range sys.PAXRecords { |
||||||
|
h.PAXRecords[k] = v |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
if sysStat != nil { |
||||||
|
return h, sysStat(fi, h) |
||||||
|
} |
||||||
|
return h, nil |
||||||
|
} |
||||||
|
|
||||||
|
// isHeaderOnlyType checks if the given type flag is of the type that has no
|
||||||
|
// data section even if a size is specified.
|
||||||
|
func isHeaderOnlyType(flag byte) bool { |
||||||
|
switch flag { |
||||||
|
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: |
||||||
|
return true |
||||||
|
default: |
||||||
|
return false |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// min returns the smaller of two int64 values.
func min(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}
@ -0,0 +1,303 @@ |
|||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package tar |
||||||
|
|
||||||
|
import "strings" |
||||||
|
|
||||||
|
// Format represents the tar archive format.
//
// The original tar format was introduced in Unix V7.
// Since then, there have been multiple competing formats attempting to
// standardize or extend the V7 format to overcome its limitations.
// The most common formats are the USTAR, PAX, and GNU formats,
// each with their own advantages and limitations.
//
// The following table captures the capabilities of each format:
//
//	                  |  USTAR |       PAX |       GNU
//	------------------+--------+-----------+----------
//	Name              |   256B | unlimited | unlimited
//	Linkname          |   100B | unlimited | unlimited
//	Size              | uint33 | unlimited |    uint89
//	Mode              | uint21 |    uint21 |    uint57
//	Uid/Gid           | uint21 | unlimited |    uint57
//	Uname/Gname       |    32B | unlimited |       32B
//	ModTime           | uint33 | unlimited |     int89
//	AccessTime        |    n/a | unlimited |     int89
//	ChangeTime        |    n/a | unlimited |     int89
//	Devmajor/Devminor | uint21 |    uint21 |    uint57
//	------------------+--------+-----------+----------
//	string encoding   |  ASCII |     UTF-8 |    binary
//	sub-second times  |     no |       yes |        no
//	sparse files      |     no |       yes |       yes
//
// The table's upper portion shows the Header fields, where each format reports
// the maximum number of bytes allowed for each string field and
// the integer type used to store each numeric field
// (where timestamps are stored as the number of seconds since the Unix epoch).
//
// The table's lower portion shows specialized features of each format,
// such as supported string encodings, support for sub-second timestamps,
// or support for sparse files.
//
// The Writer currently provides no support for sparse files.
type Format int
||||||
|
|
||||||
|
// Constants to identify various tar formats.
const (
	// Deliberately hide the meaning of constants from public API.
	_ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc...

	// FormatUnknown indicates that the format is unknown.
	FormatUnknown

	// The format of the original Unix V7 tar tool prior to standardization.
	formatV7

	// FormatUSTAR represents the USTAR header format defined in POSIX.1-1988.
	//
	// While this format is compatible with most tar readers,
	// the format has several limitations making it unsuitable for some usages.
	// Most notably, it cannot support sparse files, files larger than 8GiB,
	// filenames larger than 256 characters, and non-ASCII filenames.
	//
	// Reference:
	//	http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
	FormatUSTAR

	// FormatPAX represents the PAX header format defined in POSIX.1-2001.
	//
	// PAX extends USTAR by writing a special file with Typeflag TypeXHeader
	// preceding the original header. This file contains a set of key-value
	// records, which are used to overcome USTAR's shortcomings, in addition to
	// providing the ability to have sub-second resolution for timestamps.
	//
	// Some newer formats add their own extensions to PAX by defining their
	// own keys and assigning certain semantic meaning to the associated values.
	// For example, sparse file support in PAX is implemented using keys
	// defined by the GNU manual (e.g., "GNU.sparse.map").
	//
	// Reference:
	//	http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html
	FormatPAX

	// FormatGNU represents the GNU header format.
	//
	// The GNU header format is older than the USTAR and PAX standards and
	// is not compatible with them. The GNU format supports
	// arbitrary file sizes, filenames of arbitrary encoding and length,
	// sparse files, and other features.
	//
	// It is recommended that PAX be chosen over GNU unless the target
	// application can only parse GNU formatted archives.
	//
	// Reference:
	//	https://www.gnu.org/software/tar/manual/html_node/Standard.html
	FormatGNU

	// Schily's tar format, which is incompatible with USTAR.
	// This does not cover STAR extensions to the PAX format; these fall under
	// the PAX format.
	formatSTAR

	// formatMax is one past the highest format bit; used as a loop bound.
	formatMax
)
||||||
|
|
||||||
|
// has reports whether any of the format bits in f2 are set in f.
func (f Format) has(f2 Format) bool { return f&f2 != 0 }

// mayBe adds the format bits in f2 to the set of possible formats.
func (f *Format) mayBe(f2 Format) { *f |= f2 }

// mayOnlyBe restricts the set of possible formats to those also in f2.
func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 }

// mustNotBe removes the format bits in f2 from the set of possible formats.
func (f *Format) mustNotBe(f2 Format) { *f &^= f2 }
||||||
|
|
||||||
|
// formatNames maps each single format bit to its human-readable name,
// used by Format.String.
var formatNames = map[Format]string{
	formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR",
}
||||||
|
|
||||||
|
func (f Format) String() string { |
||||||
|
var ss []string |
||||||
|
for f2 := Format(1); f2 < formatMax; f2 <<= 1 { |
||||||
|
if f.has(f2) { |
||||||
|
ss = append(ss, formatNames[f2]) |
||||||
|
} |
||||||
|
} |
||||||
|
switch len(ss) { |
||||||
|
case 0: |
||||||
|
return "<unknown>" |
||||||
|
case 1: |
||||||
|
return ss[0] |
||||||
|
default: |
||||||
|
return "(" + strings.Join(ss, " | ") + ")" |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Magics used to identify various formats.
const (
	magicGNU, versionGNU     = "ustar ", " \x00"
	magicUSTAR, versionUSTAR = "ustar\x00", "00"
	trailerSTAR              = "tar\x00"
)

// Size constants from various tar specifications.
const (
	blockSize  = 512 // Size of each block in a tar stream
	nameSize   = 100 // Max length of the name field in USTAR format
	prefixSize = 155 // Max length of the prefix field in USTAR format
)
||||||
|
|
||||||
|
// blockPadding computes the number of bytes needed to pad offset up to the
// nearest block edge where 0 <= n < blockSize.
func blockPadding(offset int64) (n int64) {
	// blockSize is a power of two, so negating modulo the block mask
	// yields the distance to the next block boundary.
	return -offset & (blockSize - 1)
}
||||||
|
|
||||||
|
// zeroBlock is an all-zero block, used to detect the end-of-archive marker.
var zeroBlock block

// block is a single 512-byte tar header/data block.
type block [blockSize]byte
||||||
|
|
||||||
|
// Convert block to any number of formats. These are zero-cost
// reinterpretations of the same 512-byte buffer.
func (b *block) V7() *headerV7       { return (*headerV7)(b) }
func (b *block) GNU() *headerGNU     { return (*headerGNU)(b) }
func (b *block) STAR() *headerSTAR   { return (*headerSTAR)(b) }
func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) }
func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) }
||||||
|
|
||||||
|
// GetFormat checks that the block is a valid tar header based on the checksum.
// It then attempts to guess the specific format based on magic values.
// If the checksum fails, then FormatUnknown is returned.
func (b *block) GetFormat() Format {
	// Verify checksum. Both the unsigned (POSIX) and signed (Sun tar)
	// interpretations are accepted.
	var p parser
	value := p.parseOctal(b.V7().Chksum())
	chksum1, chksum2 := b.ComputeChecksum()
	if p.err != nil || (value != chksum1 && value != chksum2) {
		return FormatUnknown
	}

	// Guess the magic values.
	magic := string(b.USTAR().Magic())
	version := string(b.USTAR().Version())
	trailer := string(b.STAR().Trailer())
	switch {
	case magic == magicUSTAR && trailer == trailerSTAR:
		return formatSTAR
	case magic == magicUSTAR:
		// USTAR and PAX share the same magic; disambiguation happens later.
		return FormatUSTAR | FormatPAX
	case magic == magicGNU && version == versionGNU:
		return FormatGNU
	default:
		return formatV7
	}
}
||||||
|
|
||||||
|
// SetFormat writes the magic values necessary for specified format
// and then updates the checksum accordingly.
// It panics if format contains no recognized format bit.
func (b *block) SetFormat(format Format) {
	// Set the magic values.
	switch {
	case format.has(formatV7):
		// Do nothing.
	case format.has(FormatGNU):
		copy(b.GNU().Magic(), magicGNU)
		copy(b.GNU().Version(), versionGNU)
	case format.has(formatSTAR):
		copy(b.STAR().Magic(), magicUSTAR)
		copy(b.STAR().Version(), versionUSTAR)
		copy(b.STAR().Trailer(), trailerSTAR)
	case format.has(FormatUSTAR | FormatPAX):
		copy(b.USTAR().Magic(), magicUSTAR)
		copy(b.USTAR().Version(), versionUSTAR)
	default:
		panic("invalid format")
	}

	// Update checksum.
	// This field is special in that it is terminated by a NULL then space.
	var f formatter
	field := b.V7().Chksum()
	chksum, _ := b.ComputeChecksum() // Possible values are 256..128776
	f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143
	field[7] = ' '
}
||||||
|
|
||||||
|
// ComputeChecksum computes the checksum for the header block.
|
||||||
|
// POSIX specifies a sum of the unsigned byte values, but the Sun tar used
|
||||||
|
// signed byte values.
|
||||||
|
// We compute and return both.
|
||||||
|
func (b *block) ComputeChecksum() (unsigned, signed int64) { |
||||||
|
for i, c := range b { |
||||||
|
if 148 <= i && i < 156 { |
||||||
|
c = ' ' // Treat the checksum field itself as all spaces.
|
||||||
|
} |
||||||
|
unsigned += int64(c) |
||||||
|
signed += int64(int8(c)) |
||||||
|
} |
||||||
|
return unsigned, signed |
||||||
|
} |
||||||
|
|
||||||
|
// Reset clears the block with all zeros.
|
||||||
|
func (b *block) Reset() { |
||||||
|
*b = block{} |
||||||
|
} |
||||||
|
|
||||||
|
// headerV7 provides field accessors for the original Unix V7 header
// layout. Each accessor returns a sub-slice aliasing the header buffer.
type headerV7 [blockSize]byte

func (h *headerV7) Name() []byte     { return h[000:][:100] }
func (h *headerV7) Mode() []byte     { return h[100:][:8] }
func (h *headerV7) UID() []byte      { return h[108:][:8] }
func (h *headerV7) GID() []byte      { return h[116:][:8] }
func (h *headerV7) Size() []byte     { return h[124:][:12] }
func (h *headerV7) ModTime() []byte  { return h[136:][:12] }
func (h *headerV7) Chksum() []byte   { return h[148:][:8] }
func (h *headerV7) TypeFlag() []byte { return h[156:][:1] }
func (h *headerV7) LinkName() []byte { return h[157:][:100] }
||||||
|
|
||||||
|
// headerGNU provides field accessors for the GNU header layout, which
// extends the V7 layout starting at byte offset 257.
type headerGNU [blockSize]byte

func (h *headerGNU) V7() *headerV7       { return (*headerV7)(h) }
func (h *headerGNU) Magic() []byte       { return h[257:][:6] }
func (h *headerGNU) Version() []byte     { return h[263:][:2] }
func (h *headerGNU) UserName() []byte    { return h[265:][:32] }
func (h *headerGNU) GroupName() []byte   { return h[297:][:32] }
func (h *headerGNU) DevMajor() []byte    { return h[329:][:8] }
func (h *headerGNU) DevMinor() []byte    { return h[337:][:8] }
func (h *headerGNU) AccessTime() []byte  { return h[345:][:12] }
func (h *headerGNU) ChangeTime() []byte  { return h[357:][:12] }
func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) }
func (h *headerGNU) RealSize() []byte    { return h[483:][:12] }
||||||
|
|
||||||
|
// headerSTAR provides field accessors for Schily's STAR header layout,
// which extends the V7 layout starting at byte offset 257.
type headerSTAR [blockSize]byte

func (h *headerSTAR) V7() *headerV7      { return (*headerV7)(h) }
func (h *headerSTAR) Magic() []byte      { return h[257:][:6] }
func (h *headerSTAR) Version() []byte    { return h[263:][:2] }
func (h *headerSTAR) UserName() []byte   { return h[265:][:32] }
func (h *headerSTAR) GroupName() []byte  { return h[297:][:32] }
func (h *headerSTAR) DevMajor() []byte   { return h[329:][:8] }
func (h *headerSTAR) DevMinor() []byte   { return h[337:][:8] }
func (h *headerSTAR) Prefix() []byte     { return h[345:][:131] }
func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] }
func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] }
func (h *headerSTAR) Trailer() []byte    { return h[508:][:4] }
||||||
|
|
||||||
|
// headerUSTAR provides field accessors for the POSIX.1-1988 USTAR header
// layout, which extends the V7 layout starting at byte offset 257.
type headerUSTAR [blockSize]byte

func (h *headerUSTAR) V7() *headerV7     { return (*headerV7)(h) }
func (h *headerUSTAR) Magic() []byte     { return h[257:][:6] }
func (h *headerUSTAR) Version() []byte   { return h[263:][:2] }
func (h *headerUSTAR) UserName() []byte  { return h[265:][:32] }
func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] }
func (h *headerUSTAR) DevMajor() []byte  { return h[329:][:8] }
func (h *headerUSTAR) DevMinor() []byte  { return h[337:][:8] }
func (h *headerUSTAR) Prefix() []byte    { return h[345:][:155] }
||||||
|
|
||||||
|
// sparseArray is a view over a sequence of 24-byte sparse entries followed
// by a one-byte "is extended" flag, as stored in old GNU sparse headers.
type sparseArray []byte

func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) }
func (s sparseArray) IsExtended() []byte     { return s[24*s.MaxEntries():][:1] }
func (s sparseArray) MaxEntries() int        { return len(s) / 24 }

// sparseElem is a single 24-byte sparse map entry: a 12-byte octal offset
// followed by a 12-byte octal length.
type sparseElem []byte

func (s sparseElem) Offset() []byte { return s[00:][:12] }
func (s sparseElem) Length() []byte { return s[12:][:12] }
@ -0,0 +1,923 @@ |
|||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package tar |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"io" |
||||||
|
"io/ioutil" |
||||||
|
"strconv" |
||||||
|
"strings" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
// Reader provides sequential access to the contents of a tar archive.
// Reader.Next advances to the next file in the archive (including the first),
// and then Reader can be treated as an io.Reader to access the file's data.
type Reader struct {
	r    io.Reader
	pad  int64      // Amount of padding (ignored) after current file entry
	curr fileReader // Reader for current file entry
	blk  block      // Buffer to use as temporary local storage

	// err is a persistent error.
	// It is only the responsibility of every exported method of Reader to
	// ensure that this error is sticky.
	err error

	RawAccounting bool          // Whether to enable the access needed to reassemble the tar from raw bytes. Some performance/memory hit for this.
	rawBytes      *bytes.Buffer // last raw bits
}
||||||
|
|
||||||
|
// fileReader reads the data section of a single archive entry and tracks
// how much of it remains.
type fileReader interface {
	io.Reader
	fileState

	WriteTo(io.Writer) (int64, error)
}
||||||
|
|
||||||
|
// RawBytes accesses the raw bytes of the archive, apart from the file payload itself.
// This includes the header and padding.
//
// This call resets the current rawbytes buffer
//
// Only when RawAccounting is enabled, otherwise this returns nil
func (tr *Reader) RawBytes() []byte {
	if !tr.RawAccounting {
		return nil
	}
	if tr.rawBytes == nil {
		tr.rawBytes = bytes.NewBuffer(nil)
	}
	defer tr.rawBytes.Reset() // if we've read them, then flush them.

	return tr.rawBytes.Bytes()
}
||||||
|
|
||||||
|
// NewReader creates a new Reader reading from r.
|
||||||
|
func NewReader(r io.Reader) *Reader { |
||||||
|
return &Reader{r: r, curr: ®FileReader{r, 0}} |
||||||
|
} |
||||||
|
|
||||||
|
// Next advances to the next entry in the tar archive.
// The Header.Size determines how many bytes can be read for the next file.
// Any remaining data in the current file is automatically discarded.
//
// io.EOF is returned at the end of the input.
func (tr *Reader) Next() (*Header, error) {
	// Any previous error is sticky: once the Reader fails, it stays failed.
	if tr.err != nil {
		return nil, tr.err
	}
	hdr, err := tr.next()
	tr.err = err
	return hdr, err
}
||||||
|
|
||||||
|
// next reads headers until it reaches a real file entry, transparently
// consuming the meta "files" (PAX extended headers, GNU long name/link
// records) that describe the following entry and merging their contents
// into the returned Header.
func (tr *Reader) next() (*Header, error) {
	var paxHdrs map[string]string
	var gnuLongName, gnuLongLink string

	// Lazily (re)initialize the raw-byte accounting buffer for this entry.
	if tr.RawAccounting {
		if tr.rawBytes == nil {
			tr.rawBytes = bytes.NewBuffer(nil)
		} else {
			tr.rawBytes.Reset()
		}
	}

	// Externally, Next iterates through the tar archive as if it is a series of
	// files. Internally, the tar format often uses fake "files" to add meta
	// data that describes the next file. These meta data "files" should not
	// normally be visible to the outside. As such, this loop iterates through
	// one or more "header files" until it finds a "normal file".
	format := FormatUSTAR | FormatPAX | FormatGNU
	for {
		// Discard the remainder of the file and any padding.
		if err := discard(tr, tr.curr.PhysicalRemaining()); err != nil {
			return nil, err
		}
		n, err := tryReadFull(tr.r, tr.blk[:tr.pad])
		if err != nil {
			return nil, err
		}
		if tr.RawAccounting {
			tr.rawBytes.Write(tr.blk[:n])
		}
		tr.pad = 0

		hdr, rawHdr, err := tr.readHeader()
		if err != nil {
			return nil, err
		}
		if err := tr.handleRegularFile(hdr); err != nil {
			return nil, err
		}
		format.mayOnlyBe(hdr.Format)

		// Check for PAX/GNU special headers and files.
		switch hdr.Typeflag {
		case TypeXHeader, TypeXGlobalHeader:
			format.mayOnlyBe(FormatPAX)
			paxHdrs, err = parsePAX(tr)
			if err != nil {
				return nil, err
			}
			if hdr.Typeflag == TypeXGlobalHeader {
				// Global headers are returned directly to the caller;
				// they do not describe a following file.
				mergePAX(hdr, paxHdrs)
				return &Header{
					Name:       hdr.Name,
					Typeflag:   hdr.Typeflag,
					Xattrs:     hdr.Xattrs,
					PAXRecords: hdr.PAXRecords,
					Format:     format,
				}, nil
			}
			continue // This is a meta header affecting the next header
		case TypeGNULongName, TypeGNULongLink:
			format.mayOnlyBe(FormatGNU)
			realname, err := ioutil.ReadAll(tr)
			if err != nil {
				return nil, err
			}

			if tr.RawAccounting {
				tr.rawBytes.Write(realname)
			}

			var p parser
			switch hdr.Typeflag {
			case TypeGNULongName:
				gnuLongName = p.parseString(realname)
			case TypeGNULongLink:
				gnuLongLink = p.parseString(realname)
			}
			continue // This is a meta header affecting the next header
		default:
			// The old GNU sparse format is handled here since it is technically
			// just a regular file with additional attributes.

			if err := mergePAX(hdr, paxHdrs); err != nil {
				return nil, err
			}
			if gnuLongName != "" {
				hdr.Name = gnuLongName
			}
			if gnuLongLink != "" {
				hdr.Linkname = gnuLongLink
			}
			if hdr.Typeflag == TypeRegA {
				if strings.HasSuffix(hdr.Name, "/") {
					hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
				} else {
					hdr.Typeflag = TypeReg
				}
			}

			// The extended headers may have updated the size.
			// Thus, setup the regFileReader again after merging PAX headers.
			if err := tr.handleRegularFile(hdr); err != nil {
				return nil, err
			}

			// Sparse formats rely on being able to read from the logical data
			// section; there must be a preceding call to handleRegularFile.
			if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
				return nil, err
			}

			// Set the final guess at the format.
			if format.has(FormatUSTAR) && format.has(FormatPAX) {
				format.mayOnlyBe(FormatUSTAR)
			}
			hdr.Format = format
			return hdr, nil // This is a file, so stop
		}
	}
}
||||||
|
|
||||||
|
// handleRegularFile sets up the current file reader and padding such that it
// can only read the following logical data section. It will properly handle
// special headers that contain no data section.
func (tr *Reader) handleRegularFile(hdr *Header) error {
	nb := hdr.Size
	// Header-only types (links, devices, dirs, fifos) never carry data,
	// regardless of what the size field says.
	if isHeaderOnlyType(hdr.Typeflag) {
		nb = 0
	}
	if nb < 0 {
		return ErrHeader
	}

	tr.pad = blockPadding(nb)
	tr.curr = &regFileReader{r: tr.r, nb: nb}
	return nil
}
||||||
|
|
||||||
|
// handleSparseFile checks if the current file is a sparse format of any type
// and sets the curr reader appropriately.
func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
	var spd sparseDatas
	var err error
	if hdr.Typeflag == TypeGNUSparse {
		spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
	} else {
		spd, err = tr.readGNUSparsePAXHeaders(hdr)
	}

	// If sp is non-nil, then this is a sparse file.
	// Note that it is possible for len(sp) == 0.
	if err == nil && spd != nil {
		if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
			return ErrHeader
		}
		// Wrap the current reader so reads see the holes as zeros.
		sph := invertSparseEntries(spd, hdr.Size)
		tr.curr = &sparseFileReader{tr.curr, sph, 0}
	}
	return err
}
||||||
|
|
||||||
|
// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
// If they are found, then this function reads the sparse map and returns it.
// This assumes that 0.0 headers have already been converted to 0.1 headers
// by the PAX header parsing logic.
func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
	// Identify the version of GNU headers.
	var is1x0 bool
	major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
	switch {
	case major == "0" && (minor == "0" || minor == "1"):
		is1x0 = false
	case major == "1" && minor == "0":
		is1x0 = true
	case major != "" || minor != "":
		return nil, nil // Unknown GNU sparse PAX version
	case hdr.PAXRecords[paxGNUSparseMap] != "":
		is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
	default:
		return nil, nil // Not a PAX format GNU sparse file.
	}
	hdr.Format.mayOnlyBe(FormatPAX)

	// Update hdr from GNU sparse PAX headers.
	if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
		hdr.Name = name
	}
	size := hdr.PAXRecords[paxGNUSparseSize]
	if size == "" {
		size = hdr.PAXRecords[paxGNUSparseRealSize]
	}
	if size != "" {
		n, err := strconv.ParseInt(size, 10, 64)
		if err != nil {
			return nil, ErrHeader
		}
		hdr.Size = n
	}

	// Read the sparse map according to the appropriate format.
	if is1x0 {
		return readGNUSparseMap1x0(tr.curr)
	}
	return readGNUSparseMap0x1(hdr.PAXRecords)
}
||||||
|
|
||||||
|
// mergePAX merges paxHdrs into hdr for all relevant fields of Header.
// It returns ErrHeader if any record value fails to parse.
func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
	for k, v := range paxHdrs {
		if v == "" {
			continue // Keep the original USTAR value
		}
		var id64 int64
		switch k {
		case paxPath:
			hdr.Name = v
		case paxLinkpath:
			hdr.Linkname = v
		case paxUname:
			hdr.Uname = v
		case paxGname:
			hdr.Gname = v
		case paxUid:
			id64, err = strconv.ParseInt(v, 10, 64)
			hdr.Uid = int(id64) // Integer overflow possible
		case paxGid:
			id64, err = strconv.ParseInt(v, 10, 64)
			hdr.Gid = int(id64) // Integer overflow possible
		case paxAtime:
			hdr.AccessTime, err = parsePAXTime(v)
		case paxMtime:
			hdr.ModTime, err = parsePAXTime(v)
		case paxCtime:
			hdr.ChangeTime, err = parsePAXTime(v)
		case paxSize:
			hdr.Size, err = strconv.ParseInt(v, 10, 64)
		default:
			// Schily-style extended attribute records are collected into
			// the Xattrs map, keyed by the suffix after the prefix.
			if strings.HasPrefix(k, paxSchilyXattr) {
				if hdr.Xattrs == nil {
					hdr.Xattrs = make(map[string]string)
				}
				hdr.Xattrs[k[len(paxSchilyXattr):]] = v
			}
		}
		if err != nil {
			return ErrHeader
		}
	}
	hdr.PAXRecords = paxHdrs
	return nil
}
||||||
|
|
||||||
|
// parsePAX parses PAX headers.
// If an extended header (type 'x') is invalid, ErrHeader is returned
func parsePAX(r io.Reader) (map[string]string, error) {
	buf, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	// leaving this function for io.Reader makes it more testable
	if tr, ok := r.(*Reader); ok && tr.RawAccounting {
		if _, err = tr.rawBytes.Write(buf); err != nil {
			return nil, err
		}
	}
	sbuf := string(buf)

	// For GNU PAX sparse format 0.0 support.
	// This function transforms the sparse format 0.0 headers into format 0.1
	// headers since 0.0 headers were not PAX compliant.
	var sparseMap []string

	paxHdrs := make(map[string]string)
	for len(sbuf) > 0 {
		key, value, residual, err := parsePAXRecord(sbuf)
		if err != nil {
			return nil, ErrHeader
		}
		sbuf = residual

		switch key {
		case paxGNUSparseOffset, paxGNUSparseNumBytes:
			// Validate sparse header order and value.
			// Offsets and lengths must strictly alternate, and values must
			// not contain the "," used as the 0.1 map separator.
			if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
				(len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
				strings.Contains(value, ",") {
				return nil, ErrHeader
			}
			sparseMap = append(sparseMap, value)
		default:
			paxHdrs[key] = value
		}
	}
	if len(sparseMap) > 0 {
		paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
	}
	return paxHdrs, nil
}
||||||
|
|
||||||
|
// readHeader reads the next block header and assumes that the underlying reader
// is already aligned to a block boundary. It returns the raw block of the
// header in case further processing is required.
//
// The err will be set to io.EOF only when one of the following occurs:
//   - Exactly 0 bytes are read and EOF is hit.
//   - Exactly 1 block of zeros is read and EOF is hit.
//   - At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() (*Header, *block, error) {
	// Two blocks of zero bytes marks the end of the archive.
	n, err := io.ReadFull(tr.r, tr.blk[:])
	if tr.RawAccounting && (err == nil || err == io.EOF) {
		tr.rawBytes.Write(tr.blk[:n])
	}
	if err != nil {
		return nil, nil, err // EOF is okay here; exactly 0 bytes read
	}

	if bytes.Equal(tr.blk[:], zeroBlock[:]) {
		n, err = io.ReadFull(tr.r, tr.blk[:])
		if tr.RawAccounting && (err == nil || err == io.EOF) {
			tr.rawBytes.Write(tr.blk[:n])
		}
		if err != nil {
			return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
		}
		if bytes.Equal(tr.blk[:], zeroBlock[:]) {
			return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read
		}
		return nil, nil, ErrHeader // Zero block and then non-zero block
	}

	// Verify the header matches a known format.
	format := tr.blk.GetFormat()
	if format == FormatUnknown {
		return nil, nil, ErrHeader
	}

	var p parser
	hdr := new(Header)

	// Unpack the V7 header.
	v7 := tr.blk.V7()
	hdr.Typeflag = v7.TypeFlag()[0]
	hdr.Name = p.parseString(v7.Name())
	hdr.Linkname = p.parseString(v7.LinkName())
	hdr.Size = p.parseNumeric(v7.Size())
	hdr.Mode = p.parseNumeric(v7.Mode())
	hdr.Uid = int(p.parseNumeric(v7.UID()))
	hdr.Gid = int(p.parseNumeric(v7.GID()))
	hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)

	// Unpack format specific fields.
	if format > formatV7 {
		ustar := tr.blk.USTAR()
		hdr.Uname = p.parseString(ustar.UserName())
		hdr.Gname = p.parseString(ustar.GroupName())
		hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
		hdr.Devminor = p.parseNumeric(ustar.DevMinor())

		var prefix string
		switch {
		case format.has(FormatUSTAR | FormatPAX):
			hdr.Format = format
			ustar := tr.blk.USTAR()
			prefix = p.parseString(ustar.Prefix())

			// For Format detection, check if block is properly formatted since
			// the parser is more liberal than what USTAR actually permits.
			notASCII := func(r rune) bool { return r >= 0x80 }
			if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
				hdr.Format = FormatUnknown // Non-ASCII characters in block.
			}
			nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
			if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) &&
				nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) {
				hdr.Format = FormatUnknown // Numeric fields must end in NUL
			}
		case format.has(formatSTAR):
			star := tr.blk.STAR()
			prefix = p.parseString(star.Prefix())
			hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
			hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
		case format.has(FormatGNU):
			hdr.Format = format
			var p2 parser
			gnu := tr.blk.GNU()
			if b := gnu.AccessTime(); b[0] != 0 {
				hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
			}
			if b := gnu.ChangeTime(); b[0] != 0 {
				hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
			}

			// Prior to Go1.8, the Writer had a bug where it would output
			// an invalid tar file in certain rare situations because the logic
			// incorrectly believed that the old GNU format had a prefix field.
			// This is wrong and leads to an output file that mangles the
			// atime and ctime fields, which are often left unused.
			//
			// In order to continue reading tar files created by former, buggy
			// versions of Go, we skeptically parse the atime and ctime fields.
			// If we are unable to parse them and the prefix field looks like
			// an ASCII string, then we fallback on the pre-Go1.8 behavior
			// of treating these fields as the USTAR prefix field.
			//
			// Note that this will not use the fallback logic for all possible
			// files generated by a pre-Go1.8 toolchain. If the generated file
			// happened to have a prefix field that parses as valid
			// atime and ctime fields (e.g., when they are valid octal strings),
			// then it is impossible to distinguish between an valid GNU file
			// and an invalid pre-Go1.8 file.
			//
			// See https://golang.org/issues/12594
			// See https://golang.org/issues/21005
			if p2.err != nil {
				hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
				ustar := tr.blk.USTAR()
				if s := p.parseString(ustar.Prefix()); isASCII(s) {
					prefix = s
				}
				hdr.Format = FormatUnknown // Buggy file is not GNU
			}
		}
		if len(prefix) > 0 {
			hdr.Name = prefix + "/" + hdr.Name
		}
	}
	return hdr, &tr.blk, p.err
}
||||||
|
|
||||||
|
// readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough.
// If it's larger than four entries, then one or more extension headers are used
// to store the rest of the sparse map.
//
// The Header.Size does not reflect the size of any extended headers used.
// Thus, this function will read from the raw io.Reader to fetch extra headers.
// This method mutates blk in the process.
func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
	// Make sure that the input format is GNU.
	// Unfortunately, the STAR format also has a sparse header format that uses
	// the same type flag but has a completely different layout.
	if blk.GetFormat() != FormatGNU {
		return nil, ErrHeader
	}
	hdr.Format.mayOnlyBe(FormatGNU)

	// The real (logical) size lives in the GNU header, not Header.Size.
	var p parser
	hdr.Size = p.parseNumeric(blk.GNU().RealSize())
	if p.err != nil {
		return nil, p.err
	}
	s := blk.GNU().Sparse()
	spd := make(sparseDatas, 0, s.MaxEntries())
	for {
		for i := 0; i < s.MaxEntries(); i++ {
			// This termination condition is identical to GNU and BSD tar.
			if s.Entry(i).Offset()[0] == 0x00 {
				break // Don't return, need to process extended headers (even if empty)
			}
			offset := p.parseNumeric(s.Entry(i).Offset())
			length := p.parseNumeric(s.Entry(i).Length())
			if p.err != nil {
				return nil, p.err
			}
			spd = append(spd, sparseEntry{Offset: offset, Length: length})
		}

		if s.IsExtended()[0] > 0 {
			// There are more entries. Read an extension header and parse its entries.
			if _, err := mustReadFull(tr.r, blk[:]); err != nil {
				return nil, err
			}
			if tr.RawAccounting {
				// Copy the extension block into the raw-bytes buffer as well.
				tr.rawBytes.Write(blk[:])
			}
			s = blk.Sparse()
			continue
		}
		return spd, nil // Done
	}
}
||||||
|
|
||||||
|
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
// version 1.0. The format of the sparse map consists of a series of
// newline-terminated numeric fields. The first field is the number of entries
// and is always present. Following this are the entries, consisting of two
// fields (offset, length). This function must stop reading at the end
// boundary of the block containing the last newline.
//
// Note that the GNU manual says that numeric values should be encoded in octal
// format. However, the GNU tar utility itself outputs these values in decimal.
// As such, this library treats values as being encoded in decimal.
func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
	var (
		cntNewline int64        // Number of newlines currently buffered in buf
		buf        bytes.Buffer // Data read from r so far, in whole 512-byte blocks
		blk        block        // Scratch block used for each read from r
	)

	// feedTokens copies data in blocks from r into buf until there are
	// at least cnt newlines in buf. It will not read more blocks than needed.
	feedTokens := func(n int64) error {
		for cntNewline < n {
			if _, err := mustReadFull(r, blk[:]); err != nil {
				return err
			}
			buf.Write(blk[:])
			for _, c := range blk {
				if c == '\n' {
					cntNewline++
				}
			}
		}
		return nil
	}

	// nextToken gets the next token delimited by a newline. This assumes that
	// at least one newline exists in the buffer.
	nextToken := func() string {
		cntNewline--
		tok, _ := buf.ReadString('\n')
		return strings.TrimRight(tok, "\n")
	}

	// Parse for the number of entries.
	// Use integer overflow resistant math to check this.
	if err := feedTokens(1); err != nil {
		return nil, err
	}
	numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
		return nil, ErrHeader
	}

	// Parse for all member entries.
	// numEntries is trusted after this since a potential attacker must have
	// committed resources proportional to what this library used.
	if err := feedTokens(2 * numEntries); err != nil {
		return nil, err
	}
	spd := make(sparseDatas, 0, numEntries)
	for i := int64(0); i < numEntries; i++ {
		offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
		length, err2 := strconv.ParseInt(nextToken(), 10, 64)
		if err1 != nil || err2 != nil {
			return nil, ErrHeader
		}
		spd = append(spd, sparseEntry{Offset: offset, Length: length})
	}
	return spd, nil
}
||||||
|
|
||||||
|
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
|
||||||
|
// version 0.1. The sparse map is stored in the PAX headers.
|
||||||
|
func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) { |
||||||
|
// Get number of entries.
|
||||||
|
// Use integer overflow resistant math to check this.
|
||||||
|
numEntriesStr := paxHdrs[paxGNUSparseNumBlocks] |
||||||
|
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
|
||||||
|
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { |
||||||
|
return nil, ErrHeader |
||||||
|
} |
||||||
|
|
||||||
|
// There should be two numbers in sparseMap for each entry.
|
||||||
|
sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",") |
||||||
|
if len(sparseMap) == 1 && sparseMap[0] == "" { |
||||||
|
sparseMap = sparseMap[:0] |
||||||
|
} |
||||||
|
if int64(len(sparseMap)) != 2*numEntries { |
||||||
|
return nil, ErrHeader |
||||||
|
} |
||||||
|
|
||||||
|
// Loop through the entries in the sparse map.
|
||||||
|
// numEntries is trusted now.
|
||||||
|
spd := make(sparseDatas, 0, numEntries) |
||||||
|
for len(sparseMap) >= 2 { |
||||||
|
offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64) |
||||||
|
length, err2 := strconv.ParseInt(sparseMap[1], 10, 64) |
||||||
|
if err1 != nil || err2 != nil { |
||||||
|
return nil, ErrHeader |
||||||
|
} |
||||||
|
spd = append(spd, sparseEntry{Offset: offset, Length: length}) |
||||||
|
sparseMap = sparseMap[2:] |
||||||
|
} |
||||||
|
return spd, nil |
||||||
|
} |
||||||
|
|
||||||
|
// Read reads from the current file in the tar archive.
|
||||||
|
// It returns (0, io.EOF) when it reaches the end of that file,
|
||||||
|
// until Next is called to advance to the next file.
|
||||||
|
//
|
||||||
|
// If the current file is sparse, then the regions marked as a hole
|
||||||
|
// are read back as NUL-bytes.
|
||||||
|
//
|
||||||
|
// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
|
||||||
|
// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
|
||||||
|
// the Header.Size claims.
|
||||||
|
func (tr *Reader) Read(b []byte) (int, error) { |
||||||
|
if tr.err != nil { |
||||||
|
return 0, tr.err |
||||||
|
} |
||||||
|
n, err := tr.curr.Read(b) |
||||||
|
if err != nil && err != io.EOF { |
||||||
|
tr.err = err |
||||||
|
} |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
// writeTo writes the content of the current file to w.
|
||||||
|
// The bytes written matches the number of remaining bytes in the current file.
|
||||||
|
//
|
||||||
|
// If the current file is sparse and w is an io.WriteSeeker,
|
||||||
|
// then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
|
||||||
|
// assuming that skipped regions are filled with NULs.
|
||||||
|
// This always writes the last byte to ensure w is the right size.
|
||||||
|
//
|
||||||
|
// TODO(dsnet): Re-export this when adding sparse file support.
|
||||||
|
// See https://golang.org/issue/22735
|
||||||
|
func (tr *Reader) writeTo(w io.Writer) (int64, error) { |
||||||
|
if tr.err != nil { |
||||||
|
return 0, tr.err |
||||||
|
} |
||||||
|
n, err := tr.curr.WriteTo(w) |
||||||
|
if err != nil { |
||||||
|
tr.err = err |
||||||
|
} |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
// regFileReader is a fileReader for reading data from a regular file entry.
// It limits reads to the nb bytes belonging to the current entry.
type regFileReader struct {
	r  io.Reader // Underlying Reader
	nb int64     // Number of remaining bytes to read
}
||||||
|
|
||||||
|
func (fr *regFileReader) Read(b []byte) (n int, err error) { |
||||||
|
if int64(len(b)) > fr.nb { |
||||||
|
b = b[:fr.nb] |
||||||
|
} |
||||||
|
if len(b) > 0 { |
||||||
|
n, err = fr.r.Read(b) |
||||||
|
fr.nb -= int64(n) |
||||||
|
} |
||||||
|
switch { |
||||||
|
case err == io.EOF && fr.nb > 0: |
||||||
|
return n, io.ErrUnexpectedEOF |
||||||
|
case err == nil && fr.nb == 0: |
||||||
|
return n, io.EOF |
||||||
|
default: |
||||||
|
return n, err |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) { |
||||||
|
return io.Copy(w, struct{ io.Reader }{fr}) |
||||||
|
} |
||||||
|
|
||||||
|
// LogicalRemaining reports the number of bytes left in the logical file;
// for a regular (non-sparse) entry this equals the physical bytes left.
func (fr regFileReader) LogicalRemaining() int64 {
	return fr.nb
}

// PhysicalRemaining reports the number of bytes left to read from the
// underlying archive stream for this entry.
func (fr regFileReader) PhysicalRemaining() int64 {
	return fr.nb
}
||||||
|
|
||||||
|
// sparseFileReader is a fileReader for reading data from a sparse file entry.
// It reconstructs the logical file by interleaving data read from fr with
// NUL-filled holes described by sp.
type sparseFileReader struct {
	fr  fileReader  // Underlying fileReader
	sp  sparseHoles // Normalized list of sparse holes
	pos int64       // Current position in sparse file
}
||||||
|
|
||||||
|
// Read reconstructs the logical (sparse) file: data fragments come from the
// underlying fileReader and hole fragments are synthesized as NUL bytes.
func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
	// Clamp the read to the logical end of the file.
	finished := int64(len(b)) >= sr.LogicalRemaining()
	if finished {
		b = b[:sr.LogicalRemaining()]
	}

	b0 := b
	endPos := sr.pos + int64(len(b))
	for endPos > sr.pos && err == nil {
		var nf int // Bytes read in fragment
		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
		if sr.pos < holeStart { // In a data fragment
			bf := b[:min(int64(len(b)), holeStart-sr.pos)]
			nf, err = tryReadFull(sr.fr, bf)
		} else { // In a hole fragment
			bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
			nf, err = tryReadFull(zeroReader{}, bf)
		}
		b = b[nf:]
		sr.pos += int64(nf)
		if sr.pos >= holeEnd && len(sr.sp) > 1 {
			sr.sp = sr.sp[1:] // Ensure last fragment always remains
		}
	}

	n = len(b0) - len(b)
	switch {
	case err == io.EOF:
		return n, errMissData // Less data in dense file than sparse file
	case err != nil:
		return n, err
	case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
		return n, errUnrefData // More data in dense file than sparse file
	case finished:
		return n, io.EOF
	default:
		return n, nil
	}
}
||||||
|
|
||||||
|
// WriteTo writes the logical file contents to w. If w is an io.WriteSeeker
// that actually supports seeking, holes are skipped with Seek rather than
// written out as NULs; otherwise it falls back to a plain copy through Read.
func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
	ws, ok := w.(io.WriteSeeker)
	if ok {
		if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
			ok = false // Not all io.Seeker can really seek
		}
	}
	if !ok {
		// Hide this WriteTo from io.Copy so it uses plain Reads on sr.
		return io.Copy(w, struct{ io.Reader }{sr})
	}

	var writeLastByte bool
	pos0 := sr.pos
	for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil {
		var nf int64 // Size of fragment
		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
		if sr.pos < holeStart { // In a data fragment
			nf = holeStart - sr.pos
			nf, err = io.CopyN(ws, sr.fr, nf)
		} else { // In a hole fragment
			nf = holeEnd - sr.pos
			if sr.PhysicalRemaining() == 0 {
				// Trailing hole: stop one byte short so we can write it.
				writeLastByte = true
				nf--
			}
			_, err = ws.Seek(nf, io.SeekCurrent)
		}
		sr.pos += nf
		if sr.pos >= holeEnd && len(sr.sp) > 1 {
			sr.sp = sr.sp[1:] // Ensure last fragment always remains
		}
	}

	// If the last fragment is a hole, then seek to 1-byte before EOF, and
	// write a single byte to ensure the file is the right size.
	if writeLastByte && err == nil {
		_, err = ws.Write([]byte{0})
		sr.pos++
	}

	n = sr.pos - pos0
	switch {
	case err == io.EOF:
		return n, errMissData // Less data in dense file than sparse file
	case err != nil:
		return n, err
	case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
		return n, errUnrefData // More data in dense file than sparse file
	default:
		return n, nil
	}
}
||||||
|
|
||||||
|
// LogicalRemaining reports the bytes left in the logical file, measured to
// the end offset of the final fragment in sp (NOTE(review): for a normalized
// hole list this appears to be the logical file size — confirm upstream).
func (sr sparseFileReader) LogicalRemaining() int64 {
	return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
}

// PhysicalRemaining reports the data bytes left in the underlying dense
// (archived) representation.
func (sr sparseFileReader) PhysicalRemaining() int64 {
	return sr.fr.PhysicalRemaining()
}
||||||
|
|
||||||
|
// zeroReader is an io.Reader that yields an endless stream of NUL bytes.
type zeroReader struct{}

// Read fills b entirely with zeros and never fails.
func (zeroReader) Read(b []byte) (int, error) {
	for i := 0; i < len(b); i++ {
		b[i] = 0
	}
	return len(b), nil
}
||||||
|
|
||||||
|
// mustReadFull is like io.ReadFull except it returns
|
||||||
|
// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
|
||||||
|
func mustReadFull(r io.Reader, b []byte) (int, error) { |
||||||
|
n, err := tryReadFull(r, b) |
||||||
|
if err == io.EOF { |
||||||
|
err = io.ErrUnexpectedEOF |
||||||
|
} |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
// tryReadFull is like io.ReadFull except it returns
|
||||||
|
// io.EOF when it is hit before len(b) bytes are read.
|
||||||
|
func tryReadFull(r io.Reader, b []byte) (n int, err error) { |
||||||
|
for len(b) > n && err == nil { |
||||||
|
var nn int |
||||||
|
nn, err = r.Read(b[n:]) |
||||||
|
n += nn |
||||||
|
} |
||||||
|
if len(b) == n && err == io.EOF { |
||||||
|
err = nil |
||||||
|
} |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
// discard skips n bytes in r, reporting an error if unable to do so.
// When raw accounting is enabled, the skipped bytes are copied into
// tr.rawBytes instead of being thrown away.
func discard(tr *Reader, n int64) error {
	var seekSkipped, copySkipped int64
	var err error
	r := tr.r
	if tr.RawAccounting {
		// Raw accounting needs every byte, so the Seek fast path
		// below cannot be used.
		copySkipped, err = io.CopyN(tr.rawBytes, tr.r, n)
		goto out
	}

	// If possible, Seek to the last byte before the end of the data section.
	// Do this because Seek is often lazy about reporting errors; this will mask
	// the fact that the stream may be truncated. We can rely on the
	// io.CopyN done shortly afterwards to trigger any IO errors.
	if sr, ok := r.(io.Seeker); ok && n > 1 {
		// Not all io.Seeker can actually Seek. For example, os.Stdin implements
		// io.Seeker, but calling Seek always returns an error and performs
		// no action. Thus, we try an innocent seek to the current position
		// to see if Seek is really supported.
		pos1, err := sr.Seek(0, io.SeekCurrent)
		if pos1 >= 0 && err == nil {
			// Seek seems supported, so perform the real Seek.
			pos2, err := sr.Seek(n-1, io.SeekCurrent)
			if pos2 < 0 || err != nil {
				return err
			}
			seekSkipped = pos2 - pos1
		}
	}

	// Copy (and discard) whatever Seek did not cover.
	copySkipped, err = io.CopyN(ioutil.Discard, r, n-seekSkipped)
out:
	if err == io.EOF && seekSkipped+copySkipped < n {
		err = io.ErrUnexpectedEOF
	}
	return err
}
@ -0,0 +1,20 @@ |
|||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux dragonfly openbsd solaris
|
||||||
|
|
||||||
|
package tar |
||||||
|
|
||||||
|
import ( |
||||||
|
"syscall" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
// statAtime returns the access time recorded in st (Atim field variant).
func statAtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Atim.Unix())
}

// statCtime returns the change time recorded in st (Ctim field variant).
func statCtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Ctim.Unix())
}
@ -0,0 +1,20 @@ |
|||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build darwin freebsd netbsd
|
||||||
|
|
||||||
|
package tar |
||||||
|
|
||||||
|
import ( |
||||||
|
"syscall" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
// statAtime returns the access time recorded in st (Atimespec field variant).
func statAtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Atimespec.Unix())
}

// statCtime returns the change time recorded in st (Ctimespec field variant).
func statCtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Ctimespec.Unix())
}
@ -0,0 +1,96 @@ |
|||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
|
||||||
|
|
||||||
|
package tar |
||||||
|
|
||||||
|
import ( |
||||||
|
"os" |
||||||
|
"os/user" |
||||||
|
"runtime" |
||||||
|
"strconv" |
||||||
|
"sync" |
||||||
|
"syscall" |
||||||
|
) |
||||||
|
|
||||||
|
func init() {
	sysStat = statUnix // Register the Unix implementation of sysStat.
}

// userMap and groupMap caches UID and GID lookups for performance reasons.
// The downside is that renaming uname or gname by the OS never takes effect.
var userMap, groupMap sync.Map // map[int]string

// statUnix populates h with Unix-specific metadata (owner IDs and names,
// access/change times, and device numbers) extracted from fi's underlying
// *syscall.Stat_t, when present. It never fails; missing data is skipped.
func statUnix(fi os.FileInfo, h *Header) error {
	sys, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return nil // No syscall-level stat available; nothing to add.
	}
	h.Uid = int(sys.Uid)
	h.Gid = int(sys.Gid)

	// Best effort at populating Uname and Gname.
	// The os/user functions may fail for any number of reasons
	// (not implemented on that platform, cgo not enabled, etc).
	if u, ok := userMap.Load(h.Uid); ok {
		h.Uname = u.(string)
	} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
		h.Uname = u.Username
		userMap.Store(h.Uid, h.Uname)
	}
	if g, ok := groupMap.Load(h.Gid); ok {
		h.Gname = g.(string)
	} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
		h.Gname = g.Name
		groupMap.Store(h.Gid, h.Gname)
	}

	h.AccessTime = statAtime(sys)
	h.ChangeTime = statCtime(sys)

	// Best effort at populating Devmajor and Devminor.
	// Rdev encoding is OS-specific, hence the GOOS switch.
	if h.Typeflag == TypeChar || h.Typeflag == TypeBlock {
		dev := uint64(sys.Rdev) // May be int32 or uint32
		switch runtime.GOOS {
		case "linux":
			// Copied from golang.org/x/sys/unix/dev_linux.go.
			major := uint32((dev & 0x00000000000fff00) >> 8)
			major |= uint32((dev & 0xfffff00000000000) >> 32)
			minor := uint32((dev & 0x00000000000000ff) >> 0)
			minor |= uint32((dev & 0x00000ffffff00000) >> 12)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		case "darwin":
			// Copied from golang.org/x/sys/unix/dev_darwin.go.
			major := uint32((dev >> 24) & 0xff)
			minor := uint32(dev & 0xffffff)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		case "dragonfly":
			// Copied from golang.org/x/sys/unix/dev_dragonfly.go.
			major := uint32((dev >> 8) & 0xff)
			minor := uint32(dev & 0xffff00ff)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		case "freebsd":
			// Copied from golang.org/x/sys/unix/dev_freebsd.go.
			major := uint32((dev >> 8) & 0xff)
			minor := uint32(dev & 0xffff00ff)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		case "netbsd":
			// Copied from golang.org/x/sys/unix/dev_netbsd.go.
			major := uint32((dev & 0x000fff00) >> 8)
			minor := uint32((dev & 0x000000ff) >> 0)
			minor |= uint32((dev & 0xfff00000) >> 12)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		case "openbsd":
			// Copied from golang.org/x/sys/unix/dev_openbsd.go.
			major := uint32((dev & 0x0000ff00) >> 8)
			minor := uint32((dev & 0x000000ff) >> 0)
			minor |= uint32((dev & 0xffff0000) >> 8)
			h.Devmajor, h.Devminor = int64(major), int64(minor)
		default:
			// TODO: Implement solaris (see https://golang.org/issue/8106)
		}
	}
	return nil
}
@ -0,0 +1,326 @@ |
|||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package tar |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"fmt" |
||||||
|
"strconv" |
||||||
|
"strings" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
// hasNUL reports whether the NUL character exists within s.
func hasNUL(s string) bool {
	return strings.Contains(s, "\x00")
}
||||||
|
|
||||||
|
// isASCII reports whether the input is an ASCII C-style string,
// i.e. contains no NUL bytes and no bytes >= 0x80.
func isASCII(s string) bool {
	for i := 0; i < len(s); i++ {
		switch b := s[i]; {
		case b == 0x00, b >= 0x80:
			return false
		}
	}
	return true
}
||||||
|
|
||||||
|
// toASCII converts the input to an ASCII C-style string.
// This a best effort conversion, so invalid characters are dropped.
func toASCII(s string) string {
	// Fast path: already a valid ASCII C-style string.
	clean := true
	for _, c := range s {
		if c >= 0x80 || c == 0x00 {
			clean = false
			break
		}
	}
	if clean {
		return s
	}

	// Slow path: keep only the runes that are valid in an ASCII C-string.
	out := make([]byte, 0, len(s))
	for _, c := range s {
		if c < 0x80 && c != 0x00 {
			out = append(out, byte(c))
		}
	}
	return string(out)
}
||||||
|
|
||||||
|
// parser decodes tar header fields (C-strings, octal and base-256 numbers).
// Its methods record failures in err instead of returning them, so a run of
// parse calls can be checked once at the end.
type parser struct {
	err error // Last error seen
}

// formatter is the encoding counterpart of parser; its methods likewise
// record failures in err for batch checking.
type formatter struct {
	err error // Last error seen
}
||||||
|
|
||||||
|
// parseString parses bytes as a NUL-terminated C-style string.
|
||||||
|
// If a NUL byte is not found then the whole slice is returned as a string.
|
||||||
|
func (*parser) parseString(b []byte) string { |
||||||
|
if i := bytes.IndexByte(b, 0); i >= 0 { |
||||||
|
return string(b[:i]) |
||||||
|
} |
||||||
|
return string(b) |
||||||
|
} |
||||||
|
|
||||||
|
// formatString copies s into b, NUL-terminating if possible.
// If s is longer than b, the copy is truncated and f.err is set to
// ErrFieldTooLong.
func (f *formatter) formatString(b []byte, s string) {
	if len(s) > len(b) {
		f.err = ErrFieldTooLong
	}
	copy(b, s)
	if len(s) < len(b) {
		b[len(s)] = 0 // NUL-terminate when there is room.
	}

	// Some buggy readers treat regular files with a trailing slash
	// in the V7 path field as a directory even though the full path
	// recorded elsewhere (e.g., via PAX record) contains no trailing slash.
	if len(s) > len(b) && b[len(b)-1] == '/' {
		n := len(strings.TrimRight(s[:len(b)], "/"))
		b[n] = 0 // Replace trailing slash with NUL terminator
	}
}
||||||
|
|
||||||
|
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
	if n >= 9 {
		// Eight data bytes (plus the flag byte) hold any int64.
		return true
	}
	binBits := uint(n-1) * 8
	return -1<<binBits <= x && x < 1<<binBits
}
||||||
|
|
||||||
|
// parseNumeric parses the input as being encoded in either base-256 or octal.
// This function may return negative numbers.
// If parsing fails or an integer overflow occurs, err will be set.
func (p *parser) parseNumeric(b []byte) int64 {
	// Check for base-256 (binary) format first.
	// If the first bit is set, then all following bits constitute a two's
	// complement encoded number in big-endian byte order.
	if len(b) > 0 && b[0]&0x80 != 0 {
		// Handling negative numbers relies on the following identity:
		//	-a-1 == ^a
		//
		// If the number is negative, we use an inversion mask to invert the
		// data bytes and treat the value as an unsigned number.
		var inv byte // 0x00 if positive or zero, 0xff if negative
		if b[0]&0x40 != 0 {
			inv = 0xff
		}

		var x uint64
		for i, c := range b {
			c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
			if i == 0 {
				c &= 0x7f // Ignore signal bit in first byte
			}
			if (x >> 56) > 0 {
				p.err = ErrHeader // Integer overflow
				return 0
			}
			x = x<<8 | uint64(c)
		}
		if (x >> 63) > 0 {
			p.err = ErrHeader // Integer overflow
			return 0
		}
		if inv == 0xff {
			return ^int64(x) // Undo the inversion: ^x == -x-1
		}
		return int64(x)
	}

	// Normal case is base-8 (octal) format.
	return p.parseOctal(b)
}
||||||
|
|
||||||
|
// formatNumeric encodes x into b using base-8 (octal) encoding if possible.
// Otherwise it will attempt to use base-256 (binary) encoding.
// If x fits in neither, the field is zero-filled and f.err is set.
func (f *formatter) formatNumeric(b []byte, x int64) {
	if fitsInOctal(len(b), x) {
		f.formatOctal(b, x)
		return
	}

	if fitsInBase256(len(b), x) {
		// Emit x as big-endian two's complement, then set the top bit of
		// the first byte to flag base-256 (binary) encoding.
		for i := len(b) - 1; i >= 0; i-- {
			b[i] = byte(x)
			x >>= 8
		}
		b[0] |= 0x80 // Highest bit indicates binary format
		return
	}

	f.formatOctal(b, 0) // Last resort, just write zero
	f.err = ErrFieldTooLong
}
||||||
|
|
||||||
|
func (p *parser) parseOctal(b []byte) int64 { |
||||||
|
// Because unused fields are filled with NULs, we need
|
||||||
|
// to skip leading NULs. Fields may also be padded with
|
||||||
|
// spaces or NULs.
|
||||||
|
// So we remove leading and trailing NULs and spaces to
|
||||||
|
// be sure.
|
||||||
|
b = bytes.Trim(b, " \x00") |
||||||
|
|
||||||
|
if len(b) == 0 { |
||||||
|
return 0 |
||||||
|
} |
||||||
|
x, perr := strconv.ParseUint(p.parseString(b), 8, 64) |
||||||
|
if perr != nil { |
||||||
|
p.err = ErrHeader |
||||||
|
} |
||||||
|
return int64(x) |
||||||
|
} |
||||||
|
|
||||||
|
// formatOctal writes x into b as a zero-padded, NUL-terminated octal field.
// If x does not fit, the field is written as zero and f.err is set.
func (f *formatter) formatOctal(b []byte, x int64) {
	if !fitsInOctal(len(b), x) {
		x = 0 // Last resort, just write zero
		f.err = ErrFieldTooLong
	}

	s := strconv.FormatInt(x, 8)
	// Add leading zeros, but leave room for a NUL.
	if n := len(b) - len(s) - 1; n > 0 {
		s = strings.Repeat("0", n) + s
	}
	f.formatString(b, s)
}
||||||
|
|
||||||
|
// fitsInOctal reports whether the integer x fits in a field n-bytes long
// using octal encoding with the appropriate NUL terminator.
func fitsInOctal(n int, x int64) bool {
	if x < 0 {
		return false // Octal fields cannot encode negative values.
	}
	if n >= 22 {
		return true // 21 octal digits hold any non-negative int64.
	}
	octBits := uint(n-1) * 3
	return x < 1<<octBits
}
||||||
|
|
||||||
|
// parsePAXTime takes a string of the form %d.%d as described in the PAX
|
||||||
|
// specification. Note that this implementation allows for negative timestamps,
|
||||||
|
// which is allowed for by the PAX specification, but not always portable.
|
||||||
|
func parsePAXTime(s string) (time.Time, error) { |
||||||
|
const maxNanoSecondDigits = 9 |
||||||
|
|
||||||
|
// Split string into seconds and sub-seconds parts.
|
||||||
|
ss, sn := s, "" |
||||||
|
if pos := strings.IndexByte(s, '.'); pos >= 0 { |
||||||
|
ss, sn = s[:pos], s[pos+1:] |
||||||
|
} |
||||||
|
|
||||||
|
// Parse the seconds.
|
||||||
|
secs, err := strconv.ParseInt(ss, 10, 64) |
||||||
|
if err != nil { |
||||||
|
return time.Time{}, ErrHeader |
||||||
|
} |
||||||
|
if len(sn) == 0 { |
||||||
|
return time.Unix(secs, 0), nil // No sub-second values
|
||||||
|
} |
||||||
|
|
||||||
|
// Parse the nanoseconds.
|
||||||
|
if strings.Trim(sn, "0123456789") != "" { |
||||||
|
return time.Time{}, ErrHeader |
||||||
|
} |
||||||
|
if len(sn) < maxNanoSecondDigits { |
||||||
|
sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
|
||||||
|
} else { |
||||||
|
sn = sn[:maxNanoSecondDigits] // Right truncate
|
||||||
|
} |
||||||
|
nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
|
||||||
|
if len(ss) > 0 && ss[0] == '-' { |
||||||
|
return time.Unix(secs, -1*nsecs), nil // Negative correction
|
||||||
|
} |
||||||
|
return time.Unix(secs, nsecs), nil |
||||||
|
} |
||||||
|
|
||||||
|
// formatPAXTime converts ts into a time of the form %d.%d as described in the
|
||||||
|
// PAX specification. This function is capable of negative timestamps.
|
||||||
|
func formatPAXTime(ts time.Time) (s string) { |
||||||
|
secs, nsecs := ts.Unix(), ts.Nanosecond() |
||||||
|
if nsecs == 0 { |
||||||
|
return strconv.FormatInt(secs, 10) |
||||||
|
} |
||||||
|
|
||||||
|
// If seconds is negative, then perform correction.
|
||||||
|
sign := "" |
||||||
|
if secs < 0 { |
||||||
|
sign = "-" // Remember sign
|
||||||
|
secs = -(secs + 1) // Add a second to secs
|
||||||
|
nsecs = -(nsecs - 1E9) // Take that second away from nsecs
|
||||||
|
} |
||||||
|
return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0") |
||||||
|
} |
||||||
|
|
||||||
|
// parsePAXRecord parses the input PAX record string into a key-value pair.
|
||||||
|
// If parsing is successful, it will slice off the currently read record and
|
||||||
|
// return the remainder as r.
|
||||||
|
func parsePAXRecord(s string) (k, v, r string, err error) { |
||||||
|
// The size field ends at the first space.
|
||||||
|
sp := strings.IndexByte(s, ' ') |
||||||
|
if sp == -1 { |
||||||
|
return "", "", s, ErrHeader |
||||||
|
} |
||||||
|
|
||||||
|
// Parse the first token as a decimal integer.
|
||||||
|
n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
|
||||||
|
if perr != nil || n < 5 || int64(len(s)) < n { |
||||||
|
return "", "", s, ErrHeader |
||||||
|
} |
||||||
|
|
||||||
|
// Extract everything between the space and the final newline.
|
||||||
|
rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] |
||||||
|
if nl != "\n" { |
||||||
|
return "", "", s, ErrHeader |
||||||
|
} |
||||||
|
|
||||||
|
// The first equals separates the key from the value.
|
||||||
|
eq := strings.IndexByte(rec, '=') |
||||||
|
if eq == -1 { |
||||||
|
return "", "", s, ErrHeader |
||||||
|
} |
||||||
|
k, v = rec[:eq], rec[eq+1:] |
||||||
|
|
||||||
|
if !validPAXRecord(k, v) { |
||||||
|
return "", "", s, ErrHeader |
||||||
|
} |
||||||
|
return k, v, rem, nil |
||||||
|
} |
||||||
|
|
||||||
|
// formatPAXRecord formats a single PAX record, prefixing it with the
|
||||||
|
// appropriate length.
|
||||||
|
func formatPAXRecord(k, v string) (string, error) { |
||||||
|
if !validPAXRecord(k, v) { |
||||||
|
return "", ErrHeader |
||||||
|
} |
||||||
|
|
||||||
|
const padding = 3 // Extra padding for ' ', '=', and '\n'
|
||||||
|
size := len(k) + len(v) + padding |
||||||
|
size += len(strconv.Itoa(size)) |
||||||
|
record := strconv.Itoa(size) + " " + k + "=" + v + "\n" |
||||||
|
|
||||||
|
// Final adjustment if adding size field increased the record size.
|
||||||
|
if len(record) != size { |
||||||
|
size = len(record) |
||||||
|
record = strconv.Itoa(size) + " " + k + "=" + v + "\n" |
||||||
|
} |
||||||
|
return record, nil |
||||||
|
} |
||||||
|
|
||||||
|
// validPAXRecord reports whether the key-value pair is valid where each
|
||||||
|
// record is formatted as:
|
||||||
|
// "%d %s=%s\n" % (size, key, value)
|
||||||
|
//
|
||||||
|
// Keys and values should be UTF-8, but the number of bad writers out there
|
||||||
|
// forces us to be a more liberal.
|
||||||
|
// Thus, we only reject all keys with NUL, and only reject NULs in values
|
||||||
|
// for the PAX version of the USTAR string fields.
|
||||||
|
// The key must not contain an '=' character.
|
||||||
|
func validPAXRecord(k, v string) bool { |
||||||
|
if k == "" || strings.IndexByte(k, '=') >= 0 { |
||||||
|
return false |
||||||
|
} |
||||||
|
switch k { |
||||||
|
case paxPath, paxLinkpath, paxUname, paxGname: |
||||||
|
return !hasNUL(v) |
||||||
|
default: |
||||||
|
return !hasNUL(k) |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,653 @@ |
|||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package tar |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"path" |
||||||
|
"sort" |
||||||
|
"strings" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
// Writer provides sequential writing of a tar archive.
// Write.WriteHeader begins a new file with the provided Header,
// and then Writer can be treated as an io.Writer to supply that file's data.
type Writer struct {
	w    io.Writer
	pad  int64      // Amount of padding to write after current file entry
	curr fileWriter // Writer for current file entry
	hdr  Header     // Shallow copy of Header that is safe for mutations
	blk  block      // Buffer to use as temporary local storage

	// err is a persistent error.
	// It is only the responsibility of every exported method of Writer to
	// ensure that this error is sticky.
	err error
}
||||||
|
|
||||||
|
// NewWriter creates a new Writer writing to w.
|
||||||
|
func NewWriter(w io.Writer) *Writer { |
||||||
|
return &Writer{w: w, curr: ®FileWriter{w, 0}} |
||||||
|
} |
||||||
|
|
||||||
|
// fileWriter is the interface used to write out the body of a single
// archive entry; regFileWriter and sparseFileWriter implement it.
// NOTE(review): fileState is declared elsewhere in this package —
// presumably the LogicalRemaining/PhysicalRemaining accessors seen on the
// implementations below; confirm at its declaration.
type fileWriter interface {
	io.Writer
	fileState

	// ReadFrom mirrors io.ReaderFrom for bulk copies from a reader.
	ReadFrom(io.Reader) (int64, error)
}
||||||
|
|
||||||
|
// Flush finishes writing the current file's block padding.
|
||||||
|
// The current file must be fully written before Flush can be called.
|
||||||
|
//
|
||||||
|
// This is unnecessary as the next call to WriteHeader or Close
|
||||||
|
// will implicitly flush out the file's padding.
|
||||||
|
func (tw *Writer) Flush() error { |
||||||
|
if tw.err != nil { |
||||||
|
return tw.err |
||||||
|
} |
||||||
|
if nb := tw.curr.LogicalRemaining(); nb > 0 { |
||||||
|
return fmt.Errorf("archive/tar: missed writing %d bytes", nb) |
||||||
|
} |
||||||
|
if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil { |
||||||
|
return tw.err |
||||||
|
} |
||||||
|
tw.pad = 0 |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// WriteHeader writes hdr and prepares to accept the file's contents.
// The Header.Size determines how many bytes can be written for the next file.
// If the current file is not fully written, then this returns an error.
// This implicitly flushes any padding necessary before writing the header.
func (tw *Writer) WriteHeader(hdr *Header) error {
	if err := tw.Flush(); err != nil {
		return err
	}
	tw.hdr = *hdr // Shallow copy of Header

	// Avoid usage of the legacy TypeRegA flag, and automatically promote
	// it to use TypeReg or TypeDir.
	if tw.hdr.Typeflag == TypeRegA {
		if strings.HasSuffix(tw.hdr.Name, "/") {
			tw.hdr.Typeflag = TypeDir
		} else {
			tw.hdr.Typeflag = TypeReg
		}
	}

	// Round ModTime and ignore AccessTime and ChangeTime unless
	// the format is explicitly chosen.
	// This ensures nominal usage of WriteHeader (without specifying the format)
	// does not always result in the PAX format being chosen, which
	// causes a 1KiB increase to every header.
	if tw.hdr.Format == FormatUnknown {
		tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
		tw.hdr.AccessTime = time.Time{}
		tw.hdr.ChangeTime = time.Time{}
	}

	// Encode using the first format the header permits, in the fixed
	// preference order USTAR, then PAX, then GNU.
	allowedFormats, paxHdrs, err := tw.hdr.allowedFormats()
	switch {
	case allowedFormats.has(FormatUSTAR):
		tw.err = tw.writeUSTARHeader(&tw.hdr)
		return tw.err
	case allowedFormats.has(FormatPAX):
		tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
		return tw.err
	case allowedFormats.has(FormatGNU):
		tw.err = tw.writeGNUHeader(&tw.hdr)
		return tw.err
	default:
		return err // Non-fatal error
	}
}
||||||
|
|
||||||
|
// writeUSTARHeader encodes hdr in the USTAR format, moving a leading
// directory portion of long names into the USTAR prefix field when possible.
func (tw *Writer) writeUSTARHeader(hdr *Header) error {
	// Check if we can use USTAR prefix/suffix splitting.
	var namePrefix string
	if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
		namePrefix, hdr.Name = prefix, suffix
	}

	// Pack the main header.
	var f formatter
	blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
	f.formatString(blk.USTAR().Prefix(), namePrefix)
	blk.SetFormat(FormatUSTAR)
	if f.err != nil {
		return f.err // Should never happen since header is validated
	}
	return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
}
||||||
|
|
||||||
|
// writePAXHeader encodes hdr in the PAX format: it first emits an extended
// header file carrying the sorted paxHdrs records (or a global header for
// TypeXGlobalHeader), then the USTAR-style main header.
func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
	realName, realSize := hdr.Name, hdr.Size

	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		// Handle sparse files.
		var spd sparseDatas
		var spb []byte
		if len(hdr.SparseHoles) > 0 {
			sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
			sph = alignSparseEntries(sph, hdr.Size)
			spd = invertSparseEntries(sph, hdr.Size)

			// Format the sparse map.
			hdr.Size = 0 // Replace with encoded size
			spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n')
			for _, s := range spd {
				hdr.Size += s.Length
				spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n')
				spb = append(strconv.AppendInt(spb, s.Length, 10), '\n')
			}
			pad := blockPadding(int64(len(spb)))
			spb = append(spb, zeroBlock[:pad]...)
			hdr.Size += int64(len(spb)) // Accounts for encoded sparse map

			// Add and modify appropriate PAX records.
			dir, file := path.Split(realName)
			hdr.Name = path.Join(dir, "GNUSparseFile.0", file)
			paxHdrs[paxGNUSparseMajor] = "1"
			paxHdrs[paxGNUSparseMinor] = "0"
			paxHdrs[paxGNUSparseName] = realName
			paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10)
			paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10)
			delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName
		}
	*/
	_ = realSize // Kept for the disabled sparse code above

	// Write PAX records to the output.
	isGlobal := hdr.Typeflag == TypeXGlobalHeader
	if len(paxHdrs) > 0 || isGlobal {
		// Sort keys for deterministic ordering.
		var keys []string
		for k := range paxHdrs {
			keys = append(keys, k)
		}
		sort.Strings(keys)

		// Write each record to a buffer.
		var buf strings.Builder
		for _, k := range keys {
			rec, err := formatPAXRecord(k, paxHdrs[k])
			if err != nil {
				return err
			}
			buf.WriteString(rec)
		}

		// Write the extended header file.
		var name string
		var flag byte
		if isGlobal {
			name = realName
			if name == "" {
				name = "GlobalHead.0.0"
			}
			flag = TypeXGlobalHeader
		} else {
			dir, file := path.Split(realName)
			name = path.Join(dir, "PaxHeaders.0", file)
			flag = TypeXHeader
		}
		data := buf.String()
		if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
			return err // Global headers return here
		}
	}

	// Pack the main header.
	var f formatter // Ignore errors since they are expected
	fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
	blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
	blk.SetFormat(FormatPAX)
	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
		return err
	}

	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		// Write the sparse map and setup the sparse writer if necessary.
		if len(spd) > 0 {
			// Use tw.curr since the sparse map is accounted for in hdr.Size.
			if _, err := tw.curr.Write(spb); err != nil {
				return err
			}
			tw.curr = &sparseFileWriter{tw.curr, spd, 0}
		}
	*/
	return nil
}
||||||
|
|
||||||
|
// writeGNUHeader encodes hdr in the GNU format, emitting "././@LongLink"
// pseudo-entries beforehand when Name or Linkname does not fit in the
// fixed-size header fields.
func (tw *Writer) writeGNUHeader(hdr *Header) error {
	// Use long-link files if Name or Linkname exceeds the field size.
	const longName = "././@LongLink"
	if len(hdr.Name) > nameSize {
		data := hdr.Name + "\x00"
		if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil {
			return err
		}
	}
	if len(hdr.Linkname) > nameSize {
		data := hdr.Linkname + "\x00"
		if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil {
			return err
		}
	}

	// Pack the main header.
	var f formatter // Ignore errors since they are expected
	var spd sparseDatas
	var spb []byte
	blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
	if !hdr.AccessTime.IsZero() {
		f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix())
	}
	if !hdr.ChangeTime.IsZero() {
		f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix())
	}
	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		if hdr.Typeflag == TypeGNUSparse {
			sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
			sph = alignSparseEntries(sph, hdr.Size)
			spd = invertSparseEntries(sph, hdr.Size)

			// Format the sparse map.
			formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas {
				for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ {
					f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset)
					f.formatNumeric(sa.Entry(i).Length(), sp[0].Length)
					sp = sp[1:]
				}
				if len(sp) > 0 {
					sa.IsExtended()[0] = 1
				}
				return sp
			}
			sp2 := formatSPD(spd, blk.GNU().Sparse())
			for len(sp2) > 0 {
				var spHdr block
				sp2 = formatSPD(sp2, spHdr.Sparse())
				spb = append(spb, spHdr[:]...)
			}

			// Update size fields in the header block.
			realSize := hdr.Size
			hdr.Size = 0 // Encoded size; does not account for encoded sparse map
			for _, s := range spd {
				hdr.Size += s.Length
			}
			copy(blk.V7().Size(), zeroBlock[:]) // Reset field
			f.formatNumeric(blk.V7().Size(), hdr.Size)
			f.formatNumeric(blk.GNU().RealSize(), realSize)
		}
	*/
	blk.SetFormat(FormatGNU)
	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
		return err
	}

	// Write the extended sparse map and setup the sparse writer if necessary.
	// (spd is always empty while the sparse code above is disabled.)
	if len(spd) > 0 {
		// Use tw.w since the sparse map is not accounted for in hdr.Size.
		if _, err := tw.w.Write(spb); err != nil {
			return err
		}
		tw.curr = &sparseFileWriter{tw.curr, spd, 0}
	}
	return nil
}
||||||
|
|
||||||
|
type (
	// stringFormatter encodes a string into a fixed-size header field
	// (e.g. formatter.formatString).
	stringFormatter func([]byte, string)
	// numberFormatter encodes an integer into a fixed-size header field
	// (e.g. formatter.formatOctal or formatter.formatNumeric).
	numberFormatter func([]byte, int64)
)
||||||
|
|
||||||
|
// templateV7Plus fills out the V7 fields of a block using values from hdr.
// It also fills out fields (uname, gname, devmajor, devminor) that are
// shared in the USTAR, PAX, and GNU formats using the provided formatters.
//
// The block returned is only valid until the next call to
// templateV7Plus or writeRawFile.
func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
	tw.blk.Reset()

	// Treat the zero time.Time as the Unix epoch.
	modTime := hdr.ModTime
	if modTime.IsZero() {
		modTime = time.Unix(0, 0)
	}

	// Original V7 fields.
	v7 := tw.blk.V7()
	v7.TypeFlag()[0] = hdr.Typeflag
	fmtStr(v7.Name(), hdr.Name)
	fmtStr(v7.LinkName(), hdr.Linkname)
	fmtNum(v7.Mode(), hdr.Mode)
	fmtNum(v7.UID(), int64(hdr.Uid))
	fmtNum(v7.GID(), int64(hdr.Gid))
	fmtNum(v7.Size(), hdr.Size)
	fmtNum(v7.ModTime(), modTime.Unix())

	// Fields shared by the USTAR, PAX, and GNU formats.
	ustar := tw.blk.USTAR()
	fmtStr(ustar.UserName(), hdr.Uname)
	fmtStr(ustar.GroupName(), hdr.Gname)
	fmtNum(ustar.DevMajor(), hdr.Devmajor)
	fmtNum(ustar.DevMinor(), hdr.Devminor)

	return &tw.blk
}
||||||
|
|
||||||
|
// writeRawFile writes a minimal file with the given name and flag type.
// It uses format to encode the header format and will write data as the body.
// It uses default values for all of the other fields (as BSD and GNU tar does).
func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
	tw.blk.Reset()

	// Best effort for the filename: force ASCII, then truncate to the
	// fixed field size and drop any trailing slash.
	name = toASCII(name)
	if len(name) > nameSize {
		name = name[:nameSize]
	}
	name = strings.TrimRight(name, "/")

	// Pack the header with zeroed mode/uid/gid/mtime defaults.
	var f formatter
	v7 := tw.blk.V7()
	v7.TypeFlag()[0] = flag
	f.formatString(v7.Name(), name)
	f.formatOctal(v7.Mode(), 0)
	f.formatOctal(v7.UID(), 0)
	f.formatOctal(v7.GID(), 0)
	f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB
	f.formatOctal(v7.ModTime(), 0)
	tw.blk.SetFormat(format)
	if f.err != nil {
		return f.err // Only occurs if size condition is violated
	}

	// Write the header and data.
	if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil {
		return err
	}
	_, err := io.WriteString(tw, data)
	return err
}
||||||
|
|
||||||
|
// writeRawHeader writes the value of blk, regardless of its value.
|
||||||
|
// It sets up the Writer such that it can accept a file of the given size.
|
||||||
|
// If the flag is a special header-only flag, then the size is treated as zero.
|
||||||
|
func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error { |
||||||
|
if err := tw.Flush(); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
if _, err := tw.w.Write(blk[:]); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
if isHeaderOnlyType(flag) { |
||||||
|
size = 0 |
||||||
|
} |
||||||
|
tw.curr = ®FileWriter{tw.w, size} |
||||||
|
tw.pad = blockPadding(size) |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
|
||||||
|
// If the path is not splittable, then it will return ("", "", false).
|
||||||
|
func splitUSTARPath(name string) (prefix, suffix string, ok bool) { |
||||||
|
length := len(name) |
||||||
|
if length <= nameSize || !isASCII(name) { |
||||||
|
return "", "", false |
||||||
|
} else if length > prefixSize+1 { |
||||||
|
length = prefixSize + 1 |
||||||
|
} else if name[length-1] == '/' { |
||||||
|
length-- |
||||||
|
} |
||||||
|
|
||||||
|
i := strings.LastIndex(name[:length], "/") |
||||||
|
nlen := len(name) - i - 1 // nlen is length of suffix
|
||||||
|
plen := i // plen is length of prefix
|
||||||
|
if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize { |
||||||
|
return "", "", false |
||||||
|
} |
||||||
|
return name[:i], name[i+1:], true |
||||||
|
} |
||||||
|
|
||||||
|
// Write writes to the current file in the tar archive.
|
||||||
|
// Write returns the error ErrWriteTooLong if more than
|
||||||
|
// Header.Size bytes are written after WriteHeader.
|
||||||
|
//
|
||||||
|
// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
|
||||||
|
// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
|
||||||
|
// of what the Header.Size claims.
|
||||||
|
func (tw *Writer) Write(b []byte) (int, error) { |
||||||
|
if tw.err != nil { |
||||||
|
return 0, tw.err |
||||||
|
} |
||||||
|
n, err := tw.curr.Write(b) |
||||||
|
if err != nil && err != ErrWriteTooLong { |
||||||
|
tw.err = err |
||||||
|
} |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
// readFrom populates the content of the current file by reading from r.
|
||||||
|
// The bytes read must match the number of remaining bytes in the current file.
|
||||||
|
//
|
||||||
|
// If the current file is sparse and r is an io.ReadSeeker,
|
||||||
|
// then readFrom uses Seek to skip past holes defined in Header.SparseHoles,
|
||||||
|
// assuming that skipped regions are all NULs.
|
||||||
|
// This always reads the last byte to ensure r is the right size.
|
||||||
|
//
|
||||||
|
// TODO(dsnet): Re-export this when adding sparse file support.
|
||||||
|
// See https://golang.org/issue/22735
|
||||||
|
func (tw *Writer) readFrom(r io.Reader) (int64, error) { |
||||||
|
if tw.err != nil { |
||||||
|
return 0, tw.err |
||||||
|
} |
||||||
|
n, err := tw.curr.ReadFrom(r) |
||||||
|
if err != nil && err != ErrWriteTooLong { |
||||||
|
tw.err = err |
||||||
|
} |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
// Close closes the tar archive by flushing the padding, and writing the footer.
|
||||||
|
// If the current file (from a prior call to WriteHeader) is not fully written,
|
||||||
|
// then this returns an error.
|
||||||
|
func (tw *Writer) Close() error { |
||||||
|
if tw.err == ErrWriteAfterClose { |
||||||
|
return nil |
||||||
|
} |
||||||
|
if tw.err != nil { |
||||||
|
return tw.err |
||||||
|
} |
||||||
|
|
||||||
|
// Trailer: two zero blocks.
|
||||||
|
err := tw.Flush() |
||||||
|
for i := 0; i < 2 && err == nil; i++ { |
||||||
|
_, err = tw.w.Write(zeroBlock[:]) |
||||||
|
} |
||||||
|
|
||||||
|
// Ensure all future actions are invalid.
|
||||||
|
tw.err = ErrWriteAfterClose |
||||||
|
return err // Report IO errors
|
||||||
|
} |
||||||
|
|
||||||
|
// regFileWriter is a fileWriter for writing data to a regular file entry.
type regFileWriter struct {
	w  io.Writer // Underlying Writer
	nb int64     // Number of remaining bytes to write
}
||||||
|
|
||||||
|
func (fw *regFileWriter) Write(b []byte) (n int, err error) { |
||||||
|
overwrite := int64(len(b)) > fw.nb |
||||||
|
if overwrite { |
||||||
|
b = b[:fw.nb] |
||||||
|
} |
||||||
|
if len(b) > 0 { |
||||||
|
n, err = fw.w.Write(b) |
||||||
|
fw.nb -= int64(n) |
||||||
|
} |
||||||
|
switch { |
||||||
|
case err != nil: |
||||||
|
return n, err |
||||||
|
case overwrite: |
||||||
|
return n, ErrWriteTooLong |
||||||
|
default: |
||||||
|
return n, nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ReadFrom copies r into the file entry. The anonymous struct wrapper
// hides fw's own ReadFrom from io.Copy so the copy is routed through
// fw.Write (which enforces the remaining-byte limit) instead of
// recursing into this method.
func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
	return io.Copy(struct{ io.Writer }{fw}, r)
}
||||||
|
|
||||||
|
// LogicalRemaining reports how many bytes of the entry's declared size
// have not yet been written; for regular files this equals PhysicalRemaining.
func (fw regFileWriter) LogicalRemaining() int64 {
	return fw.nb
}
||||||
|
// PhysicalRemaining reports how many bytes remain to be written to the
// underlying writer; for regular files this equals LogicalRemaining.
func (fw regFileWriter) PhysicalRemaining() int64 {
	return fw.nb
}
||||||
|
|
||||||
|
// sparseFileWriter is a fileWriter for writing data to a sparse file entry.
type sparseFileWriter struct {
	fw  fileWriter  // Underlying fileWriter
	sp  sparseDatas // Normalized list of data fragments
	pos int64       // Current position in sparse file
}
||||||
|
|
||||||
|
// Write walks b through the sparse map: bytes that fall in hole fragments
// must be NUL (enforced by zeroWriter, which fails with errWriteHole
// otherwise), while bytes in data fragments go to the underlying fileWriter.
func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
	// Clamp to the logical size, remembering whether the caller overshot.
	overwrite := int64(len(b)) > sw.LogicalRemaining()
	if overwrite {
		b = b[:sw.LogicalRemaining()]
	}

	b0 := b
	endPos := sw.pos + int64(len(b))
	for endPos > sw.pos && err == nil {
		var nf int // Bytes written in fragment
		dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
		if sw.pos < dataStart { // In a hole fragment
			bf := b[:min(int64(len(b)), dataStart-sw.pos)]
			nf, err = zeroWriter{}.Write(bf)
		} else { // In a data fragment
			bf := b[:min(int64(len(b)), dataEnd-sw.pos)]
			nf, err = sw.fw.Write(bf)
		}
		b = b[nf:]
		sw.pos += int64(nf)
		if sw.pos >= dataEnd && len(sw.sp) > 1 {
			sw.sp = sw.sp[1:] // Ensure last fragment always remains
		}
	}

	n = len(b0) - len(b)
	switch {
	case err == ErrWriteTooLong:
		return n, errMissData // Not possible; implies bug in validation logic
	case err != nil:
		return n, err
	case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
		return n, errUnrefData // Not possible; implies bug in validation logic
	case overwrite:
		return n, ErrWriteTooLong
	default:
		return n, nil
	}
}
||||||
|
|
||||||
|
// ReadFrom populates the entry from r, seeking past hole fragments when r
// genuinely supports seeking; otherwise it falls back to a plain copy
// through sw.Write (which verifies that hole regions contain only NULs).
func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
	rs, ok := r.(io.ReadSeeker)
	if ok {
		// Probe with a no-op seek before trusting the interface.
		if _, err := rs.Seek(0, io.SeekCurrent); err != nil {
			ok = false // Not all io.Seeker can really seek
		}
	}
	if !ok {
		return io.Copy(struct{ io.Writer }{sw}, r)
	}

	var readLastByte bool
	pos0 := sw.pos
	for sw.LogicalRemaining() > 0 && !readLastByte && err == nil {
		var nf int64 // Size of fragment
		dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
		if sw.pos < dataStart { // In a hole fragment
			nf = dataStart - sw.pos
			// If no data fragments remain, hold back one byte so the
			// final read below can validate r's size.
			if sw.PhysicalRemaining() == 0 {
				readLastByte = true
				nf--
			}
			_, err = rs.Seek(nf, io.SeekCurrent)
		} else { // In a data fragment
			nf = dataEnd - sw.pos
			nf, err = io.CopyN(sw.fw, rs, nf)
		}
		sw.pos += nf
		if sw.pos >= dataEnd && len(sw.sp) > 1 {
			sw.sp = sw.sp[1:] // Ensure last fragment always remains
		}
	}

	// If the last fragment is a hole, then seek to 1-byte before EOF, and
	// read a single byte to ensure the file is the right size.
	if readLastByte && err == nil {
		_, err = mustReadFull(rs, []byte{0})
		sw.pos++
	}

	n = sw.pos - pos0
	switch {
	case err == io.EOF:
		return n, io.ErrUnexpectedEOF
	case err == ErrWriteTooLong:
		return n, errMissData // Not possible; implies bug in validation logic
	case err != nil:
		return n, err
	case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
		return n, errUnrefData // Not possible; implies bug in validation logic
	default:
		return n, ensureEOF(rs)
	}
}
||||||
|
|
||||||
|
// LogicalRemaining reports the bytes left in the sparse file's logical
// view: the end offset of the final fragment minus the current position.
func (sw sparseFileWriter) LogicalRemaining() int64 {
	return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
}
||||||
|
// PhysicalRemaining reports the bytes left to write to the underlying
// data storage, delegating to the wrapped fileWriter.
func (sw sparseFileWriter) PhysicalRemaining() int64 {
	return sw.fw.PhysicalRemaining()
}
||||||
|
|
||||||
|
// zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
// It backs the hole fragments of sparse entries (see sparseFileWriter.Write),
// where any data "written" into a hole must be all zeros.
type zeroWriter struct{}
||||||
|
|
||||||
|
func (zeroWriter) Write(b []byte) (int, error) { |
||||||
|
for i, c := range b { |
||||||
|
if c != 0 { |
||||||
|
return i, errWriteHole |
||||||
|
} |
||||||
|
} |
||||||
|
return len(b), nil |
||||||
|
} |
||||||
|
|
||||||
|
// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so.
|
||||||
|
func ensureEOF(r io.Reader) error { |
||||||
|
n, err := tryReadFull(r, []byte{0}) |
||||||
|
switch { |
||||||
|
case n > 0: |
||||||
|
return ErrWriteTooLong |
||||||
|
case err == io.EOF: |
||||||
|
return nil |
||||||
|
default: |
||||||
|
return err |
||||||
|
} |
||||||
|
} |
Loading…
Reference in new issue