mirror of https://github.com/k3d-io/k3d
change: allow full K3s registry configuration (#1215)
parent
8d54019838
commit
dc9f05036a
@ -1,81 +0,0 @@ |
||||
/* |
||||
Copyright © 2020-2022 The k3d Author(s) |
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
of this software and associated documentation files (the "Software"), to deal |
||||
in the Software without restriction, including without limitation the rights |
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
copies of the Software, and to permit persons to whom the Software is |
||||
furnished to do so, subject to the following conditions: |
||||
|
||||
The above copyright notice and this permission notice shall be included in |
||||
all copies or substantial portions of the Software. |
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
||||
THE SOFTWARE. |
||||
*/ |
||||
package k3s |
||||
|
||||
/* |
||||
* Copied from https://github.com/k3s-io/k3s/blob/cf8c101b705c7af20e2ed11df43beb4951e6d9dc/pkg/agent/templates/registry.go
|
||||
* .. to avoid pulling in k3s as a dependency |
||||
*/ |
||||
|
||||
// Mirror contains the config related to the registry mirror.
type Mirror struct {
	// Endpoints are endpoints for a namespace. CRI plugin will try the endpoints
	// one by one until a working one is found. The endpoint must be a valid url
	// with host specified.
	// The scheme, host and path from the endpoint URL will be used.
	Endpoints []string `toml:"endpoint" json:"endpoint"`
}
||||
|
||||
// AuthConfig contains the config related to authentication to a specific registry.
type AuthConfig struct {
	// Username is the username to login the registry.
	Username string `toml:"username" json:"username"`
	// Password is the password to login the registry.
	Password string `toml:"password" json:"password"`
	// Auth is a base64 encoded string from the concatenation of the username,
	// a colon, and the password.
	Auth string `toml:"auth" json:"auth"`
	// IdentityToken is used to authenticate the user and get
	// an access token for the registry.
	IdentityToken string `toml:"identitytoken" json:"identity_token"`
}
||||
|
||||
// TLSConfig contains the CA/Cert/Key used for a registry.
type TLSConfig struct {
	// CAFile is the CA certificate file used to verify the registry.
	CAFile string `toml:"ca_file" json:"ca_file"`
	// CertFile is the client certificate file presented to the registry.
	CertFile string `toml:"cert_file" json:"cert_file"`
	// KeyFile is the client key file matching CertFile.
	KeyFile string `toml:"key_file" json:"key_file"`
	// InsecureSkipVerify disables verification of the registry's certificate chain.
	InsecureSkipVerify bool `toml:"insecure_skip_verify" json:"insecure_skip_verify"`
}
||||
|
||||
// Registry is registry settings configured.
type Registry struct {
	// Mirrors are namespace to mirror mapping for all namespaces.
	Mirrors map[string]Mirror `toml:"mirrors" json:"mirrors"`

	// Configs are configs for each registry.
	// The key is the FQDN or IP of the registry.
	Configs map[string]RegistryConfig `toml:"configs" json:"configs"`

	// Auths are registry endpoint to auth config mapping. The registry endpoint must
	// be a valid url with host specified.
	//
	// Deprecated: use Configs instead. Remove in containerd 1.4.
	Auths map[string]AuthConfig `toml:"auths" json:"auths"`
}
||||
|
||||
// RegistryConfig contains configuration used to communicate with the registry.
type RegistryConfig struct {
	// Auth contains information to authenticate to the registry.
	Auth *AuthConfig `toml:"auth" json:"auth"`
	// TLS is a pair of CA/Cert/Key which then are used when creating the transport
	// that communicates with the registry.
	TLS *TLSConfig `toml:"tls" json:"tls"`
}
@ -0,0 +1,202 @@ |
||||
|
||||
Apache License |
||||
Version 2.0, January 2004 |
||||
http://www.apache.org/licenses/ |
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||
|
||||
1. Definitions. |
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, |
||||
and distribution as defined by Sections 1 through 9 of this document. |
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by |
||||
the copyright owner that is granting the License. |
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all |
||||
other entities that control, are controlled by, or are under common |
||||
control with that entity. For the purposes of this definition, |
||||
"control" means (i) the power, direct or indirect, to cause the |
||||
direction or management of such entity, whether by contract or |
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||
outstanding shares, or (iii) beneficial ownership of such entity. |
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity |
||||
exercising permissions granted by this License. |
||||
|
||||
"Source" form shall mean the preferred form for making modifications, |
||||
including but not limited to software source code, documentation |
||||
source, and configuration files. |
||||
|
||||
"Object" form shall mean any form resulting from mechanical |
||||
transformation or translation of a Source form, including but |
||||
not limited to compiled object code, generated documentation, |
||||
and conversions to other media types. |
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or |
||||
Object form, made available under the License, as indicated by a |
||||
copyright notice that is included in or attached to the work |
||||
(an example is provided in the Appendix below). |
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object |
||||
form, that is based on (or derived from) the Work and for which the |
||||
editorial revisions, annotations, elaborations, or other modifications |
||||
represent, as a whole, an original work of authorship. For the purposes |
||||
of this License, Derivative Works shall not include works that remain |
||||
separable from, or merely link (or bind by name) to the interfaces of, |
||||
the Work and Derivative Works thereof. |
||||
|
||||
"Contribution" shall mean any work of authorship, including |
||||
the original version of the Work and any modifications or additions |
||||
to that Work or Derivative Works thereof, that is intentionally |
||||
submitted to Licensor for inclusion in the Work by the copyright owner |
||||
or by an individual or Legal Entity authorized to submit on behalf of |
||||
the copyright owner. For the purposes of this definition, "submitted" |
||||
means any form of electronic, verbal, or written communication sent |
||||
to the Licensor or its representatives, including but not limited to |
||||
communication on electronic mailing lists, source code control systems, |
||||
and issue tracking systems that are managed by, or on behalf of, the |
||||
Licensor for the purpose of discussing and improving the Work, but |
||||
excluding communication that is conspicuously marked or otherwise |
||||
designated in writing by the copyright owner as "Not a Contribution." |
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||
on behalf of whom a Contribution has been received by Licensor and |
||||
subsequently incorporated within the Work. |
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
copyright license to reproduce, prepare Derivative Works of, |
||||
publicly display, publicly perform, sublicense, and distribute the |
||||
Work and such Derivative Works in Source or Object form. |
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
(except as stated in this section) patent license to make, have made, |
||||
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||
where such license applies only to those patent claims licensable |
||||
by such Contributor that are necessarily infringed by their |
||||
Contribution(s) alone or by combination of their Contribution(s) |
||||
with the Work to which such Contribution(s) was submitted. If You |
||||
institute patent litigation against any entity (including a |
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||
or a Contribution incorporated within the Work constitutes direct |
||||
or contributory patent infringement, then any patent licenses |
||||
granted to You under this License for that Work shall terminate |
||||
as of the date such litigation is filed. |
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the |
||||
Work or Derivative Works thereof in any medium, with or without |
||||
modifications, and in Source or Object form, provided that You |
||||
meet the following conditions: |
||||
|
||||
(a) You must give any other recipients of the Work or |
||||
Derivative Works a copy of this License; and |
||||
|
||||
(b) You must cause any modified files to carry prominent notices |
||||
stating that You changed the files; and |
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works |
||||
that You distribute, all copyright, patent, trademark, and |
||||
attribution notices from the Source form of the Work, |
||||
excluding those notices that do not pertain to any part of |
||||
the Derivative Works; and |
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its |
||||
distribution, then any Derivative Works that You distribute must |
||||
include a readable copy of the attribution notices contained |
||||
within such NOTICE file, excluding those notices that do not |
||||
pertain to any part of the Derivative Works, in at least one |
||||
of the following places: within a NOTICE text file distributed |
||||
as part of the Derivative Works; within the Source form or |
||||
documentation, if provided along with the Derivative Works; or, |
||||
within a display generated by the Derivative Works, if and |
||||
wherever such third-party notices normally appear. The contents |
||||
of the NOTICE file are for informational purposes only and |
||||
do not modify the License. You may add Your own attribution |
||||
notices within Derivative Works that You distribute, alongside |
||||
or as an addendum to the NOTICE text from the Work, provided |
||||
that such additional attribution notices cannot be construed |
||||
as modifying the License. |
||||
|
||||
You may add Your own copyright statement to Your modifications and |
||||
may provide additional or different license terms and conditions |
||||
for use, reproduction, or distribution of Your modifications, or |
||||
for any such Derivative Works as a whole, provided Your use, |
||||
reproduction, and distribution of the Work otherwise complies with |
||||
the conditions stated in this License. |
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||
any Contribution intentionally submitted for inclusion in the Work |
||||
by You to the Licensor shall be under the terms and conditions of |
||||
this License, without any additional terms or conditions. |
||||
Notwithstanding the above, nothing herein shall supersede or modify |
||||
the terms of any separate license agreement you may have executed |
||||
with Licensor regarding such Contributions. |
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade |
||||
names, trademarks, service marks, or product names of the Licensor, |
||||
except as required for reasonable and customary use in describing the |
||||
origin of the Work and reproducing the content of the NOTICE file. |
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or |
||||
agreed to in writing, Licensor provides the Work (and each |
||||
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||
implied, including, without limitation, any warranties or conditions |
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||
appropriateness of using or redistributing the Work and assume any |
||||
risks associated with Your exercise of permissions under this License. |
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory, |
||||
whether in tort (including negligence), contract, or otherwise, |
||||
unless required by applicable law (such as deliberate and grossly |
||||
negligent acts) or agreed to in writing, shall any Contributor be |
||||
liable to You for damages, including any direct, indirect, special, |
||||
incidental, or consequential damages of any character arising as a |
||||
result of this License or out of the use or inability to use the |
||||
Work (including but not limited to damages for loss of goodwill, |
||||
work stoppage, computer failure or malfunction, or any and all |
||||
other commercial damages or losses), even if such Contributor |
||||
has been advised of the possibility of such damages. |
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing |
||||
the Work or Derivative Works thereof, You may choose to offer, |
||||
and charge a fee for, acceptance of support, warranty, indemnity, |
||||
or other liability obligations and/or rights consistent with this |
||||
License. However, in accepting such obligations, You may act only |
||||
on Your own behalf and on Your sole responsibility, not on behalf |
||||
of any other Contributor, and only if You agree to indemnify, |
||||
defend, and hold each Contributor harmless for any liability |
||||
incurred by, or claims asserted against, such Contributor by reason |
||||
of your accepting any such warranty or additional liability. |
||||
|
||||
END OF TERMS AND CONDITIONS |
||||
|
||||
APPENDIX: How to apply the Apache License to your work. |
||||
|
||||
To apply the Apache License to your work, attach the following |
||||
boilerplate notice, with the fields enclosed by brackets "[]" |
||||
replaced with your own identifying information. (Don't include |
||||
the brackets!) The text should be enclosed in the appropriate |
||||
comment syntax for the file format. We also recommend that a |
||||
file or class name and description of purpose be included on the |
||||
same "printed page" as the copyright notice for easier |
||||
identification within third-party archives. |
||||
|
||||
Copyright [yyyy] [name of copyright owner] |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
@ -0,0 +1,48 @@ |
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package and provides helpers for adding Close to io.{Reader|Writer}.
|
||||
package and |
||||
|
||||
import ( |
||||
"io" |
||||
) |
||||
|
||||
// ReadCloser turns an arbitrary io.Reader into an io.ReadCloser: reads are
// delegated to the embedded Reader, and Close runs the caller-supplied
// CloseFunc.
type ReadCloser struct {
	io.Reader
	CloseFunc func() error
}

// Compile-time interface check.
var _ io.ReadCloser = (*ReadCloser)(nil)

// Close implements io.ReadCloser by invoking CloseFunc.
func (rc *ReadCloser) Close() error {
	return rc.CloseFunc()
}
||||
|
||||
// WriteCloser turns an arbitrary io.Writer into an io.WriteCloser: writes are
// delegated to the embedded Writer, and Close runs the caller-supplied
// CloseFunc.
type WriteCloser struct {
	io.Writer
	CloseFunc func() error
}

// Compile-time interface check.
var _ io.WriteCloser = (*WriteCloser)(nil)

// Close implements io.WriteCloser by invoking CloseFunc.
func (wc *WriteCloser) Close() error {
	return wc.CloseFunc()
}
97
vendor/github.com/google/go-containerregistry/internal/compression/compression.go
generated
vendored
97
vendor/github.com/google/go-containerregistry/internal/compression/compression.go
generated
vendored
@ -0,0 +1,97 @@ |
||||
// Copyright 2022 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package compression abstracts over gzip and zstd.
|
||||
package compression |
||||
|
||||
import ( |
||||
"bufio" |
||||
"bytes" |
||||
"io" |
||||
|
||||
"github.com/google/go-containerregistry/internal/gzip" |
||||
"github.com/google/go-containerregistry/internal/zstd" |
||||
"github.com/google/go-containerregistry/pkg/compression" |
||||
) |
||||
|
||||
// Opener represents e.g. opening a file.
|
||||
type Opener = func() (io.ReadCloser, error) |
||||
|
||||
// GetCompression detects whether an Opener is compressed and which algorithm is used.
|
||||
func GetCompression(opener Opener) (compression.Compression, error) { |
||||
rc, err := opener() |
||||
if err != nil { |
||||
return compression.None, err |
||||
} |
||||
defer rc.Close() |
||||
|
||||
cp, _, err := PeekCompression(rc) |
||||
if err != nil { |
||||
return compression.None, err |
||||
} |
||||
|
||||
return cp, nil |
||||
} |
||||
|
||||
// PeekCompression detects whether the input stream is compressed and which algorithm is used.
|
||||
//
|
||||
// If r implements Peek, we will use that directly, otherwise a small number
|
||||
// of bytes are buffered to Peek at the gzip/zstd header, and the returned
|
||||
// PeekReader can be used as a replacement for the consumed input io.Reader.
|
||||
func PeekCompression(r io.Reader) (compression.Compression, PeekReader, error) { |
||||
pr := intoPeekReader(r) |
||||
|
||||
if isGZip, _, err := checkHeader(pr, gzip.MagicHeader); err != nil { |
||||
return compression.None, pr, err |
||||
} else if isGZip { |
||||
return compression.GZip, pr, nil |
||||
} |
||||
|
||||
if isZStd, _, err := checkHeader(pr, zstd.MagicHeader); err != nil { |
||||
return compression.None, pr, err |
||||
} else if isZStd { |
||||
return compression.ZStd, pr, nil |
||||
} |
||||
|
||||
return compression.None, pr, nil |
||||
} |
||||
|
||||
// PeekReader is an io.Reader that also implements Peek a la bufio.Reader.
|
||||
type PeekReader interface { |
||||
io.Reader |
||||
Peek(n int) ([]byte, error) |
||||
} |
||||
|
||||
// IntoPeekReader creates a PeekReader from an io.Reader.
|
||||
// If the reader already has a Peek method, it will just return the passed reader.
|
||||
func intoPeekReader(r io.Reader) PeekReader { |
||||
if p, ok := r.(PeekReader); ok { |
||||
return p |
||||
} |
||||
|
||||
return bufio.NewReader(r) |
||||
} |
||||
|
||||
// CheckHeader checks whether the first bytes from a PeekReader match an expected header
|
||||
func checkHeader(pr PeekReader, expectedHeader []byte) (bool, PeekReader, error) { |
||||
header, err := pr.Peek(len(expectedHeader)) |
||||
if err != nil { |
||||
// https://github.com/google/go-containerregistry/issues/367
|
||||
if err == io.EOF { |
||||
return false, pr, nil |
||||
} |
||||
return false, pr, err |
||||
} |
||||
return bytes.Equal(header, expectedHeader), pr, nil |
||||
} |
@ -0,0 +1,118 @@ |
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package gzip provides helper functions for interacting with gzipped streams.
|
||||
package gzip |
||||
|
||||
import ( |
||||
"bufio" |
||||
"bytes" |
||||
"compress/gzip" |
||||
"io" |
||||
|
||||
"github.com/google/go-containerregistry/internal/and" |
||||
) |
||||
|
||||
// MagicHeader is the start of gzip files: the two bytes 0x1f, 0x8b.
var MagicHeader = []byte{'\x1f', '\x8b'}
||||
|
||||
// ReadCloser reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// This uses gzip.BestSpeed for the compression level.
// It is a convenience wrapper around ReadCloserLevel.
func ReadCloser(r io.ReadCloser) io.ReadCloser {
	return ReadCloserLevel(r, gzip.BestSpeed)
}
||||
|
||||
// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// Refer to compress/gzip for the level:
// https://golang.org/pkg/compress/gzip/#pkg-constants
//
// Compression happens in a background goroutine that writes into one end of
// an io.Pipe; errors are propagated to the reader via pw.CloseWithError.
func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
	pr, pw := io.Pipe()

	// For highly compressible layers, gzip.Writer will output a very small
	// number of bytes per Write(). This is normally fine, but when pushing
	// to a registry, we want to ensure that we're taking full advantage of
	// the available bandwidth instead of sending tons of tiny writes over
	// the wire.
	// 64K ought to be small enough for anybody.
	// NOTE(review): 2<<16 is actually 128 KiB, not 64K as the comment above
	// claims — confirm which size was intended before changing either.
	bw := bufio.NewWriterSize(pw, 2<<16)

	// Returns err so we can pw.CloseWithError(err)
	go func() error {
		// TODO(go1.14): Just defer {pw,gw,r}.Close like you'd expect.
		// Context: https://golang.org/issue/24283
		gw, err := gzip.NewWriterLevel(bw, level)
		if err != nil {
			return pw.CloseWithError(err)
		}

		if _, err := io.Copy(gw, r); err != nil {
			defer r.Close()
			defer gw.Close()
			return pw.CloseWithError(err)
		}

		// Close gzip writer to Flush it and write gzip trailers.
		if err := gw.Close(); err != nil {
			return pw.CloseWithError(err)
		}

		// Flush bufio writer to ensure we write out everything.
		if err := bw.Flush(); err != nil {
			return pw.CloseWithError(err)
		}

		// We don't really care if these fail.
		defer pw.Close()
		defer r.Close()

		return nil
	}()

	return pr
}
||||
|
||||
// UnzipReadCloser reads compressed input data from the io.ReadCloser and
|
||||
// returns an io.ReadCloser from which uncompressed data may be read.
|
||||
func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { |
||||
gr, err := gzip.NewReader(r) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &and.ReadCloser{ |
||||
Reader: gr, |
||||
CloseFunc: func() error { |
||||
// If the unzip fails, then this seems to return the same
|
||||
// error as the read. We don't want this to interfere with
|
||||
// us closing the main ReadCloser, since this could leave
|
||||
// an open file descriptor (fails on Windows).
|
||||
gr.Close() |
||||
return r.Close() |
||||
}, |
||||
}, nil |
||||
} |
||||
|
||||
// Is detects whether the input stream is compressed.
|
||||
func Is(r io.Reader) (bool, error) { |
||||
magicHeader := make([]byte, 2) |
||||
n, err := r.Read(magicHeader) |
||||
if n == 0 && err == io.EOF { |
||||
return false, nil |
||||
} |
||||
if err != nil { |
||||
return false, err |
||||
} |
||||
return bytes.Equal(magicHeader, MagicHeader), nil |
||||
} |
@ -0,0 +1,89 @@ |
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package redact contains a simple context signal for redacting requests.
|
||||
package redact |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"net/url" |
||||
) |
||||
|
||||
type contextKey string |
||||
|
||||
var redactKey = contextKey("redact") |
||||
|
||||
// NewContext creates a new ctx with the reason for redaction.
|
||||
func NewContext(ctx context.Context, reason string) context.Context { |
||||
return context.WithValue(ctx, redactKey, reason) |
||||
} |
||||
|
||||
// FromContext returns the redaction reason, if any.
|
||||
func FromContext(ctx context.Context) (bool, string) { |
||||
reason, ok := ctx.Value(redactKey).(string) |
||||
return ok, reason |
||||
} |
||||
|
||||
// Error redacts potentially sensitive query parameter values in the URL from the error's message.
|
||||
//
|
||||
// If the error is a *url.Error, this returns a *url.Error with the URL redacted.
|
||||
// Any other error type, or nil, is returned unchanged.
|
||||
func Error(err error) error { |
||||
// If the error is a url.Error, we can redact the URL.
|
||||
// Otherwise (including if err is nil), we can't redact.
|
||||
var uerr *url.Error |
||||
if ok := errors.As(err, &uerr); !ok { |
||||
return err |
||||
} |
||||
u, perr := url.Parse(uerr.URL) |
||||
if perr != nil { |
||||
return err // If the URL can't be parsed, just return the original error.
|
||||
} |
||||
uerr.URL = URL(u).String() // Update the URL to the redacted URL.
|
||||
return uerr |
||||
} |
||||
|
||||
// The set of query string keys that we expect to send as part of the registry
|
||||
// protocol. Anything else is potentially dangerous to leak, as it's probably
|
||||
// from a redirect. These redirects often included tokens or signed URLs.
|
||||
var paramAllowlist = map[string]struct{}{ |
||||
// Token exchange
|
||||
"scope": {}, |
||||
"service": {}, |
||||
// Cross-repo mounting
|
||||
"mount": {}, |
||||
"from": {}, |
||||
// Layer PUT
|
||||
"digest": {}, |
||||
// Listing tags and catalog
|
||||
"n": {}, |
||||
"last": {}, |
||||
} |
||||
|
||||
// URL redacts potentially sensitive query parameter values from the URL's query string.
|
||||
func URL(u *url.URL) *url.URL { |
||||
qs := u.Query() |
||||
for k, v := range qs { |
||||
for i := range v { |
||||
if _, ok := paramAllowlist[k]; !ok { |
||||
// key is not in the Allowlist
|
||||
v[i] = "REDACTED" |
||||
} |
||||
} |
||||
} |
||||
r := *u |
||||
r.RawQuery = qs.Encode() |
||||
return &r |
||||
} |
@ -0,0 +1,94 @@ |
||||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package retry provides methods for retrying operations. It is a thin wrapper
|
||||
// around k8s.io/apimachinery/pkg/util/wait to make certain operations easier.
|
||||
package retry |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"fmt" |
||||
|
||||
"github.com/google/go-containerregistry/internal/retry/wait" |
||||
) |
||||
|
||||
// Backoff is an alias of our own wait.Backoff to avoid name conflicts with
// the kubernetes wait package. Typing retry.Backoff is easier than fixing
// the wrong import every time you use wait.Backoff.
type Backoff = wait.Backoff
||||
|
||||
// This is implemented by several errors in the net package as well as our
|
||||
// transport.Error.
|
||||
type temporary interface { |
||||
Temporary() bool |
||||
} |
||||
|
||||
// IsTemporary returns true if err implements Temporary() and it returns true.
|
||||
func IsTemporary(err error) bool { |
||||
if errors.Is(err, context.DeadlineExceeded) { |
||||
return false |
||||
} |
||||
if te, ok := err.(temporary); ok && te.Temporary() { |
||||
return true |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// IsNotNil returns true if err is not nil.
// It is a ready-made Predicate that retries on every error.
func IsNotNil(err error) bool {
	return err != nil
}
||||
|
||||
// Predicate determines whether an error should be retried.
// Returning true means the operation will be attempted again.
type Predicate func(error) (retry bool)
||||
|
||||
// Retry retries a given function, f, until a predicate is satisfied, using
// exponential backoff. If the predicate is never satisfied, it will return the
// last error returned by f.
func Retry(f func() error, p Predicate, backoff wait.Backoff) (err error) {
	if f == nil {
		return fmt.Errorf("nil f passed to retry")
	}
	if p == nil {
		return fmt.Errorf("nil p passed to retry")
	}

	// condition adapts f/p to wait.ExponentialBackoff's contract:
	// (false, nil) asks for another attempt, (true, err) stops the loop.
	// err is the named return, so the result of the most recent call to f
	// is captured even when the backoff gives up.
	condition := func() (bool, error) {
		err = f()
		if p(err) {
			return false, nil
		}
		return true, err
	}

	// ExponentialBackoff's own return value (e.g. its timeout error) is
	// deliberately discarded: the caller receives the last error from f
	// via the named return instead.
	wait.ExponentialBackoff(backoff, condition)
	return
}
||||
|
||||
type contextKey string |
||||
|
||||
var key = contextKey("never") |
||||
|
||||
// Never returns a context that signals something should not be retried.
// This is a hack and can be used to communicate across package boundaries
// to avoid retry amplification. The marker is read back by Ever.
func Never(ctx context.Context) context.Context {
	return context.WithValue(ctx, key, true)
}
||||
|
||||
// Ever reports whether retrying is still permitted: it returns true only if
// ctx was NOT wrapped by Never. (Never stores a non-nil marker under key, so
// a nil lookup means the context was never marked.)
func Ever(ctx context.Context) bool {
	return ctx.Value(key) == nil
}
@ -0,0 +1,123 @@ |
||||
/* |
||||
Copyright 2014 The Kubernetes Authors. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
// Package wait is a subset of k8s.io/apimachinery to avoid conflicts
|
||||
// in dependencies (specifically, logging).
|
||||
package wait |
||||
|
||||
import ( |
||||
"errors" |
||||
"math/rand" |
||||
"time" |
||||
) |
||||
|
||||
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
// This allows clients to avoid converging on periodic behavior. If maxFactor
// is 0.0, a suggested default value will be chosen.
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
	factor := maxFactor
	if factor <= 0.0 {
		// Suggested default: up to double the base duration.
		factor = 1.0
	}
	extra := time.Duration(rand.Float64() * factor * float64(duration))
	return duration + extra
}
||||
|
||||
// ErrWaitTimeout is returned when the condition exited without success,
// e.g. when ExponentialBackoff exhausts its steps.
var ErrWaitTimeout = errors.New("timed out waiting for the condition")
||||
|
||||
// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
||||
|
||||
// Backoff holds parameters applied to a Backoff function, such as
// ExponentialBackoff.
type Backoff struct {
	// The initial duration.
	Duration time.Duration
	// Duration is multiplied by factor each iteration, if factor is not zero
	// and the limits imposed by Steps and Cap have not been reached.
	// Should not be negative.
	// The jitter does not contribute to the updates to the duration parameter.
	Factor float64
	// The sleep at each iteration is the duration plus an additional
	// amount chosen uniformly at random from the interval between
	// zero and `jitter*duration`.
	Jitter float64
	// The remaining number of iterations in which the duration
	// parameter may change (but progress can be stopped earlier by
	// hitting the cap). If not positive, the duration is not
	// changed. Used for exponential backoff in combination with
	// Factor and Cap.
	Steps int
	// A limit on revised values of the duration parameter. If a
	// multiplication by the factor parameter would make the duration
	// exceed the cap then the duration is set to the cap and the
	// steps parameter is set to zero.
	Cap time.Duration
}
||||
|
||||
// Step (1) returns an amount of time to sleep determined by the
|
||||
// original Duration and Jitter and (2) mutates the provided Backoff
|
||||
// to update its Steps and Duration.
|
||||
func (b *Backoff) Step() time.Duration { |
||||
if b.Steps < 1 { |
||||
if b.Jitter > 0 { |
||||
return Jitter(b.Duration, b.Jitter) |
||||
} |
||||
return b.Duration |
||||
} |
||||
b.Steps-- |
||||
|
||||
duration := b.Duration |
||||
|
||||
// calculate the next step
|
||||
if b.Factor != 0 { |
||||
b.Duration = time.Duration(float64(b.Duration) * b.Factor) |
||||
if b.Cap > 0 && b.Duration > b.Cap { |
||||
b.Duration = b.Cap |
||||
b.Steps = 0 |
||||
} |
||||
} |
||||
|
||||
if b.Jitter > 0 { |
||||
duration = Jitter(duration, b.Jitter) |
||||
} |
||||
return duration |
||||
} |
||||
|
||||
// ExponentialBackoff repeats a condition check with exponential backoff.
|
||||
//
|
||||
// It repeatedly checks the condition and then sleeps, using `backoff.Step()`
|
||||
// to determine the length of the sleep and adjust Duration and Steps.
|
||||
// Stops and returns as soon as:
|
||||
// 1. the condition check returns true or an error,
|
||||
// 2. `backoff.Steps` checks of the condition have been done, or
|
||||
// 3. a sleep truncated by the cap on duration has been completed.
|
||||
// In case (1) the returned error is what the condition function returned.
|
||||
// In all other cases, ErrWaitTimeout is returned.
|
||||
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { |
||||
for backoff.Steps > 0 { |
||||
if ok, err := condition(); err != nil || ok { |
||||
return err |
||||
} |
||||
if backoff.Steps == 1 { |
||||
break |
||||
} |
||||
time.Sleep(backoff.Step()) |
||||
} |
||||
return ErrWaitTimeout |
||||
} |
@ -0,0 +1,122 @@ |
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package verify provides a ReadCloser that verifies content matches the
|
||||
// expected hash values.
|
||||
package verify |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/hex" |
||||
"errors" |
||||
"fmt" |
||||
"hash" |
||||
"io" |
||||
|
||||
"github.com/google/go-containerregistry/internal/and" |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
) |
||||
|
||||
// SizeUnknown is a sentinel value to indicate that the expected size is not
// known; passing it to ReadCloser disables size verification.
const SizeUnknown = -1
||||
|
||||
// verifyReader counts and hashes bytes as they are read from inner, checking
// the accumulated size and digest against the expected values at io.EOF.
type verifyReader struct {
	inner             io.Reader // underlying reader (tee'd into hasher by ReadCloser)
	hasher            hash.Hash // accumulates the digest of everything read
	expected          v1.Hash   // digest the content must ultimately match
	gotSize, wantSize int64     // bytes seen so far / expected total (SizeUnknown to skip)
}
||||
|
||||
// Error provides information about the failed hash verification.
type Error struct {
	got     string  // algorithm-prefixed hex digest actually computed
	want    v1.Hash // digest that was expected
	gotSize int64   // number of bytes read before the mismatch was detected
}
||||
|
||||
// Error implements the error interface, describing the digest mismatch.
func (v Error) Error() string {
	return fmt.Sprintf("error verifying %s checksum after reading %d bytes; got %q, want %q",
		v.want.Algorithm, v.gotSize, v.got, v.want)
}
||||
|
||||
// Read implements io.Reader.
//
// Bytes are counted as they pass through; size and digest checks only run
// once the underlying reader reports io.EOF, so a caller must read to EOF
// for verification to occur.
func (vc *verifyReader) Read(b []byte) (int, error) {
	n, err := vc.inner.Read(b)
	vc.gotSize += int64(n)
	if err == io.EOF {
		// wantSize == SizeUnknown skips the size check entirely.
		if vc.wantSize != SizeUnknown && vc.gotSize != vc.wantSize {
			return n, fmt.Errorf("error verifying size; got %d, want %d", vc.gotSize, vc.wantSize)
		}
		// vc.hasher has seen every byte read so far (see ReadCloser's
		// io.TeeReader setup).
		got := hex.EncodeToString(vc.hasher.Sum(nil))
		if want := vc.expected.Hex; got != want {
			return n, Error{
				got:     vc.expected.Algorithm + ":" + got,
				want:    vc.expected,
				gotSize: vc.gotSize,
			}
		}
	}
	return n, err
}
||||
|
||||
// ReadCloser wraps the given io.ReadCloser to verify that its contents match
// the provided v1.Hash before io.EOF is returned.
//
// The reader will only be read up to size bytes, to prevent resource
// exhaustion. If EOF is returned before size bytes are read, an error is
// returned.
//
// A size of SizeUnknown (-1) disables size verification when the size is
// unknown ahead of time.
func ReadCloser(r io.ReadCloser, size int64, h v1.Hash) (io.ReadCloser, error) {
	w, err := v1.Hasher(h.Algorithm)
	if err != nil {
		return nil, err
	}
	r2 := io.TeeReader(r, w) // pass all writes to the hasher.
	if size != SizeUnknown {
		r2 = io.LimitReader(r2, size) // if we know the size, limit to that size.
	}
	return &and.ReadCloser{
		Reader: &verifyReader{
			inner:    r2,
			hasher:   w,
			expected: h,
			wantSize: size,
		},
		// Closing the returned reader closes the original reader.
		CloseFunc: r.Close,
	}, nil
}
||||
|
||||
// Descriptor verifies that the embedded Data field matches the Size and Digest
|
||||
// fields of the given v1.Descriptor, returning an error if the Data field is
|
||||
// missing or if it contains incorrect data.
|
||||
func Descriptor(d v1.Descriptor) error { |
||||
if d.Data == nil { |
||||
return errors.New("error verifying descriptor; Data == nil") |
||||
} |
||||
|
||||
h, sz, err := v1.SHA256(bytes.NewReader(d.Data)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if h != d.Digest { |
||||
return fmt.Errorf("error verifying Digest; got %q, want %q", h, d.Digest) |
||||
} |
||||
if sz != d.Size { |
||||
return fmt.Errorf("error verifying Size; got %d, want %d", sz, d.Size) |
||||
} |
||||
|
||||
return nil |
||||
} |
@ -0,0 +1,116 @@ |
||||
// Copyright 2022 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package zstd provides helper functions for interacting with zstd streams.
|
||||
package zstd |
||||
|
||||
import ( |
||||
"bufio" |
||||
"bytes" |
||||
"io" |
||||
|
||||
"github.com/google/go-containerregistry/internal/and" |
||||
"github.com/klauspost/compress/zstd" |
||||
) |
||||
|
||||
// MagicHeader is the start of zstd files: the four-byte zstd frame magic number.
var MagicHeader = []byte{'\x28', '\xb5', '\x2f', '\xfd'}
||||
|
||||
// ReadCloser reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// This uses zstd level 1 for the compression.
func ReadCloser(r io.ReadCloser) io.ReadCloser {
	return ReadCloserLevel(r, 1)
}
||||
|
||||
// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
//
// Compression runs in a background goroutine feeding an io.Pipe; any error
// is propagated to the returned reader via pw.CloseWithError.
func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
	pr, pw := io.Pipe()

	// For highly compressible layers, zstd.Writer will output a very small
	// number of bytes per Write(). This is normally fine, but when pushing
	// to a registry, we want to ensure that we're taking full advantage of
	// the available bandwidth instead of sending tons of tiny writes over
	// the wire.
	// 2<<16 = 128KB ought to be small enough for anybody.
	bw := bufio.NewWriterSize(pw, 2<<16)

	// Returns err so we can pw.CloseWithError(err)
	go func() error {
		// TODO(go1.14): Just defer {pw,zw,r}.Close like you'd expect.
		// Context: https://golang.org/issue/24283
		zw, err := zstd.NewWriter(bw, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level)))
		if err != nil {
			return pw.CloseWithError(err)
		}

		if _, err := io.Copy(zw, r); err != nil {
			defer r.Close()
			defer zw.Close()
			return pw.CloseWithError(err)
		}

		// Close zstd writer to Flush it and write zstd trailers.
		if err := zw.Close(); err != nil {
			return pw.CloseWithError(err)
		}

		// Flush bufio writer to ensure we write out everything.
		if err := bw.Flush(); err != nil {
			return pw.CloseWithError(err)
		}

		// We don't really care if these fail.
		defer pw.Close()
		defer r.Close()

		return nil
	}()

	return pr
}
||||
|
||||
// UnzipReadCloser reads compressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which uncompressed data may be read.
// Closing the returned reader closes both the decoder and the original r.
func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
	gr, err := zstd.NewReader(r)
	if err != nil {
		return nil, err
	}
	return &and.ReadCloser{
		Reader: gr,
		CloseFunc: func() error {
			// If the unzip fails, then this seems to return the same
			// error as the read. We don't want this to interfere with
			// us closing the main ReadCloser, since this could leave
			// an open file descriptor (fails on Windows).
			gr.Close()
			return r.Close()
		},
	}, nil
}
||||
|
||||
// Is detects whether the input stream is compressed.
|
||||
func Is(r io.Reader) (bool, error) { |
||||
magicHeader := make([]byte, 4) |
||||
n, err := r.Read(magicHeader) |
||||
if n == 0 && err == io.EOF { |
||||
return false, nil |
||||
} |
||||
if err != nil { |
||||
return false, err |
||||
} |
||||
return bytes.Equal(magicHeader, MagicHeader), nil |
||||
} |
@ -0,0 +1,322 @@ |
||||
# `authn` |
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/authn?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/authn) |
||||
|
||||
This README outlines how we acquire and use credentials when interacting with a registry. |
||||
|
||||
As much as possible, we attempt to emulate `docker`'s authentication behavior and configuration so that this library "just works" if you've already configured credentials that work with `docker`; however, when things don't work, a basic understanding of what's going on can help with debugging. |
||||
|
||||
The official documentation for how authentication with `docker` works is (reasonably) scattered across several different sites and GitHub repositories, so we've tried to summarize the relevant bits here. |
||||
|
||||
## tl;dr for consumers of this package |
||||
|
||||
By default, [`pkg/v1/remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) uses [`Anonymous`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#Anonymous) credentials (i.e. _none_), which for most registries will only allow read access to public images. |
||||
|
||||
To use the credentials found in your Docker config file, you can use the [`DefaultKeychain`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#DefaultKeychain), e.g.: |
||||
|
||||
```go |
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||
) |
||||
|
||||
func main() { |
||||
ref, err := name.ParseReference("registry.example.com/private/repo") |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
// Fetch the manifest using default credentials. |
||||
img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
// Prints the digest of registry.example.com/private/repo |
||||
fmt.Println(img.Digest) |
||||
} |
||||
``` |
||||
|
||||
The `DefaultKeychain` will use credentials as described in your Docker config file -- usually `~/.docker/config.json`, or `%USERPROFILE%\.docker\config.json` on Windows -- or the location described by the `DOCKER_CONFIG` environment variable, if set. |
||||
|
||||
If those are not found, `DefaultKeychain` will look for credentials configured using [Podman's expectation](https://docs.podman.io/en/latest/markdown/podman-login.1.html) that these are found in `${XDG_RUNTIME_DIR}/containers/auth.json`. |
||||
|
||||
[See below](#docker-config-auth) for more information about what is configured in this file. |
||||
|
||||
## Emulating Cloud Provider Credential Helpers |
||||
|
||||
[`pkg/v1/google.Keychain`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/v1/google#Keychain) provides a `Keychain` implementation that emulates [`docker-credential-gcr`](https://github.com/GoogleCloudPlatform/docker-credential-gcr) to find credentials in the environment. |
||||
See [`google.NewEnvAuthenticator`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/v1/google#NewEnvAuthenticator) and [`google.NewGcloudAuthenticator`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/v1/google#NewGcloudAuthenticator) for more information. |
||||
|
||||
To emulate other credential helpers without requiring them to be available as executables, [`NewKeychainFromHelper`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/authn#NewKeychainFromHelper) provides an adapter that takes a Go implementation satisfying a subset of the [`credentials.Helper`](https://pkg.go.dev/github.com/docker/docker-credential-helpers/credentials#Helper) interface, and makes it available as a `Keychain`. |
||||
|
||||
This means that you can emulate, for example, [Amazon ECR's `docker-credential-ecr-login` credential helper](https://github.com/awslabs/amazon-ecr-credential-helper) using the same implementation: |
||||
|
||||
```go |
||||
import ( |
||||
ecr "github.com/awslabs/amazon-ecr-credential-helper/ecr-login" |
||||
"github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||
) |
||||
|
||||
func main() { |
||||
// ... |
||||
ecrHelper := ecr.ECRHelper{ClientFactory: api.DefaultClientFactory{}} |
||||
img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.NewKeychainFromHelper(ecrHelper))) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
// ... |
||||
} |
||||
``` |
||||
|
||||
Likewise, you can emulate [Azure's ACR `docker-credential-acr-env` credential helper](https://github.com/chrismellard/docker-credential-acr-env): |
||||
|
||||
```go |
||||
import ( |
||||
"github.com/chrismellard/docker-credential-acr-env/pkg/credhelper" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||
) |
||||
|
||||
func main() { |
||||
// ... |
||||
acrHelper := credhelper.NewACRCredentialsHelper() |
||||
img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.NewKeychainFromHelper(acrHelper))) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
// ... |
||||
} |
||||
``` |
||||
|
||||
<!-- TODO(jasonhall): Wrap these in docker-credential-magic and reference those from here. --> |
||||
|
||||
## Using Multiple `Keychain`s |
||||
|
||||
[`NewMultiKeychain`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/authn#NewMultiKeychain) allows you to specify multiple `Keychain` implementations, which will be checked in order when credentials are needed. |
||||
|
||||
For example: |
||||
|
||||
```go |
||||
kc := authn.NewMultiKeychain( |
||||
authn.DefaultKeychain, |
||||
google.Keychain, |
||||
authn.NewKeychainFromHelper(ecr.ECRHelper{ClientFactory: api.DefaultClientFactory{}}), |
||||
authn.NewKeychainFromHelper(acr.ACRCredHelper{}), |
||||
) |
||||
``` |
||||
|
||||
This multi-keychain will: |
||||
|
||||
- first check for credentials found in the Docker config file, as described above, then
||||
- check for GCP credentials available in the environment, as described above, then |
||||
- check for ECR credentials by emulating the ECR credential helper, then |
||||
- check for ACR credentials by emulating the ACR credential helper. |
||||
|
||||
If any keychain implementation is able to provide credentials for the request, they will be used, and further keychain implementations will not be consulted. |
||||
|
||||
If no implementations are able to provide credentials, `Anonymous` credentials will be used. |
||||
|
||||
## Docker Config Auth |
||||
|
||||
What follows attempts to gather useful information about Docker's config.json and make it available in one place. |
||||
|
||||
If you have questions, please [file an issue](https://github.com/google/go-containerregistry/issues/new). |
||||
|
||||
### Plaintext |
||||
|
||||
The config file is where your credentials are stored when you invoke `docker login`, e.g. the contents may look something like this: |
||||
|
||||
```json |
||||
{ |
||||
"auths": { |
||||
"registry.example.com": { |
||||
"auth": "QXp1cmVEaWFtb25kOmh1bnRlcjI=" |
||||
} |
||||
} |
||||
} |
||||
``` |
||||
|
||||
The `auths` map has an entry per registry, and the `auth` field contains your username and password encoded as [HTTP 'Basic' Auth](https://tools.ietf.org/html/rfc7617). |
||||
|
||||
**NOTE**: This means that your credentials are stored _in plaintext_: |
||||
|
||||
```bash |
||||
$ echo "QXp1cmVEaWFtb25kOmh1bnRlcjI=" | base64 -d |
||||
AzureDiamond:hunter2 |
||||
``` |
||||
|
||||
For what it's worth, this config file is equivalent to: |
||||
|
||||
```json |
||||
{ |
||||
"auths": { |
||||
"registry.example.com": { |
||||
"username": "AzureDiamond", |
||||
"password": "hunter2" |
||||
} |
||||
} |
||||
} |
||||
``` |
||||
|
||||
... which is useful to know if e.g. your CI system provides you a registry username and password via environment variables and you want to populate this file manually without invoking `docker login`. |
||||
|
||||
### Helpers |
||||
|
||||
If you log in like this, `docker` will warn you that you should use a [credential helper](https://docs.docker.com/engine/reference/commandline/login/#credentials-store), and you should! |
||||
|
||||
To configure a global credential helper: |
||||
```json |
||||
{ |
||||
"credsStore": "osxkeychain" |
||||
} |
||||
``` |
||||
|
||||
To configure a per-registry credential helper: |
||||
```json |
||||
{ |
||||
"credHelpers": { |
||||
"gcr.io": "gcr" |
||||
} |
||||
} |
||||
``` |
||||
|
||||
We use [`github.com/docker/cli/cli/config.Load`](https://godoc.org/github.com/docker/cli/cli/config#Load) to parse the config file and invoke any necessary credential helpers. This handles the logic of taking a [`ConfigFile`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/configfile/file.go#L25-L54) + registry domain and producing an [`AuthConfig`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L3-L22), which determines how we authenticate to the registry. |
||||
|
||||
## Credential Helpers |
||||
|
||||
The [credential helper protocol](https://github.com/docker/docker-credential-helpers) allows you to configure a binary that supplies credentials for the registry, rather than hard-coding them in the config file. |
||||
|
||||
The protocol has several verbs, but the one we most care about is `get`. |
||||
|
||||
For example, using the following config file: |
||||
```json |
||||
{ |
||||
"credHelpers": { |
||||
"gcr.io": "gcr", |
||||
"eu.gcr.io": "gcr" |
||||
} |
||||
} |
||||
``` |
||||
|
||||
To acquire credentials for `gcr.io`, we look in the `credHelpers` map to find |
||||
the credential helper for `gcr.io` is `gcr`. By appending that value to |
||||
`docker-credential-`, we can get the name of the binary we need to use. |
||||
|
||||
For this example, that's `docker-credential-gcr`, which must be on our `$PATH`. |
||||
We'll then invoke that binary to get credentials: |
||||
|
||||
```bash |
||||
$ echo "gcr.io" | docker-credential-gcr get |
||||
{"Username":"_token","Secret":"<long access token>"} |
||||
``` |
||||
|
||||
You can configure the same credential helper for multiple registries, which is |
||||
why we need to pass the domain in via STDIN, e.g. if we were trying to access |
||||
`eu.gcr.io`, we'd do this instead: |
||||
|
||||
```bash |
||||
$ echo "eu.gcr.io" | docker-credential-gcr get |
||||
{"Username":"_token","Secret":"<long access token>"} |
||||
``` |
||||
|
||||
### Debugging credential helpers |
||||
|
||||
If a credential helper is configured but doesn't seem to be working, it can be |
||||
challenging to debug. Implementing a fake credential helper lets you poke around |
||||
to make it easier to see where the failure is happening. |
||||
|
||||
This "implements" a credential helper with hard-coded values: |
||||
``` |
||||
#!/usr/bin/env bash |
||||
echo '{"Username":"<token>","Secret":"hunter2"}' |
||||
``` |
||||
|
||||
|
||||
This implements a credential helper that prints the output of |
||||
`docker-credential-gcr` to both stderr and whatever called it, which allows you |
||||
to snoop on another credential helper: |
||||
``` |
||||
#!/usr/bin/env bash |
||||
docker-credential-gcr $@ | tee >(cat 1>&2) |
||||
``` |
||||
|
||||
Put those files somewhere on your path, naming them e.g. |
||||
`docker-credential-hardcoded` and `docker-credential-tee`, then modify the |
||||
config file to use them: |
||||
|
||||
```json |
||||
{ |
||||
"credHelpers": { |
||||
"gcr.io": "tee", |
||||
"eu.gcr.io": "hardcoded" |
||||
} |
||||
} |
||||
``` |
||||
|
||||
The `docker-credential-tee` trick works with both `crane` and `docker`: |
||||
|
||||
```bash |
||||
$ crane manifest gcr.io/google-containers/pause > /dev/null |
||||
{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":"<redacted>"} |
||||
|
||||
$ docker pull gcr.io/google-containers/pause |
||||
Using default tag: latest |
||||
{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":"<redacted>"} |
||||
latest: Pulling from google-containers/pause |
||||
a3ed95caeb02: Pull complete |
||||
4964c72cd024: Pull complete |
||||
Digest: sha256:a78c2d6208eff9b672de43f880093100050983047b7b0afe0217d3656e1b0d5f |
||||
Status: Downloaded newer image for gcr.io/google-containers/pause:latest |
||||
gcr.io/google-containers/pause:latest |
||||
``` |
||||
|
||||
## The Registry |
||||
|
||||
There are two methods for authenticating against a registry: |
||||
[token](https://docs.docker.com/registry/spec/auth/token/) and |
||||
[oauth2](https://docs.docker.com/registry/spec/auth/oauth/). |
||||
|
||||
Both methods are used to acquire an opaque `Bearer` token (or |
||||
[RegistryToken](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L21)) |
||||
to use in the `Authorization` header. The registry will return a `401 |
||||
Unauthorized` during the [version |
||||
check](https://github.com/opencontainers/distribution-spec/blob/2c3975d1f03b67c9a0203199038adea0413f0573/spec.md#api-version-check) |
||||
(or during normal operations) with a
[Www-Authenticate](https://tools.ietf.org/html/rfc7235#section-4.1) challenge
indicating how to proceed.
||||
|
||||
### Token |
||||
|
||||
If we get back an `AuthConfig` containing a [`Username/Password`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L5-L6) |
||||
or |
||||
[`Auth`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L7), |
||||
we'll use the token method for authentication: |
||||
|
||||
![basic](../../images/credhelper-basic.svg) |
||||
|
||||
### OAuth 2 |
||||
|
||||
If we get back an `AuthConfig` containing an [`IdentityToken`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L18) |
||||
we'll use the oauth2 method for authentication: |
||||
|
||||
![oauth](../../images/credhelper-oauth.svg) |
||||
|
||||
This happens when a credential helper returns a response with the |
||||
[`Username`](https://github.com/docker/docker-credential-helpers/blob/f78081d1f7fef6ad74ad6b79368de6348386e591/credentials/credentials.go#L16) |
||||
set to `<token>` (no, that's not a placeholder, the literal string `"<token>"`). |
||||
It is unclear why: [moby/moby#36926](https://github.com/moby/moby/issues/36926). |
||||
|
||||
We only support the oauth2 `grant_type` for `refresh_token` ([#629](https://github.com/google/go-containerregistry/issues/629)), |
||||
since it's impossible to determine from the registry response whether we should |
||||
use oauth, and the token method for authentication is widely implemented by |
||||
registries. |
@ -0,0 +1,26 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn |
||||
|
||||
// anonymous implements Authenticator for anonymous authentication.
type anonymous struct{}

// Authorization implements Authenticator; it returns an empty AuthConfig,
// i.e. no credentials at all.
func (a *anonymous) Authorization() (*AuthConfig, error) {
	return &AuthConfig{}, nil
}

// Anonymous is a singleton Authenticator for providing anonymous auth.
var Anonymous Authenticator = &anonymous{}
@ -0,0 +1,30 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn |
||||
|
||||
// auth is an Authenticator that simply returns the wrapped AuthConfig.
type auth struct {
	config AuthConfig
}

// FromConfig returns an Authenticator that just returns the given AuthConfig.
func FromConfig(cfg AuthConfig) Authenticator {
	return &auth{cfg}
}

// Authorization implements Authenticator by handing back a pointer to the
// stored config.
func (a *auth) Authorization() (*AuthConfig, error) {
	return &a.config, nil
}
@ -0,0 +1,115 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn |
||||
|
||||
import ( |
||||
"encoding/base64" |
||||
"encoding/json" |
||||
"fmt" |
||||
"strings" |
||||
) |
||||
|
||||
// Authenticator is used to authenticate Docker transports.
type Authenticator interface {
	// Authorization returns the value to use in an http transport's Authorization header.
	Authorization() (*AuthConfig, error)
}
||||
|
||||
// AuthConfig contains authorization information for connecting to a Registry
|
||||
// Inlined what we use from github.com/docker/cli/cli/config/types
|
||||
type AuthConfig struct { |
||||
Username string `json:"username,omitempty"` |
||||
Password string `json:"password,omitempty"` |
||||
Auth string `json:"auth,omitempty"` |
||||
|
||||
// IdentityToken is used to authenticate the user and get
|
||||
// an access token for the registry.
|
||||
IdentityToken string `json:"identitytoken,omitempty"` |
||||
|
||||
// RegistryToken is a bearer token to be sent to a registry
|
||||
RegistryToken string `json:"registrytoken,omitempty"` |
||||
} |
||||
|
||||
// This is effectively a copy of the type AuthConfig. This simplifies
|
||||
// JSON unmarshalling since AuthConfig methods are not inherited
|
||||
type authConfig AuthConfig |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler
|
||||
func (a *AuthConfig) UnmarshalJSON(data []byte) error { |
||||
var shadow authConfig |
||||
err := json.Unmarshal(data, &shadow) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
*a = (AuthConfig)(shadow) |
||||
|
||||
if len(shadow.Auth) != 0 { |
||||
var derr error |
||||
a.Username, a.Password, derr = decodeDockerConfigFieldAuth(shadow.Auth) |
||||
if derr != nil { |
||||
err = fmt.Errorf("unable to decode auth field: %w", derr) |
||||
} |
||||
} else if len(a.Username) != 0 && len(a.Password) != 0 { |
||||
a.Auth = encodeDockerConfigFieldAuth(shadow.Username, shadow.Password) |
||||
} |
||||
|
||||
return err |
||||
} |
||||
|
||||
// MarshalJSON implements json.Marshaler
|
||||
func (a AuthConfig) MarshalJSON() ([]byte, error) { |
||||
shadow := (authConfig)(a) |
||||
shadow.Auth = encodeDockerConfigFieldAuth(shadow.Username, shadow.Password) |
||||
return json.Marshal(shadow) |
||||
} |
||||
|
||||
// decodeDockerConfigFieldAuth deserializes the "auth" field from dockercfg into a
// username and a password. The format of the auth field is base64(<username>:<password>).
//
// From https://github.com/kubernetes/kubernetes/blob/75e49ec824b183288e1dbaccfd7dbe77d89db381/pkg/credentialprovider/config.go
// Copyright 2014 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
func decodeDockerConfigFieldAuth(field string) (username, password string, err error) {
	// Padded input must go through StdEncoding and unpadded input through
	// RawStdEncoding; neither decoder accepts the other form.
	enc := base64.RawStdEncoding
	if strings.HasSuffix(strings.TrimSpace(field), "=") {
		enc = base64.StdEncoding
	}

	decoded, err := enc.DecodeString(field)
	if err != nil {
		return "", "", err
	}

	// Split on the first ':' only; the password may itself contain colons.
	user, pass, ok := strings.Cut(string(decoded), ":")
	if !ok {
		return "", "", fmt.Errorf("must be formatted as base64(username:password)")
	}

	return user, pass, nil
}
||||
|
||||
// encodeDockerConfigFieldAuth serializes a username/password pair into the
// base64(<username>:<password>) form used by the dockercfg "auth" field.
func encodeDockerConfigFieldAuth(username, password string) string {
	creds := username + ":" + password
	return base64.StdEncoding.EncodeToString([]byte(creds))
}
@ -0,0 +1,29 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn |
||||
|
||||
// Basic implements Authenticator for basic authentication.
type Basic struct {
	// Username is the basic-auth user.
	Username string
	// Password is the basic-auth secret.
	Password string
}
||||
|
||||
// Authorization implements Authenticator.
|
||||
func (b *Basic) Authorization() (*AuthConfig, error) { |
||||
return &AuthConfig{ |
||||
Username: b.Username, |
||||
Password: b.Password, |
||||
}, nil |
||||
} |
@ -0,0 +1,27 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn |
||||
|
||||
// Bearer implements Authenticator for bearer authentication.
type Bearer struct {
	// Token is returned as the AuthConfig's RegistryToken.
	Token string `json:"token"`
}
||||
|
||||
// Authorization implements Authenticator.
|
||||
func (b *Bearer) Authorization() (*AuthConfig, error) { |
||||
return &AuthConfig{ |
||||
RegistryToken: b.Token, |
||||
}, nil |
||||
} |
@ -0,0 +1,17 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package authn defines different methods of authentication for
|
||||
// talking to a container registry.
|
||||
package authn |
@ -0,0 +1,180 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn |
||||
|
||||
import ( |
||||
"os" |
||||
"path/filepath" |
||||
"sync" |
||||
|
||||
"github.com/docker/cli/cli/config" |
||||
"github.com/docker/cli/cli/config/configfile" |
||||
"github.com/docker/cli/cli/config/types" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
"github.com/mitchellh/go-homedir" |
||||
) |
||||
|
||||
// Resource represents a registry or repository that can be authenticated against.
type Resource interface {
	// String returns the full string representation of the target, e.g.
	// gcr.io/my-project or just gcr.io.
	String() string

	// RegistryStr returns just the registry portion of the target, e.g. for
	// gcr.io/my-project, this should just return gcr.io. This is needed to
	// pull out an appropriate hostname.
	RegistryStr() string
}

// Keychain is an interface for resolving an image reference to a credential.
type Keychain interface {
	// Resolve looks up the most appropriate credential for the specified target.
	Resolve(Resource) (Authenticator, error)
}

// defaultKeychain implements Keychain with the semantics of the standard Docker
// credential keychain.
type defaultKeychain struct {
	// mu serializes Resolve calls, which read config files and env vars.
	mu sync.Mutex
}

var (
	// DefaultKeychain implements Keychain by interpreting the docker config file.
	DefaultKeychain Keychain = &defaultKeychain{}
)

const (
	// DefaultAuthKey is the key used for dockerhub in config files, which
	// is hardcoded for historical reasons.
	DefaultAuthKey = "https://" + name.DefaultRegistry + "/v1/"
)
||||
|
||||
// Resolve implements Keychain. It locates a Docker (or Podman) auth config
// file, looks up credentials for the target, and falls back to Anonymous
// when none are configured.
func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) {
	// Serialize resolution: config loading reads files and env vars.
	dk.mu.Lock()
	defer dk.mu.Unlock()

	// Podman users may have their container registry auth configured in a
	// different location, that Docker packages aren't aware of.
	// If the Docker config file isn't found, we'll fallback to look where
	// Podman configures it, and parse that as a Docker auth config instead.

	// First, check $HOME/.docker/config.json
	foundDockerConfig := false
	home, err := homedir.Dir()
	if err == nil {
		foundDockerConfig = fileExists(filepath.Join(home, ".docker/config.json"))
	}
	// If $HOME/.docker/config.json isn't found, check $DOCKER_CONFIG (if set)
	if !foundDockerConfig && os.Getenv("DOCKER_CONFIG") != "" {
		foundDockerConfig = fileExists(filepath.Join(os.Getenv("DOCKER_CONFIG"), "config.json"))
	}
	// If either of those locations are found, load it using Docker's
	// config.Load, which may fail if the config can't be parsed.
	//
	// If neither was found, look for Podman's auth at
	// $XDG_RUNTIME_DIR/containers/auth.json and attempt to load it as a
	// Docker config.
	//
	// If neither are found, fallback to Anonymous.
	var cf *configfile.ConfigFile
	if foundDockerConfig {
		cf, err = config.Load(os.Getenv("DOCKER_CONFIG"))
		if err != nil {
			return nil, err
		}
	} else {
		// NOTE: this err shadows the outer one; it is fully handled inside
		// the branch, so nothing leaks out.
		f, err := os.Open(filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json"))
		if err != nil {
			// A missing Podman auth file is not an error: no credentials.
			return Anonymous, nil
		}
		defer f.Close()
		cf, err = config.LoadFromReader(f)
		if err != nil {
			return nil, err
		}
	}

	// Try the most specific key first (full repo string), then the registry
	// alone.
	// See:
	// https://github.com/google/ko/issues/90
	// https://github.com/moby/moby/blob/fc01c2b481097a6057bec3cd1ab2d7b4488c50c4/registry/config.go#L397-L404
	var cfg, empty types.AuthConfig
	for _, key := range []string{
		target.String(),
		target.RegistryStr(),
	} {
		// Docker config files key Docker Hub under DefaultAuthKey, which is
		// hardcoded for historical reasons (see the const above).
		if key == name.DefaultRegistry {
			key = DefaultAuthKey
		}

		cfg, err = cf.GetAuthConfig(key)
		if err != nil {
			return nil, err
		}
		// cf.GetAuthConfig automatically sets the ServerAddress attribute. Since
		// we don't make use of it, clear the value for a proper "is-empty" test.
		// See: https://github.com/google/go-containerregistry/issues/1510
		cfg.ServerAddress = ""
		if cfg != empty {
			break
		}
	}
	if cfg == empty {
		return Anonymous, nil
	}

	return FromConfig(AuthConfig{
		Username:      cfg.Username,
		Password:      cfg.Password,
		Auth:          cfg.Auth,
		IdentityToken: cfg.IdentityToken,
		RegistryToken: cfg.RegistryToken,
	}), nil
}
||||
|
||||
// fileExists returns true if the given path exists and is not a directory.
func fileExists(path string) bool {
	info, statErr := os.Stat(path)
	if statErr != nil {
		return false
	}
	return !info.IsDir()
}
||||
|
||||
// Helper is a subset of the Docker credential helper credentials.Helper
// interface used by NewKeychainFromHelper.
//
// See:
// https://pkg.go.dev/github.com/docker/docker-credential-helpers/credentials#Helper
type Helper interface {
	// Get returns the username and secret stored for the given server URL.
	Get(serverURL string) (string, string, error)
}

// NewKeychainFromHelper returns a Keychain based on a Docker credential helper
// implementation that can Get username and password credentials for a given
// server URL.
func NewKeychainFromHelper(h Helper) Keychain { return wrapper{h} }

// wrapper adapts a Helper to the Keychain interface.
type wrapper struct{ h Helper }
||||
|
||||
func (w wrapper) Resolve(r Resource) (Authenticator, error) { |
||||
u, p, err := w.h.Get(r.RegistryStr()) |
||||
if err != nil { |
||||
return Anonymous, nil |
||||
} |
||||
// If the secret being stored is an identity token, the Username should be set to <token>
|
||||
// ref: https://docs.docker.com/engine/reference/commandline/login/#credential-helper-protocol
|
||||
if u == "<token>" { |
||||
return FromConfig(AuthConfig{Username: u, IdentityToken: p}), nil |
||||
} |
||||
return FromConfig(AuthConfig{Username: u, Password: p}), nil |
||||
} |
@ -0,0 +1,41 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn |
||||
|
||||
// multiKeychain tries a sequence of keychains in order.
type multiKeychain struct {
	// keychains are consulted in order; the first non-anonymous hit wins.
	keychains []Keychain
}

// Assert that our multi-keychain implements Keychain.
var _ (Keychain) = (*multiKeychain)(nil)
||||
|
||||
// NewMultiKeychain composes a list of keychains into one new keychain.
|
||||
func NewMultiKeychain(kcs ...Keychain) Keychain { |
||||
return &multiKeychain{keychains: kcs} |
||||
} |
||||
|
||||
// Resolve implements Keychain.
|
||||
func (mk *multiKeychain) Resolve(target Resource) (Authenticator, error) { |
||||
for _, kc := range mk.keychains { |
||||
auth, err := kc.Resolve(target) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
if auth != Anonymous { |
||||
return auth, nil |
||||
} |
||||
} |
||||
return Anonymous, nil |
||||
} |
@ -0,0 +1,26 @@ |
||||
// Copyright 2022 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package compression abstracts over gzip and zstd.
|
||||
package compression |
||||
|
||||
// Compression is an enumeration of the supported compression algorithms
type Compression string

// The collection of known Compression values.
const (
	None Compression = "none"
	GZip Compression = "gzip"
	ZStd Compression = "zstd"
)
@ -0,0 +1,39 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package logs exposes the loggers used by this library.
|
||||
package logs |
||||
|
||||
import ( |
||||
"io" |
||||
"log" |
||||
) |
||||
|
||||
// All three loggers default to io.Discard (silent); callers opt in to
// output by replacing the writer, e.g. Warn.SetOutput(os.Stderr).
var (
	// Warn is used to log non-fatal errors.
	Warn = log.New(io.Discard, "", log.LstdFlags)

	// Progress is used to log notable, successful events.
	Progress = log.New(io.Discard, "", log.LstdFlags)

	// Debug is used to log information that is useful for debugging.
	Debug = log.New(io.Discard, "", log.LstdFlags)
)
||||
|
||||
// Enabled reports whether the logger writes anywhere other than io.Discard,
// letting callers skip expensive formatting whose output would be thrown
// away anyway.
func Enabled(l *log.Logger) bool {
	sink := l.Writer()
	return sink != io.Discard
}
@ -0,0 +1,3 @@ |
||||
# `name` |
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/name?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/name) |
@ -0,0 +1,43 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name |
||||
|
||||
import ( |
||||
"strings" |
||||
"unicode/utf8" |
||||
) |
||||
|
||||
// stripRunesFn builds a strings.Map callback that deletes (returns -1 for)
// every rune present in runes and passes all other runes through unchanged.
func stripRunesFn(runes string) func(rune) rune {
	return func(r rune) rune {
		if !strings.ContainsRune(runes, r) {
			return r
		}
		return -1
	}
}
||||
|
||||
// checkElement checks a given named element matches character and length restrictions.
// Returns nil if the given element adheres to the given restrictions, or an
// ErrBadName describing the violation otherwise.
func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error {
	// Length limits are measured in runes, not bytes, so multi-byte UTF-8
	// characters count once each.
	numRunes := utf8.RuneCountInString(element)
	if (numRunes < minRunes) || (maxRunes < numRunes) {
		return newErrBadName("%s must be between %d and %d characters in length: %s", name, minRunes, maxRunes, element)
	} else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 {
		// Deleting every allowed rune must leave nothing behind; anything
		// remaining is a disallowed character.
		return newErrBadName("%s can only contain the characters `%s`: %s", name, allowedRunes, element)
	}
	return nil
}
@ -0,0 +1,93 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name |
||||
|
||||
import ( |
||||
_ "crypto/sha256" // Recommended by go-digest.
|
||||
"strings" |
||||
|
||||
"github.com/opencontainers/go-digest" |
||||
) |
||||
|
||||
// digestDelim separates the repository from the digest in a reference,
// e.g. "repo@sha256:...".
const digestDelim = "@"

// Digest stores a digest name in a structured form.
type Digest struct {
	// Repository is the embedded registry+repository portion.
	Repository
	// digest is the "<algorithm>:<hex>" component.
	digest string
	// original preserves the exact input string for String().
	original string
}

// Ensure Digest implements Reference
var _ Reference = (*Digest)(nil)
||||
|
||||
// Context implements Reference, returning the repository portion.
func (d Digest) Context() Repository {
	return d.Repository
}

// Identifier implements Reference; for a Digest this is the digest string.
func (d Digest) Identifier() string {
	return d.DigestStr()
}

// DigestStr returns the digest component of the Digest,
// i.e. "<algorithm>:<hex>".
func (d Digest) DigestStr() string {
	return d.digest
}

// Name returns the name from which the Digest was derived, in the
// fully-qualified "repository@digest" form.
func (d Digest) Name() string {
	return d.Repository.Name() + digestDelim + d.DigestStr()
}

// String returns the original input string.
func (d Digest) String() string {
	return d.original
}
||||
|
||||
// NewDigest returns a new Digest representing the given name.
//
// The input must contain exactly one '@' (e.g. "repo@sha256:<hex>",
// optionally with a tag before the '@'); only the canonical digest
// algorithm is accepted and the hex portion is validated.
func NewDigest(name string, opts ...Option) (Digest, error) {
	// Split on "@"
	parts := strings.Split(name, digestDelim)
	if len(parts) != 2 {
		return Digest{}, newErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name)
	}
	base := parts[0]
	dig := parts[1]
	prefix := digest.Canonical.String() + ":"
	if !strings.HasPrefix(dig, prefix) {
		return Digest{}, newErrBadName("unsupported digest algorithm: %s", dig)
	}
	hex := strings.TrimPrefix(dig, prefix)
	if err := digest.Canonical.Validate(hex); err != nil {
		return Digest{}, err
	}

	// If the base also parses as a tag (e.g. "repo:tag@sha256:..."), drop
	// the tag and keep only the repository name.
	tag, err := NewTag(base, opts...)
	if err == nil {
		base = tag.Repository.Name()
	}

	repo, err := NewRepository(base, opts...)
	if err != nil {
		return Digest{}, err
	}
	return Digest{
		Repository: repo,
		digest:     dig,
		original:   name,
	}, nil
}
@ -0,0 +1,42 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package name defines structured types for representing image references.
|
||||
//
|
||||
// What's in a name? For image references, not nearly enough!
|
||||
//
|
||||
// Image references look a lot like URLs, but they differ in that they don't
|
||||
// contain the scheme (http or https), they can end with a :tag or a @digest
|
||||
// (the latter being validated), and they perform defaulting for missing
|
||||
// components.
|
||||
//
|
||||
// Since image references don't contain the scheme, we do our best to infer
|
||||
// if we use http or https from the given hostname. We allow http fallback for
|
||||
// any host that looks like localhost (localhost, 127.0.0.1, ::1), ends in
|
||||
// ".local", or is in the "private" address space per RFC 1918. For everything
|
||||
// else, we assume https only. To override this heuristic, use the Insecure
|
||||
// option.
|
||||
//
|
||||
// Image references with a digest signal to us that we should verify the content
|
||||
// of the image matches the digest. E.g. when pulling a Digest reference, we'll
|
||||
// calculate the sha256 of the manifest returned by the registry and error out
|
||||
// if it doesn't match what we asked for.
|
||||
//
|
||||
// For defaulting, we interpret "ubuntu" as
|
||||
// "index.docker.io/library/ubuntu:latest" because we add the missing repo
|
||||
// "library", the missing registry "index.docker.io", and the missing tag
|
||||
// "latest". To disable this defaulting, use the StrictValidation option. This
|
||||
// is useful e.g. to only allow image references that explicitly set a tag or
|
||||
// digest, so that you don't accidentally pull "latest".
|
||||
package name |
@ -0,0 +1,48 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
) |
||||
|
||||
// ErrBadName is an error for when a bad docker name is supplied.
type ErrBadName struct {
	// info is the human-readable message returned by Error.
	info string
}

// Error implements the error interface.
func (e *ErrBadName) Error() string {
	return e.info
}

// Is reports whether target is an error of type ErrBadName,
// unwrapping target's chain via errors.As.
func (e *ErrBadName) Is(target error) bool {
	var berr *ErrBadName
	return errors.As(target, &berr)
}
||||
|
||||
// newErrBadName returns a ErrBadName which returns the given formatted string from Error().
|
||||
func newErrBadName(fmtStr string, args ...any) *ErrBadName { |
||||
return &ErrBadName{fmt.Sprintf(fmtStr, args...)} |
||||
} |
||||
|
||||
// IsErrBadName returns true if the given error is an ErrBadName.
|
||||
//
|
||||
// Deprecated: Use errors.Is.
|
||||
func IsErrBadName(err error) bool { |
||||
var berr *ErrBadName |
||||
return errors.As(err, &berr) |
||||
} |
@ -0,0 +1,83 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name |
||||
|
||||
const (
	// DefaultRegistry is the registry name that will be used if no registry
	// provided and the default is not overridden.
	DefaultRegistry      = "index.docker.io"
	defaultRegistryAlias = "docker.io"

	// DefaultTag is the tag name that will be used if no tag provided and the
	// default is not overridden.
	DefaultTag = "latest"
)

// options collects the resolved settings produced by applying Option
// functions on top of the package defaults; see makeOptions.
type options struct {
	strict          bool // weak by default
	insecure        bool // secure by default
	defaultRegistry string
	defaultTag      string
}
||||
|
||||
func makeOptions(opts ...Option) options { |
||||
opt := options{ |
||||
defaultRegistry: DefaultRegistry, |
||||
defaultTag: DefaultTag, |
||||
} |
||||
for _, o := range opts { |
||||
o(&opt) |
||||
} |
||||
return opt |
||||
} |
||||
|
||||
// Option is a functional option for name parsing.
type Option func(*options)

// StrictValidation is an Option that requires image references to be fully
// specified; i.e. no defaulting for registry (dockerhub), repo (library),
// or tag (latest).
func StrictValidation(opts *options) {
	opts.strict = true
}

// WeakValidation is an Option that sets defaults when parsing names, see
// StrictValidation. This is the default behavior (strict is false unless
// overridden).
func WeakValidation(opts *options) {
	opts.strict = false
}

// Insecure is an Option that allows image references to be fetched without TLS.
func Insecure(opts *options) {
	opts.insecure = true
}

// OptionFn is a function that returns an option.
type OptionFn func() Option
||||
|
||||
// WithDefaultRegistry sets the default registry that will be used if one is not
// provided.
func WithDefaultRegistry(r string) Option {
	return func(opts *options) {
		opts.defaultRegistry = r
	}
}

// WithDefaultTag sets the default tag that will be used if one is not provided.
func WithDefaultTag(t string) Option {
	return func(opts *options) {
		opts.defaultTag = t
	}
}
@ -0,0 +1,75 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name |
||||
|
||||
import ( |
||||
"fmt" |
||||
) |
||||
|
||||
// Reference defines the interface that consumers use when they can
// take either a tag or a digest.
type Reference interface {
	fmt.Stringer

	// Context accesses the Repository context of the reference.
	Context() Repository

	// Identifier accesses the type-specific portion of the reference
	// (the tag or the digest string).
	Identifier() string

	// Name is the fully-qualified reference name.
	Name() string

	// Scope is the scope needed to access this reference.
	Scope(string) string
}
||||
|
||||
// ParseReference parses the string as a reference, either by tag or digest.
|
||||
func ParseReference(s string, opts ...Option) (Reference, error) { |
||||
if t, err := NewTag(s, opts...); err == nil { |
||||
return t, nil |
||||
} |
||||
if d, err := NewDigest(s, opts...); err == nil { |
||||
return d, nil |
||||
} |
||||
return nil, newErrBadName("could not parse reference: " + s) |
||||
} |
||||
|
||||
type stringConst string |
||||
|
||||
// MustParseReference behaves like ParseReference, but panics instead of
// returning an error. It's intended for use in tests, or when a value is
// expected to be valid at code authoring time.
//
// To discourage its use in scenarios where the value is not known at code
// authoring time, it must be passed a string constant:
//
//	const str = "valid/string"
//	MustParseReference(str)
//	MustParseReference("another/valid/string")
//	MustParseReference(str + "/and/more")
//
// These will not compile:
//
//	var str = "valid/string"
//	MustParseReference(str)
//	MustParseReference(strings.Join([]string{"valid", "string"}, "/"))
func MustParseReference(s stringConst, opts ...Option) Reference {
	ref, err := ParseReference(string(s), opts...)
	if err != nil {
		// Panic is deliberate: callers promise validity at authoring time.
		panic(err)
	}
	return ref
}
@ -0,0 +1,136 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name |
||||
|
||||
import ( |
||||
"net" |
||||
"net/url" |
||||
"regexp" |
||||
"strings" |
||||
) |
||||
|
||||
// reLocal detects more complex forms of local references: hostnames ending in
// ".local" or ".localhost", optionally followed by a port.
var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`)

// reLoopback detects the IPv4 loopback address (127.0.0.1); the pattern is
// unanchored, so it matches anywhere in the name.
var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1"))

// reipv6Loopback detects the IPv6 loopback address (::1), also unanchored.
var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1"))
||||
|
||||
// Registry stores a docker registry name in a structured form.
type Registry struct {
	// insecure records whether the registry was explicitly marked as
	// reachable over plain HTTP (via the Insecure option).
	insecure bool
	// registry is the registry authority, in hostname[:port] form.
	registry string
}
||||
|
||||
// RegistryStr returns the registry component of the Registry.
func (r Registry) RegistryStr() string {
	return r.registry
}

// Name returns the name from which the Registry was derived.
func (r Registry) Name() string {
	return r.RegistryStr()
}

// String implements fmt.Stringer, returning the same value as Name.
func (r Registry) String() string {
	return r.Name()
}
||||
|
||||
// Scope returns the scope required to access the registry.
// The argument is ignored: the registry exposes only one resource.
func (r Registry) Scope(string) string {
	// The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z
	return "registry:catalog:*"
}
||||
|
||||
func (r Registry) isRFC1918() bool { |
||||
ipStr := strings.Split(r.Name(), ":")[0] |
||||
ip := net.ParseIP(ipStr) |
||||
if ip == nil { |
||||
return false |
||||
} |
||||
for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} { |
||||
_, block, _ := net.ParseCIDR(cidr) |
||||
if block.Contains(ip) { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// Scheme returns https scheme for all the endpoints except localhost or when explicitly defined.
|
||||
func (r Registry) Scheme() string { |
||||
if r.insecure { |
||||
return "http" |
||||
} |
||||
if r.isRFC1918() { |
||||
return "http" |
||||
} |
||||
if strings.HasPrefix(r.Name(), "localhost:") { |
||||
return "http" |
||||
} |
||||
if reLocal.MatchString(r.Name()) { |
||||
return "http" |
||||
} |
||||
if reLoopback.MatchString(r.Name()) { |
||||
return "http" |
||||
} |
||||
if reipv6Loopback.MatchString(r.Name()) { |
||||
return "http" |
||||
} |
||||
return "https" |
||||
} |
||||
|
||||
func checkRegistry(name string) error { |
||||
// Per RFC 3986, registries (authorities) are required to be prefixed with "//"
|
||||
// url.Host == hostname[:port] == authority
|
||||
if url, err := url.Parse("//" + name); err != nil || url.Host != name { |
||||
return newErrBadName("registries must be valid RFC 3986 URI authorities: %s", name) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// NewRegistry returns a Registry based on the given name.
// Strict validation requires explicit, valid RFC 3986 URI authorities to be given.
func NewRegistry(name string, opts ...Option) (Registry, error) {
	opt := makeOptions(opts...)
	if opt.strict && len(name) == 0 {
		return Registry{}, newErrBadName("strict validation requires the registry to be explicitly defined")
	}

	// Validate the raw input before any defaulting/rewriting below.
	if err := checkRegistry(name); err != nil {
		return Registry{}, err
	}

	// An empty name (allowed in non-strict mode) falls back to the default
	// registry from the options.
	if name == "" {
		name = opt.defaultRegistry
	}
	// Rewrite "docker.io" to "index.docker.io".
	// See: https://github.com/google/go-containerregistry/issues/68
	if name == defaultRegistryAlias {
		name = DefaultRegistry
	}

	return Registry{registry: name, insecure: opt.insecure}, nil
}
||||
|
||||
// NewInsecureRegistry returns an Insecure Registry based on the given name.
|
||||
//
|
||||
// Deprecated: Use the Insecure Option with NewRegistry instead.
|
||||
func NewInsecureRegistry(name string, opts ...Option) (Registry, error) { |
||||
opts = append(opts, Insecure) |
||||
return NewRegistry(name, opts...) |
||||
} |
@ -0,0 +1,121 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name |
||||
|
||||
import ( |
||||
"fmt" |
||||
"strings" |
||||
) |
||||
|
||||
const (
	// defaultNamespace is the implicit namespace for single-element Docker Hub
	// repositories, e.g. "ubuntu" -> "library/ubuntu".
	defaultNamespace = "library"
	// repositoryChars is the set of characters allowed in a repository path.
	repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./"
	// regRepoDelimiter separates the registry host from the repository path.
	regRepoDelimiter = "/"
)
||||
|
||||
// Repository stores a docker repository name in a structured form.
type Repository struct {
	// Registry is the registry hosting this repository (embedded, so
	// Repository inherits its methods such as RegistryStr).
	Registry
	// repository is the repository path within the registry.
	repository string
}
||||
|
||||
// See https://docs.docker.com/docker-hub/official_repos
|
||||
func hasImplicitNamespace(repo string, reg Registry) bool { |
||||
return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry |
||||
} |
||||
|
||||
// RepositoryStr returns the repository component of the Repository.
|
||||
func (r Repository) RepositoryStr() string { |
||||
if hasImplicitNamespace(r.repository, r.Registry) { |
||||
return fmt.Sprintf("%s/%s", defaultNamespace, r.repository) |
||||
} |
||||
return r.repository |
||||
} |
||||
|
||||
// Name returns the name from which the Repository was derived.
|
||||
func (r Repository) Name() string { |
||||
regName := r.Registry.Name() |
||||
if regName != "" { |
||||
return regName + regRepoDelimiter + r.RepositoryStr() |
||||
} |
||||
// TODO: As far as I can tell, this is unreachable.
|
||||
return r.RepositoryStr() |
||||
} |
||||
|
||||
// String implements fmt.Stringer, returning the same value as Name.
func (r Repository) String() string {
	return r.Name()
}

// Scope returns the scope required to perform the given action on the registry.
// TODO(jonjohnsonjr): consider moving scopes to a separate package.
func (r Repository) Scope(action string) string {
	return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action)
}
||||
|
||||
// checkRepository validates the repository path: allowed characters only,
// length between 2 and 255.
func checkRepository(repository string) error {
	return checkElement("repository", repository, repositoryChars, 2, 255)
}
||||
|
||||
// NewRepository returns a new Repository representing the given name, according to the given strictness.
func NewRepository(name string, opts ...Option) (Repository, error) {
	opt := makeOptions(opts...)
	if len(name) == 0 {
		return Repository{}, newErrBadName("a repository name must be specified")
	}

	var registry string
	repo := name
	parts := strings.SplitN(name, regRepoDelimiter, 2)
	if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) {
		// The first part of the repository is treated as the registry domain
		// iff it contains a '.' or ':' character, otherwise it is all repository
		// and the domain defaults to Docker Hub.
		registry = parts[0]
		repo = parts[1]
	}

	if err := checkRepository(repo); err != nil {
		return Repository{}, err
	}

	reg, err := NewRegistry(registry, opts...)
	if err != nil {
		return Repository{}, err
	}
	// In strict mode, refuse single-element Docker Hub names like "ubuntu";
	// callers must spell out "library/ubuntu" explicitly.
	if hasImplicitNamespace(repo, reg) && opt.strict {
		return Repository{}, newErrBadName("strict validation requires the full repository path (missing 'library')")
	}
	return Repository{reg, repo}, nil
}
||||
|
||||
// Tag returns a Tag in this Repository.
func (r Repository) Tag(identifier string) Tag {
	t := Tag{
		tag:        identifier,
		Repository: r,
	}
	// Use the canonical name as the "original" input so String() is stable.
	t.original = t.Name()
	return t
}

// Digest returns a Digest in this Repository.
func (r Repository) Digest(identifier string) Digest {
	d := Digest{
		digest:     identifier,
		Repository: r,
	}
	// Use the canonical name as the "original" input so String() is stable.
	d.original = d.Name()
	return d
}
@ -0,0 +1,108 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name |
||||
|
||||
import ( |
||||
"strings" |
||||
) |
||||
|
||||
const (
	// TODO(dekkagaijin): use the docker/distribution regexes for validation.
	// tagChars is the set of characters allowed in a tag.
	tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	// tagDelim separates the repository from the tag, e.g. "repo:tag".
	tagDelim = ":"
)
||||
|
||||
// Tag stores a docker tag name in a structured form.
type Tag struct {
	// Repository is the repository the tag belongs to (embedded).
	Repository
	// tag is the tag component, e.g. "latest".
	tag string
	// original preserves the exact input string for String().
	original string
}

// Ensure Tag implements Reference at compile time.
var _ Reference = (*Tag)(nil)
||||
|
||||
// Context implements Reference, returning the Repository of the tag.
func (t Tag) Context() Repository {
	return t.Repository
}

// Identifier implements Reference, returning the tag portion.
func (t Tag) Identifier() string {
	return t.TagStr()
}

// TagStr returns the tag component of the Tag.
func (t Tag) TagStr() string {
	return t.tag
}

// Name returns the name from which the Tag was derived.
func (t Tag) Name() string {
	return t.Repository.Name() + tagDelim + t.TagStr()
}

// String returns the original input string.
func (t Tag) String() string {
	return t.original
}

// Scope returns the scope required to perform the given action on the tag.
// Access is scoped at the repository level.
func (t Tag) Scope(action string) string {
	return t.Repository.Scope(action)
}
||||
|
||||
// checkTag validates the tag: allowed characters only, length between 1 and 128.
func checkTag(name string) error {
	return checkElement("tag", name, tagChars, 1, 128)
}
||||
|
||||
// NewTag returns a new Tag representing the given name, according to the given strictness.
|
||||
func NewTag(name string, opts ...Option) (Tag, error) { |
||||
opt := makeOptions(opts...) |
||||
base := name |
||||
tag := "" |
||||
|
||||
// Split on ":"
|
||||
parts := strings.Split(name, tagDelim) |
||||
// Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation.
|
||||
if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) { |
||||
base = strings.Join(parts[:len(parts)-1], tagDelim) |
||||
tag = parts[len(parts)-1] |
||||
} |
||||
|
||||
// We don't require a tag, but if we get one check it's valid,
|
||||
// even when not being strict.
|
||||
// If we are being strict, we want to validate the tag regardless in case
|
||||
// it's empty.
|
||||
if tag != "" || opt.strict { |
||||
if err := checkTag(tag); err != nil { |
||||
return Tag{}, err |
||||
} |
||||
} |
||||
|
||||
if tag == "" { |
||||
tag = opt.defaultTag |
||||
} |
||||
|
||||
repo, err := NewRepository(base, opts...) |
||||
if err != nil { |
||||
return Tag{}, err |
||||
} |
||||
return Tag{ |
||||
Repository: repo, |
||||
tag: tag, |
||||
original: name, |
||||
}, nil |
||||
} |
@ -0,0 +1,136 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1 |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"io" |
||||
"time" |
||||
) |
||||
|
||||
// ConfigFile is the configuration file that holds the metadata describing
// how to launch a container. See:
// https://github.com/opencontainers/image-spec/blob/master/config.md
//
// docker_version and os.version are not part of the spec but included
// for backwards compatibility.
type ConfigFile struct {
	Architecture  string    `json:"architecture"`
	Author        string    `json:"author,omitempty"`
	Container     string    `json:"container,omitempty"`
	Created       Time      `json:"created,omitempty"`
	DockerVersion string    `json:"docker_version,omitempty"`
	History       []History `json:"history,omitempty"`
	OS            string    `json:"os"`
	RootFS        RootFS    `json:"rootfs"`
	Config        Config    `json:"config"`
	OSVersion     string    `json:"os.version,omitempty"`
	Variant       string    `json:"variant,omitempty"`
}
||||
|
||||
// History is one entry of a list recording how this container image was built.
type History struct {
	Author     string `json:"author,omitempty"`
	Created    Time   `json:"created,omitempty"`
	CreatedBy  string `json:"created_by,omitempty"`
	Comment    string `json:"comment,omitempty"`
	EmptyLayer bool   `json:"empty_layer,omitempty"`
}
||||
|
||||
// Time is a wrapper around time.Time to help with deep copying.
type Time struct {
	time.Time
}

// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time
// type is effectively immutable in the time API, so it is safe to
// copy-by-assign, despite the presence of (unexported) Pointer fields.
func (t *Time) DeepCopyInto(out *Time) {
	*out = *t
}
||||
|
||||
// RootFS holds the ordered list of file system deltas that comprise the
// container image's root filesystem.
type RootFS struct {
	Type string `json:"type"`
	// DiffIDs are the hashes of the uncompressed layers, in order.
	DiffIDs []Hash `json:"diff_ids"`
}
||||
|
||||
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
	// Test is the test to perform to check that the container is healthy.
	// An empty slice means to inherit the default.
	// The options are:
	// {} : inherit healthcheck
	// {"NONE"} : disable healthcheck
	// {"CMD", args...} : exec arguments directly
	// {"CMD-SHELL", command} : run command with system's default shell
	Test []string `json:",omitempty"`

	// Zero means to inherit. Durations are expressed as integer nanoseconds.
	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
	StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.

	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
	// Zero means inherit.
	Retries int `json:",omitempty"`
}
||||
|
||||
// Config is a submessage of the config file described as:
//
//	The execution parameters which SHOULD be used as a base when running
//	a container using the image.
//
// The names of the fields in this message are chosen to reflect the JSON
// payload of the Config as defined here:
// https://git.io/vrAET
// and
// https://github.com/opencontainers/image-spec/blob/master/config.md
type Config struct {
	AttachStderr    bool                `json:"AttachStderr,omitempty"`
	AttachStdin     bool                `json:"AttachStdin,omitempty"`
	AttachStdout    bool                `json:"AttachStdout,omitempty"`
	Cmd             []string            `json:"Cmd,omitempty"`
	Healthcheck     *HealthConfig       `json:"Healthcheck,omitempty"`
	Domainname      string              `json:"Domainname,omitempty"`
	Entrypoint      []string            `json:"Entrypoint,omitempty"`
	Env             []string            `json:"Env,omitempty"`
	Hostname        string              `json:"Hostname,omitempty"`
	Image           string              `json:"Image,omitempty"`
	Labels          map[string]string   `json:"Labels,omitempty"`
	OnBuild         []string            `json:"OnBuild,omitempty"`
	OpenStdin       bool                `json:"OpenStdin,omitempty"`
	StdinOnce       bool                `json:"StdinOnce,omitempty"`
	Tty             bool                `json:"Tty,omitempty"`
	User            string              `json:"User,omitempty"`
	Volumes         map[string]struct{} `json:"Volumes,omitempty"`
	WorkingDir      string              `json:"WorkingDir,omitempty"`
	ExposedPorts    map[string]struct{} `json:"ExposedPorts,omitempty"`
	ArgsEscaped     bool                `json:"ArgsEscaped,omitempty"`
	NetworkDisabled bool                `json:"NetworkDisabled,omitempty"`
	MacAddress      string              `json:"MacAddress,omitempty"`
	StopSignal      string              `json:"StopSignal,omitempty"`
	Shell           []string            `json:"Shell,omitempty"`
}
||||
|
||||
// ParseConfigFile parses the io.Reader's contents into a ConfigFile.
|
||||
func ParseConfigFile(r io.Reader) (*ConfigFile, error) { |
||||
cf := ConfigFile{} |
||||
if err := json.NewDecoder(r).Decode(&cf); err != nil { |
||||
return nil, err |
||||
} |
||||
return &cf, nil |
||||
} |
@ -0,0 +1,18 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
|
||||
// Package v1 defines structured types for OCI v1 images
|
||||
package v1 |
@ -0,0 +1,123 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1 |
||||
|
||||
import ( |
||||
"crypto/sha256" |
||||
"encoding/hex" |
||||
"encoding/json" |
||||
"fmt" |
||||
"hash" |
||||
"io" |
||||
"strconv" |
||||
"strings" |
||||
) |
||||
|
||||
// Hash is an unqualified digest of some content, e.g. sha256:deadbeef
|
||||
type Hash struct { |
||||
// Algorithm holds the algorithm used to compute the hash.
|
||||
Algorithm string |
||||
|
||||
// Hex holds the hex portion of the content hash.
|
||||
Hex string |
||||
} |
||||
|
||||
// String reverses NewHash returning the string-form of the hash.
|
||||
func (h Hash) String() string { |
||||
return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex) |
||||
} |
||||
|
||||
// NewHash validates the input string is a hash and returns a strongly type Hash object.
|
||||
func NewHash(s string) (Hash, error) { |
||||
h := Hash{} |
||||
if err := h.parse(s); err != nil { |
||||
return Hash{}, err |
||||
} |
||||
return h, nil |
||||
} |
||||
|
||||
// MarshalJSON implements json.Marshaler
|
||||
func (h Hash) MarshalJSON() ([]byte, error) { |
||||
return json.Marshal(h.String()) |
||||
} |
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler
|
||||
func (h *Hash) UnmarshalJSON(data []byte) error { |
||||
s, err := strconv.Unquote(string(data)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
return h.parse(s) |
||||
} |
||||
|
||||
// MarshalText implements encoding.TextMarshaler. This is required to use
|
||||
// v1.Hash as a key in a map when marshalling JSON.
|
||||
func (h Hash) MarshalText() (text []byte, err error) { |
||||
return []byte(h.String()), nil |
||||
} |
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler. This is required to use
|
||||
// v1.Hash as a key in a map when unmarshalling JSON.
|
||||
func (h *Hash) UnmarshalText(text []byte) error { |
||||
return h.parse(string(text)) |
||||
} |
||||
|
||||
// Hasher returns a hash.Hash for the named algorithm (e.g. "sha256")
|
||||
func Hasher(name string) (hash.Hash, error) { |
||||
switch name { |
||||
case "sha256": |
||||
return sha256.New(), nil |
||||
default: |
||||
return nil, fmt.Errorf("unsupported hash: %q", name) |
||||
} |
||||
} |
||||
|
||||
func (h *Hash) parse(unquoted string) error { |
||||
parts := strings.Split(unquoted, ":") |
||||
if len(parts) != 2 { |
||||
return fmt.Errorf("cannot parse hash: %q", unquoted) |
||||
} |
||||
|
||||
rest := strings.TrimLeft(parts[1], "0123456789abcdef") |
||||
if len(rest) != 0 { |
||||
return fmt.Errorf("found non-hex character in hash: %c", rest[0]) |
||||
} |
||||
|
||||
hasher, err := Hasher(parts[0]) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
// Compare the hex to the expected size (2 hex characters per byte)
|
||||
if len(parts[1]) != hasher.Size()*2 { |
||||
return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1]) |
||||
} |
||||
|
||||
h.Algorithm = parts[0] |
||||
h.Hex = parts[1] |
||||
return nil |
||||
} |
||||
|
||||
// SHA256 computes the Hash of the provided io.Reader's content.
|
||||
func SHA256(r io.Reader) (Hash, int64, error) { |
||||
hasher := sha256.New() |
||||
n, err := io.Copy(hasher, r) |
||||
if err != nil { |
||||
return Hash{}, 0, err |
||||
} |
||||
return Hash{ |
||||
Algorithm: "sha256", |
||||
Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), |
||||
}, n, nil |
||||
} |
@ -0,0 +1,59 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1 |
||||
|
||||
import ( |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// Image defines the interface for interacting with an OCI v1 image.
type Image interface {
	// Layers returns the ordered collection of filesystem layers that comprise this image.
	// The order of the list is oldest/base layer first, and most-recent/top layer last.
	Layers() ([]Layer, error)

	// MediaType of this image's manifest.
	MediaType() (types.MediaType, error)

	// Size returns the size of the manifest.
	Size() (int64, error)

	// ConfigName returns the hash of the image's config file, also known as
	// the Image ID.
	ConfigName() (Hash, error)

	// ConfigFile returns this image's config file.
	ConfigFile() (*ConfigFile, error)

	// RawConfigFile returns the serialized bytes of ConfigFile().
	RawConfigFile() ([]byte, error)

	// Digest returns the sha256 of this image's manifest.
	Digest() (Hash, error)

	// Manifest returns this image's Manifest object.
	Manifest() (*Manifest, error)

	// RawManifest returns the serialized bytes of Manifest().
	RawManifest() ([]byte, error)

	// LayerByDigest returns a Layer for interacting with a particular layer of
	// the image, looking it up by "digest" (the compressed hash).
	LayerByDigest(Hash) (Layer, error)

	// LayerByDiffID is an analog to LayerByDigest, looking up by "diff id"
	// (the uncompressed hash).
	LayerByDiffID(Hash) (Layer, error)
}
@ -0,0 +1,43 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1 |
||||
|
||||
import ( |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// ImageIndex defines the interface for interacting with an OCI image index.
type ImageIndex interface {
	// MediaType of this image index's manifest.
	MediaType() (types.MediaType, error)

	// Digest returns the sha256 of this index's manifest.
	Digest() (Hash, error)

	// Size returns the size of the manifest.
	Size() (int64, error)

	// IndexManifest returns this image index's manifest object.
	IndexManifest() (*IndexManifest, error)

	// RawManifest returns the serialized bytes of IndexManifest().
	RawManifest() ([]byte, error)

	// Image returns a v1.Image that this ImageIndex references.
	Image(Hash) (Image, error)

	// ImageIndex returns a v1.ImageIndex that this ImageIndex references.
	ImageIndex(Hash) (ImageIndex, error)
}
@ -0,0 +1,42 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1 |
||||
|
||||
import ( |
||||
"io" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// Layer is an interface for accessing the properties of a particular layer of a v1.Image.
type Layer interface {
	// Digest returns the Hash of the compressed layer.
	Digest() (Hash, error)

	// DiffID returns the Hash of the uncompressed layer.
	DiffID() (Hash, error)

	// Compressed returns an io.ReadCloser for the compressed layer contents.
	Compressed() (io.ReadCloser, error)

	// Uncompressed returns an io.ReadCloser for the uncompressed layer contents.
	Uncompressed() (io.ReadCloser, error)

	// Size returns the compressed size of the Layer.
	Size() (int64, error)

	// MediaType returns the media type of the Layer.
	MediaType() (types.MediaType, error)
}
@ -0,0 +1,68 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1 |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"io" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// Manifest represents the OCI image manifest in a structured way.
type Manifest struct {
	SchemaVersion int64             `json:"schemaVersion"`
	MediaType     types.MediaType   `json:"mediaType,omitempty"`
	Config        Descriptor        `json:"config"`
	Layers        []Descriptor      `json:"layers"`
	Annotations   map[string]string `json:"annotations,omitempty"`
}

// IndexManifest represents an OCI image index in a structured way.
type IndexManifest struct {
	SchemaVersion int64             `json:"schemaVersion"`
	MediaType     types.MediaType   `json:"mediaType,omitempty"`
	Manifests     []Descriptor      `json:"manifests"`
	Annotations   map[string]string `json:"annotations,omitempty"`
}

// Descriptor holds a reference from the manifest to one of its constituent elements.
type Descriptor struct {
	MediaType   types.MediaType   `json:"mediaType"`
	Size        int64             `json:"size"`
	Digest      Hash              `json:"digest"`
	Data        []byte            `json:"data,omitempty"`
	URLs        []string          `json:"urls,omitempty"`
	Annotations map[string]string `json:"annotations,omitempty"`
	Platform    *Platform         `json:"platform,omitempty"`
}
||||
|
||||
// ParseManifest parses the io.Reader's contents into a Manifest.
|
||||
func ParseManifest(r io.Reader) (*Manifest, error) { |
||||
m := Manifest{} |
||||
if err := json.NewDecoder(r).Decode(&m); err != nil { |
||||
return nil, err |
||||
} |
||||
return &m, nil |
||||
} |
||||
|
||||
// ParseIndexManifest parses the io.Reader's contents into an IndexManifest.
|
||||
func ParseIndexManifest(r io.Reader) (*IndexManifest, error) { |
||||
im := IndexManifest{} |
||||
if err := json.NewDecoder(r).Decode(&im); err != nil { |
||||
return nil, err |
||||
} |
||||
return &im, nil |
||||
} |
@ -0,0 +1,92 @@ |
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package match provides functionality for conveniently matching a v1.Descriptor.
|
||||
package match |
||||
|
||||
import ( |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
imagespec "github.com/opencontainers/image-spec/specs-go/v1" |
||||
) |
||||
|
||||
// Matcher function that is given a v1.Descriptor, and returns whether or
// not it matches a given rule. Can match on anything it wants in the Descriptor.
type Matcher func(desc v1.Descriptor) bool
||||
|
||||
// Name returns a match.Matcher that matches based on the value of the
//
// "org.opencontainers.image.ref.name" annotation:
//
// github.com/opencontainers/image-spec/blob/v1.0.1/annotations.md#pre-defined-annotation-keys
func Name(name string) Matcher {
	// Delegates to Annotation with the well-known OCI reference-name key.
	return Annotation(imagespec.AnnotationRefName, name)
}
||||
|
||||
// Annotation returns a match.Matcher that matches based on the provided annotation.
|
||||
func Annotation(key, value string) Matcher { |
||||
return func(desc v1.Descriptor) bool { |
||||
if desc.Annotations == nil { |
||||
return false |
||||
} |
||||
if aValue, ok := desc.Annotations[key]; ok && aValue == value { |
||||
return true |
||||
} |
||||
return false |
||||
} |
||||
} |
||||
|
||||
// Platforms returns a match.Matcher that matches on any one of the provided platforms.
// Ignores any descriptors that do not have a platform.
func Platforms(platforms ...v1.Platform) Matcher {
	return func(desc v1.Descriptor) bool {
		// A descriptor with no platform information can never match.
		if desc.Platform == nil {
			return false
		}
		// Equality semantics are delegated to v1.Platform.Equals.
		for _, platform := range platforms {
			if desc.Platform.Equals(platform) {
				return true
			}
		}
		return false
	}
}
||||
|
||||
// MediaTypes returns a match.Matcher that matches at least one of the provided media types.
|
||||
func MediaTypes(mediaTypes ...string) Matcher { |
||||
mts := map[string]bool{} |
||||
for _, media := range mediaTypes { |
||||
mts[media] = true |
||||
} |
||||
return func(desc v1.Descriptor) bool { |
||||
if desc.MediaType == "" { |
||||
return false |
||||
} |
||||
if _, ok := mts[string(desc.MediaType)]; ok { |
||||
return true |
||||
} |
||||
return false |
||||
} |
||||
} |
||||
|
||||
// Digests returns a match.Matcher that matches at least one of the provided Digests
func Digests(digests ...v1.Hash) Matcher {
	// Build a set for O(1) digest lookups inside the returned matcher.
	digs := map[v1.Hash]bool{}
	for _, digest := range digests {
		digs[digest] = true
	}
	return func(desc v1.Descriptor) bool {
		_, ok := digs[desc.Digest]
		return ok
	}
}
@ -0,0 +1,82 @@ |
||||
# `partial` |
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial) |
||||
|
||||
## Partial Implementations |
||||
|
||||
There are roughly two kinds of image representations: compressed and uncompressed. |
||||
|
||||
The implementations for these kinds of images are almost identical, with the only |
||||
major difference being how blobs (config and layers) are fetched. This common |
||||
code lives in this package, where you provide a _partial_ implementation of a |
||||
compressed or uncompressed image, and you get back a full `v1.Image` implementation. |
||||
|
||||
### Examples |
||||
|
||||
In a registry, blobs are compressed, so it's easiest to implement a `v1.Image` in terms |
||||
of compressed layers. `remote.remoteImage` does this by implementing `CompressedImageCore`: |
||||
|
||||
```go |
||||
type CompressedImageCore interface { |
||||
RawConfigFile() ([]byte, error) |
||||
MediaType() (types.MediaType, error) |
||||
RawManifest() ([]byte, error) |
||||
LayerByDigest(v1.Hash) (CompressedLayer, error) |
||||
} |
||||
``` |
||||
|
||||
In a tarball, blobs are (often) uncompressed, so it's easiest to implement a `v1.Image` in terms |
||||
of uncompressed layers. `tarball.uncompressedImage` does this by implementing `UncompressedImageCore`: |
||||
|
||||
```go |
||||
type UncompressedImageCore interface { |
||||
RawConfigFile() ([]byte, error) |
||||
MediaType() (types.MediaType, error) |
||||
LayerByDiffID(v1.Hash) (UncompressedLayer, error) |
||||
} |
||||
``` |
||||
|
||||
## Optional Methods |
||||
|
||||
Where possible, we access some information via optional methods as an optimization. |
||||
|
||||
### [`partial.Descriptor`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Descriptor) |
||||
|
||||
There are some properties of a [`Descriptor`](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) that aren't derivable from just image data: |
||||
|
||||
* `MediaType` |
||||
* `Platform` |
||||
* `URLs` |
||||
* `Annotations` |
||||
|
||||
For example, in a `tarball.Image`, there is a `LayerSources` field that contains |
||||
an entire layer descriptor with `URLs` information for foreign layers. This |
||||
information can be passed through to callers by implementing this optional |
||||
`Descriptor` method. |
||||
|
||||
See [`#654`](https://github.com/google/go-containerregistry/pull/654). |
||||
|
||||
### [`partial.UncompressedSize`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#UncompressedSize) |
||||
|
||||
Usually, you don't need to know the uncompressed size of a layer, since that |
||||
information isn't stored in a config file (just the sha256 is needed); however,
||||
there are cases where it is very helpful to know the layer size, e.g. when |
||||
writing the uncompressed layer into a tarball. |
||||
|
||||
See [`#655`](https://github.com/google/go-containerregistry/pull/655). |
||||
|
||||
### [`partial.Exists`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Exists) |
||||
|
||||
We generally don't care about the existence of something as granular as a |
||||
layer, and would rather ensure all the invariants of an image are upheld via |
||||
the `validate` package. However, there are situations where we want to do a |
||||
quick smoke test to ensure that the underlying storage engine hasn't been |
||||
corrupted by something e.g. deleting files or blobs. Thus, we've exposed an |
||||
optional `Exists` method that does an existence check without actually reading |
||||
any bytes. |
||||
|
||||
The `remote` package implements this via `HEAD` requests. |
||||
|
||||
The `layout` package implements this via `os.Stat`. |
||||
|
||||
See [`#838`](https://github.com/google/go-containerregistry/pull/838). |
@ -0,0 +1,188 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package partial |
||||
|
||||
import ( |
||||
"io" |
||||
|
||||
"github.com/google/go-containerregistry/internal/and" |
||||
"github.com/google/go-containerregistry/internal/compression" |
||||
"github.com/google/go-containerregistry/internal/gzip" |
||||
"github.com/google/go-containerregistry/internal/zstd" |
||||
comp "github.com/google/go-containerregistry/pkg/compression" |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// CompressedLayer represents the bare minimum interface a natively
// compressed layer must implement for us to produce a v1.Layer
type CompressedLayer interface {
	// Digest returns the Hash of the compressed layer.
	Digest() (v1.Hash, error)

	// Compressed returns an io.ReadCloser for the compressed layer contents.
	Compressed() (io.ReadCloser, error)

	// Size returns the compressed size of the Layer.
	Size() (int64, error)

	// MediaType returns the media type of the compressed Layer.
	MediaType() (types.MediaType, error)
}
||||
|
||||
// compressedLayerExtender implements v1.Layer using the compressed base properties.
type compressedLayerExtender struct {
	CompressedLayer
}

// Uncompressed implements v1.Layer by decompressing the Compressed stream.
func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) {
	rc, err := cle.Compressed()
	if err != nil {
		return nil, err
	}

	// Often, the "compressed" bytes are not actually-compressed.
	// Peek at the first two bytes to determine whether it's correct to
	// wrap this with gzip.UnzipReadCloser or zstd.UnzipReadCloser.
	cp, pr, err := compression.PeekCompression(rc)
	if err != nil {
		return nil, err
	}

	// Wrap the peeked reader so that closing it still closes the
	// underlying Compressed stream.
	prc := &and.ReadCloser{
		Reader:    pr,
		CloseFunc: rc.Close,
	}

	switch cp {
	case comp.GZip:
		return gzip.UnzipReadCloser(prc)
	case comp.ZStd:
		return zstd.UnzipReadCloser(prc)
	default:
		// Not a recognized compression format; pass the bytes through as-is.
		return prc, nil
	}
}

// DiffID implements v1.Layer by hashing the uncompressed contents.
func (cle *compressedLayerExtender) DiffID() (v1.Hash, error) {
	// If our nested CompressedLayer implements DiffID,
	// then delegate to it instead.
	if wdi, ok := cle.CompressedLayer.(WithDiffID); ok {
		return wdi.DiffID()
	}
	r, err := cle.Uncompressed()
	if err != nil {
		return v1.Hash{}, err
	}
	defer r.Close()
	h, _, err := v1.SHA256(r)
	return h, err
}
||||
|
||||
// CompressedToLayer fills in the missing methods from a CompressedLayer so that it implements v1.Layer
|
||||
func CompressedToLayer(ul CompressedLayer) (v1.Layer, error) { |
||||
return &compressedLayerExtender{ul}, nil |
||||
} |
||||
|
||||
// CompressedImageCore represents the base minimum interface a natively
// compressed image must implement for us to produce a v1.Image.
type CompressedImageCore interface {
	ImageCore

	// RawManifest returns the serialized bytes of the manifest.
	RawManifest() ([]byte, error)

	// LayerByDigest is a variation on the v1.Image method, which returns
	// a CompressedLayer instead.
	LayerByDigest(v1.Hash) (CompressedLayer, error)
}
||||
|
||||
// compressedImageExtender implements v1.Image by extending CompressedImageCore with the
// appropriate methods computed from the minimal core.
type compressedImageExtender struct {
	CompressedImageCore
}

// Assert that our extender type completes the v1.Image interface
var _ v1.Image = (*compressedImageExtender)(nil)

// Digest implements v1.Image
func (i *compressedImageExtender) Digest() (v1.Hash, error) {
	return Digest(i)
}

// ConfigName implements v1.Image
func (i *compressedImageExtender) ConfigName() (v1.Hash, error) {
	return ConfigName(i)
}

// Layers implements v1.Image
func (i *compressedImageExtender) Layers() ([]v1.Layer, error) {
	// Resolve the layer digests from the manifest, then look up each one.
	hs, err := FSLayers(i)
	if err != nil {
		return nil, err
	}
	ls := make([]v1.Layer, 0, len(hs))
	for _, h := range hs {
		l, err := i.LayerByDigest(h)
		if err != nil {
			return nil, err
		}
		ls = append(ls, l)
	}
	return ls, nil
}

// LayerByDigest implements v1.Image
func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
	// Fetch the core's CompressedLayer and extend it to a full v1.Layer.
	cl, err := i.CompressedImageCore.LayerByDigest(h)
	if err != nil {
		return nil, err
	}
	return CompressedToLayer(cl)
}

// LayerByDiffID implements v1.Image
func (i *compressedImageExtender) LayerByDiffID(h v1.Hash) (v1.Layer, error) {
	// Translate the uncompressed diff ID to the compressed blob digest first.
	h, err := DiffIDToBlob(i, h)
	if err != nil {
		return nil, err
	}
	return i.LayerByDigest(h)
}

// ConfigFile implements v1.Image
func (i *compressedImageExtender) ConfigFile() (*v1.ConfigFile, error) {
	return ConfigFile(i)
}

// Manifest implements v1.Image
func (i *compressedImageExtender) Manifest() (*v1.Manifest, error) {
	return Manifest(i)
}

// Size implements v1.Image
func (i *compressedImageExtender) Size() (int64, error) {
	return Size(i)
}

// CompressedToImage fills in the missing methods from a CompressedImageCore so that it implements v1.Image
func CompressedToImage(cic CompressedImageCore) (v1.Image, error) {
	return &compressedImageExtender{
		CompressedImageCore: cic,
	}, nil
}
@ -0,0 +1,17 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package partial defines methods for building up a v1.Image from
|
||||
// minimal subsets that are sufficient for defining a v1.Image.
|
||||
package partial |
@ -0,0 +1,28 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package partial |
||||
|
||||
import ( |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// ImageCore is the core set of properties without which we cannot build a v1.Image
type ImageCore interface {
	// RawConfigFile returns the serialized bytes of this image's config file.
	RawConfigFile() ([]byte, error)

	// MediaType of this image's manifest.
	MediaType() (types.MediaType, error)
}
@ -0,0 +1,85 @@ |
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package partial |
||||
|
||||
import ( |
||||
"fmt" |
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/match" |
||||
) |
||||
|
||||
// FindManifests given a v1.ImageIndex, find the manifests that fit the matcher.
|
||||
func FindManifests(index v1.ImageIndex, matcher match.Matcher) ([]v1.Descriptor, error) { |
||||
// get the actual manifest list
|
||||
indexManifest, err := index.IndexManifest() |
||||
if err != nil { |
||||
return nil, fmt.Errorf("unable to get raw index: %w", err) |
||||
} |
||||
manifests := []v1.Descriptor{} |
||||
// try to get the root of our image
|
||||
for _, manifest := range indexManifest.Manifests { |
||||
if matcher(manifest) { |
||||
manifests = append(manifests, manifest) |
||||
} |
||||
} |
||||
return manifests, nil |
||||
} |
||||
|
||||
// FindImages given a v1.ImageIndex, find the images that fit the matcher. If a Descriptor
// matches the provided Matcher, but the referenced item is not an Image, ignores it.
// Only returns those that match the Matcher and are images.
func FindImages(index v1.ImageIndex, matcher match.Matcher) ([]v1.Image, error) {
	matches := []v1.Image{}
	manifests, err := FindManifests(index, matcher)
	if err != nil {
		return nil, err
	}
	for _, desc := range manifests {
		// if it is not an image, ignore it
		if !desc.MediaType.IsImage() {
			continue
		}
		img, err := index.Image(desc.Digest)
		if err != nil {
			return nil, err
		}
		matches = append(matches, img)
	}
	return matches, nil
}
||||
|
||||
// FindIndexes given a v1.ImageIndex, find the indexes that fit the matcher. If a Descriptor
// matches the provided Matcher, but the referenced item is not an Index, ignores it.
// Only returns those that match the Matcher and are indexes.
func FindIndexes(index v1.ImageIndex, matcher match.Matcher) ([]v1.ImageIndex, error) {
	matches := []v1.ImageIndex{}
	manifests, err := FindManifests(index, matcher)
	if err != nil {
		return nil, err
	}
	for _, desc := range manifests {
		// if it is not an index, ignore it
		if !desc.MediaType.IsIndex() {
			continue
		}
		idx, err := index.ImageIndex(desc.Digest)
		if err != nil {
			return nil, err
		}
		matches = append(matches, idx)
	}
	return matches, nil
}
@ -0,0 +1,223 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package partial |
||||
|
||||
import ( |
||||
"bytes" |
||||
"io" |
||||
"sync" |
||||
|
||||
"github.com/google/go-containerregistry/internal/gzip" |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// UncompressedLayer represents the bare minimum interface a natively
// uncompressed layer must implement for us to produce a v1.Layer
type UncompressedLayer interface {
	// DiffID returns the Hash of the uncompressed layer.
	DiffID() (v1.Hash, error)

	// Uncompressed returns an io.ReadCloser for the uncompressed layer contents.
	Uncompressed() (io.ReadCloser, error)

	// MediaType returns the media type of the Layer.
	MediaType() (types.MediaType, error)
}
||||
|
||||
// uncompressedLayerExtender implements v1.Layer using the uncompressed base properties.
type uncompressedLayerExtender struct {
	UncompressedLayer
	// Memoize size/hash so that the methods aren't twice as
	// expensive as doing this manually.
	hash          v1.Hash
	size          int64
	hashSizeError error
	once          sync.Once
}

// Compressed implements v1.Layer by gzip-compressing the uncompressed stream.
func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) {
	u, err := ule.Uncompressed()
	if err != nil {
		return nil, err
	}
	return gzip.ReadCloser(u), nil
}

// Digest implements v1.Layer; computed lazily and cached.
func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) {
	ule.calcSizeHash()
	return ule.hash, ule.hashSizeError
}

// Size implements v1.Layer; computed lazily and cached.
func (ule *uncompressedLayerExtender) Size() (int64, error) {
	ule.calcSizeHash()
	return ule.size, ule.hashSizeError
}

// calcSizeHash computes the compressed digest and size exactly once,
// storing any error alongside the results.
func (ule *uncompressedLayerExtender) calcSizeHash() {
	ule.once.Do(func() {
		var r io.ReadCloser
		r, ule.hashSizeError = ule.Compressed()
		if ule.hashSizeError != nil {
			return
		}
		defer r.Close()
		ule.hash, ule.size, ule.hashSizeError = v1.SHA256(r)
	})
}

// UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer
func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) {
	return &uncompressedLayerExtender{UncompressedLayer: ul}, nil
}
||||
|
||||
// UncompressedImageCore represents the bare minimum interface a natively
// uncompressed image must implement for us to produce a v1.Image
type UncompressedImageCore interface {
	ImageCore

	// LayerByDiffID is a variation on the v1.Image method, which returns
	// an UncompressedLayer instead.
	LayerByDiffID(v1.Hash) (UncompressedLayer, error)
}

// UncompressedToImage fills in the missing methods from an UncompressedImageCore so that it implements v1.Image.
func UncompressedToImage(uic UncompressedImageCore) (v1.Image, error) {
	return &uncompressedImageExtender{
		UncompressedImageCore: uic,
	}, nil
}
||||
|
||||
// uncompressedImageExtender implements v1.Image by extending UncompressedImageCore with the
// appropriate methods computed from the minimal core.
type uncompressedImageExtender struct {
	UncompressedImageCore

	// lock guards lazy construction and caching of manifest.
	lock     sync.Mutex
	manifest *v1.Manifest
}

// Assert that our extender type completes the v1.Image interface
var _ v1.Image = (*uncompressedImageExtender)(nil)

// Digest implements v1.Image
func (i *uncompressedImageExtender) Digest() (v1.Hash, error) {
	return Digest(i)
}

// Manifest implements v1.Image by synthesizing a Docker schema 2 manifest
// from the config file and layers. The result is cached under lock.
func (i *uncompressedImageExtender) Manifest() (*v1.Manifest, error) {
	i.lock.Lock()
	defer i.lock.Unlock()
	if i.manifest != nil {
		return i.manifest, nil
	}

	b, err := i.RawConfigFile()
	if err != nil {
		return nil, err
	}

	cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b))
	if err != nil {
		return nil, err
	}

	m := &v1.Manifest{
		SchemaVersion: 2,
		MediaType:     types.DockerManifestSchema2,
		Config: v1.Descriptor{
			MediaType: types.DockerConfigJSON,
			Size:      cfgSize,
			Digest:    cfgHash,
		},
	}

	ls, err := i.Layers()
	if err != nil {
		return nil, err
	}

	m.Layers = make([]v1.Descriptor, len(ls))
	// NOTE: the loop index i shadows the method receiver i inside this loop.
	for i, l := range ls {
		desc, err := Descriptor(l)
		if err != nil {
			return nil, err
		}

		m.Layers[i] = *desc
	}

	i.manifest = m
	return i.manifest, nil
}

// RawManifest implements v1.Image
func (i *uncompressedImageExtender) RawManifest() ([]byte, error) {
	return RawManifest(i)
}

// Size implements v1.Image
func (i *uncompressedImageExtender) Size() (int64, error) {
	return Size(i)
}

// ConfigName implements v1.Image
func (i *uncompressedImageExtender) ConfigName() (v1.Hash, error) {
	return ConfigName(i)
}

// ConfigFile implements v1.Image
func (i *uncompressedImageExtender) ConfigFile() (*v1.ConfigFile, error) {
	return ConfigFile(i)
}

// Layers implements v1.Image
func (i *uncompressedImageExtender) Layers() ([]v1.Layer, error) {
	// Resolve diff IDs from the config file, then look up each layer.
	diffIDs, err := DiffIDs(i)
	if err != nil {
		return nil, err
	}
	ls := make([]v1.Layer, 0, len(diffIDs))
	for _, h := range diffIDs {
		l, err := i.LayerByDiffID(h)
		if err != nil {
			return nil, err
		}
		ls = append(ls, l)
	}
	return ls, nil
}

// LayerByDiffID implements v1.Image
func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, error) {
	// Fetch the core's UncompressedLayer and extend it to a full v1.Layer.
	ul, err := i.UncompressedImageCore.LayerByDiffID(diffID)
	if err != nil {
		return nil, err
	}
	return UncompressedToLayer(ul)
}

// LayerByDigest implements v1.Image
func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
	// Translate the compressed blob digest to its uncompressed diff ID first.
	diffID, err := BlobToDiffID(i, h)
	if err != nil {
		return nil, err
	}
	return i.LayerByDiffID(diffID)
}
@ -0,0 +1,401 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package partial |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/json" |
||||
"fmt" |
||||
"io" |
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// WithRawConfigFile defines the subset of v1.Image used by these helper methods
type WithRawConfigFile interface {
	// RawConfigFile returns the serialized bytes of this image's config file.
	RawConfigFile() ([]byte, error)
}
||||
|
||||
// ConfigFile is a helper for implementing v1.Image
// It parses the raw config bytes into a structured v1.ConfigFile.
func ConfigFile(i WithRawConfigFile) (*v1.ConfigFile, error) {
	b, err := i.RawConfigFile()
	if err != nil {
		return nil, err
	}
	return v1.ParseConfigFile(bytes.NewReader(b))
}
||||
|
||||
// ConfigName is a helper for implementing v1.Image
// It returns the SHA256 digest of the raw config bytes.
func ConfigName(i WithRawConfigFile) (v1.Hash, error) {
	b, err := i.RawConfigFile()
	if err != nil {
		return v1.Hash{}, err
	}
	h, _, err := v1.SHA256(bytes.NewReader(b))
	return h, err
}
||||
|
||||
// configLayer adapts raw config-file bytes into a v1.Layer so the config
// can be treated as a blob. It is never actually compressed.
type configLayer struct {
	// hash is the SHA256 of content.
	hash v1.Hash
	// content is the raw config JSON.
	content []byte
}

// Digest implements v1.Layer
func (cl *configLayer) Digest() (v1.Hash, error) {
	return cl.hash, nil
}

// DiffID implements v1.Layer
func (cl *configLayer) DiffID() (v1.Hash, error) {
	return cl.hash, nil
}

// Uncompressed implements v1.Layer
func (cl *configLayer) Uncompressed() (io.ReadCloser, error) {
	return io.NopCloser(bytes.NewBuffer(cl.content)), nil
}

// Compressed implements v1.Layer
// Note: returns the same bytes as Uncompressed; config blobs are not compressed.
func (cl *configLayer) Compressed() (io.ReadCloser, error) {
	return io.NopCloser(bytes.NewBuffer(cl.content)), nil
}

// Size implements v1.Layer
func (cl *configLayer) Size() (int64, error) {
	return int64(len(cl.content)), nil
}

// MediaType implements v1.Layer
func (cl *configLayer) MediaType() (types.MediaType, error) {
	// Defaulting this to OCIConfigJSON as it should remain
	// backwards compatible with DockerConfigJSON
	return types.OCIConfigJSON, nil
}

// Assert that configLayer completes the v1.Layer interface.
var _ v1.Layer = (*configLayer)(nil)
||||
|
||||
// withConfigLayer allows partial image implementations to provide a layer
// for their config file.
type withConfigLayer interface {
	ConfigLayer() (v1.Layer, error)
}

// ConfigLayer implements v1.Layer from the raw config bytes.
// This is so that clients (e.g. remote) can access the config as a blob.
//
// Images that want to return a specific layer implementation can implement
// withConfigLayer.
func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) {
	// Prefer a layer implementation supplied by the image itself, if any.
	if wcl, ok := unwrap(i).(withConfigLayer); ok {
		return wcl.ConfigLayer()
	}

	// Otherwise build a configLayer from the raw bytes and their digest.
	h, err := ConfigName(i)
	if err != nil {
		return nil, err
	}
	rcfg, err := i.RawConfigFile()
	if err != nil {
		return nil, err
	}
	return &configLayer{
		hash:    h,
		content: rcfg,
	}, nil
}
||||
|
||||
// WithConfigFile defines the subset of v1.Image used by these helper methods
type WithConfigFile interface {
	// ConfigFile returns this image's config file.
	ConfigFile() (*v1.ConfigFile, error)
}

// DiffIDs is a helper for implementing v1.Image
// It returns the uncompressed layer digests recorded in the config's RootFS.
func DiffIDs(i WithConfigFile) ([]v1.Hash, error) {
	cfg, err := i.ConfigFile()
	if err != nil {
		return nil, err
	}
	return cfg.RootFS.DiffIDs, nil
}
||||
|
||||
// RawConfigFile is a helper for implementing v1.Image
// It serializes the structured config file back to JSON bytes.
func RawConfigFile(i WithConfigFile) ([]byte, error) {
	cfg, err := i.ConfigFile()
	if err != nil {
		return nil, err
	}
	return json.Marshal(cfg)
}
||||
|
||||
// WithRawManifest defines the subset of v1.Image used by these helper methods
type WithRawManifest interface {
	// RawManifest returns the serialized bytes of this image's manifest.
	RawManifest() ([]byte, error)
}

// Digest is a helper for implementing v1.Image
// It returns the SHA256 digest of the raw manifest bytes.
func Digest(i WithRawManifest) (v1.Hash, error) {
	mb, err := i.RawManifest()
	if err != nil {
		return v1.Hash{}, err
	}
	digest, _, err := v1.SHA256(bytes.NewReader(mb))
	return digest, err
}
||||
|
||||
// Manifest is a helper for implementing v1.Image
|
||||
func Manifest(i WithRawManifest) (*v1.Manifest, error) { |
||||
b, err := i.RawManifest() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return v1.ParseManifest(bytes.NewReader(b)) |
||||
} |
||||
|
||||
// WithManifest defines the subset of v1.Image used by these helper methods
type WithManifest interface {
	// Manifest returns this image's Manifest object.
	Manifest() (*v1.Manifest, error)
}
||||
|
||||
// RawManifest is a helper for implementing v1.Image
|
||||
func RawManifest(i WithManifest) ([]byte, error) { |
||||
m, err := i.Manifest() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return json.Marshal(m) |
||||
} |
||||
|
||||
// Size is a helper for implementing v1.Image
|
||||
func Size(i WithRawManifest) (int64, error) { |
||||
b, err := i.RawManifest() |
||||
if err != nil { |
||||
return -1, err |
||||
} |
||||
return int64(len(b)), nil |
||||
} |
||||
|
||||
// FSLayers is a helper for implementing v1.Image
|
||||
func FSLayers(i WithManifest) ([]v1.Hash, error) { |
||||
m, err := i.Manifest() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
fsl := make([]v1.Hash, len(m.Layers)) |
||||
for i, l := range m.Layers { |
||||
fsl[i] = l.Digest |
||||
} |
||||
return fsl, nil |
||||
} |
||||
|
||||
// BlobSize is a helper for implementing v1.Image
|
||||
func BlobSize(i WithManifest, h v1.Hash) (int64, error) { |
||||
d, err := BlobDescriptor(i, h) |
||||
if err != nil { |
||||
return -1, err |
||||
} |
||||
return d.Size, nil |
||||
} |
||||
|
||||
// BlobDescriptor is a helper for implementing v1.Image
|
||||
func BlobDescriptor(i WithManifest, h v1.Hash) (*v1.Descriptor, error) { |
||||
m, err := i.Manifest() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if m.Config.Digest == h { |
||||
return &m.Config, nil |
||||
} |
||||
|
||||
for _, l := range m.Layers { |
||||
if l.Digest == h { |
||||
return &l, nil |
||||
} |
||||
} |
||||
return nil, fmt.Errorf("blob %v not found", h) |
||||
} |
||||
|
||||
// WithManifestAndConfigFile defines the subset of v1.Image used by these helper methods
type WithManifestAndConfigFile interface {
	WithConfigFile

	// Manifest returns this image's Manifest object.
	Manifest() (*v1.Manifest, error)
}
||||
|
||||
// BlobToDiffID is a helper for mapping between compressed
|
||||
// and uncompressed blob hashes.
|
||||
func BlobToDiffID(i WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { |
||||
blobs, err := FSLayers(i) |
||||
if err != nil { |
||||
return v1.Hash{}, err |
||||
} |
||||
diffIDs, err := DiffIDs(i) |
||||
if err != nil { |
||||
return v1.Hash{}, err |
||||
} |
||||
if len(blobs) != len(diffIDs) { |
||||
return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) |
||||
} |
||||
for i, blob := range blobs { |
||||
if blob == h { |
||||
return diffIDs[i], nil |
||||
} |
||||
} |
||||
return v1.Hash{}, fmt.Errorf("unknown blob %v", h) |
||||
} |
||||
|
||||
// DiffIDToBlob is a helper for mapping between uncompressed
|
||||
// and compressed blob hashes.
|
||||
func DiffIDToBlob(wm WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { |
||||
blobs, err := FSLayers(wm) |
||||
if err != nil { |
||||
return v1.Hash{}, err |
||||
} |
||||
diffIDs, err := DiffIDs(wm) |
||||
if err != nil { |
||||
return v1.Hash{}, err |
||||
} |
||||
if len(blobs) != len(diffIDs) { |
||||
return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) |
||||
} |
||||
for i, diffID := range diffIDs { |
||||
if diffID == h { |
||||
return blobs[i], nil |
||||
} |
||||
} |
||||
return v1.Hash{}, fmt.Errorf("unknown diffID %v", h) |
||||
} |
||||
|
||||
// WithDiffID defines the subset of v1.Layer for exposing the DiffID method.
type WithDiffID interface {
	// DiffID returns the digest of the uncompressed layer contents.
	DiffID() (v1.Hash, error)
}
||||
|
||||
// withDescriptor allows partial layer implementations to provide a layer
// descriptor to the partial image manifest builder. This allows partial
// uncompressed layers to provide foreign layer metadata like URLs to the
// uncompressed image manifest.
type withDescriptor interface {
	// Descriptor returns the full descriptor for this layer.
	Descriptor() (*v1.Descriptor, error)
}
||||
|
||||
// Describable represents something for which we can produce a v1.Descriptor.
type Describable interface {
	// Digest returns the content-addressable hash.
	Digest() (v1.Hash, error)
	// MediaType returns the media type for the descriptor.
	MediaType() (types.MediaType, error)
	// Size returns the size in bytes.
	Size() (int64, error)
}
||||
|
||||
// Descriptor returns a v1.Descriptor given a Describable. It also encodes
|
||||
// some logic for unwrapping things that have been wrapped by
|
||||
// CompressedToLayer, UncompressedToLayer, CompressedToImage, or
|
||||
// UncompressedToImage.
|
||||
func Descriptor(d Describable) (*v1.Descriptor, error) { |
||||
// If Describable implements Descriptor itself, return that.
|
||||
if wd, ok := unwrap(d).(withDescriptor); ok { |
||||
return wd.Descriptor() |
||||
} |
||||
|
||||
// If all else fails, compute the descriptor from the individual methods.
|
||||
var ( |
||||
desc v1.Descriptor |
||||
err error |
||||
) |
||||
|
||||
if desc.Size, err = d.Size(); err != nil { |
||||
return nil, err |
||||
} |
||||
if desc.Digest, err = d.Digest(); err != nil { |
||||
return nil, err |
||||
} |
||||
if desc.MediaType, err = d.MediaType(); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return &desc, nil |
||||
} |
||||
|
||||
// withUncompressedSize lets layer implementations report the uncompressed
// size directly, so UncompressedSize can avoid reading the whole layer.
type withUncompressedSize interface {
	UncompressedSize() (int64, error)
}
||||
|
||||
// UncompressedSize returns the size of the Uncompressed layer. If the
|
||||
// underlying implementation doesn't implement UncompressedSize directly,
|
||||
// this will compute the uncompressedSize by reading everything returned
|
||||
// by Compressed(). This is potentially expensive and may consume the contents
|
||||
// for streaming layers.
|
||||
func UncompressedSize(l v1.Layer) (int64, error) { |
||||
// If the layer implements UncompressedSize itself, return that.
|
||||
if wus, ok := unwrap(l).(withUncompressedSize); ok { |
||||
return wus.UncompressedSize() |
||||
} |
||||
|
||||
// The layer doesn't implement UncompressedSize, we need to compute it.
|
||||
rc, err := l.Uncompressed() |
||||
if err != nil { |
||||
return -1, err |
||||
} |
||||
defer rc.Close() |
||||
|
||||
return io.Copy(io.Discard, rc) |
||||
} |
||||
|
||||
// withExists lets layer implementations answer existence checks directly,
// so Exists can avoid opening the compressed stream.
type withExists interface {
	Exists() (bool, error)
}
||||
|
||||
// Exists checks to see if a layer exists. This is a hack to work around the
|
||||
// mistakes of the partial package. Don't use this.
|
||||
func Exists(l v1.Layer) (bool, error) { |
||||
// If the layer implements Exists itself, return that.
|
||||
if we, ok := unwrap(l).(withExists); ok { |
||||
return we.Exists() |
||||
} |
||||
|
||||
// The layer doesn't implement Exists, so we hope that calling Compressed()
|
||||
// is enough to trigger an error if the layer does not exist.
|
||||
rc, err := l.Compressed() |
||||
if err != nil { |
||||
return false, err |
||||
} |
||||
defer rc.Close() |
||||
|
||||
// We may want to try actually reading a single byte, but if we need to do
|
||||
// that, we should just fix this hack.
|
||||
return true, nil |
||||
} |
||||
|
||||
// Recursively unwrap our wrappers so that we can check for the original implementation.
|
||||
// We might want to expose this?
|
||||
func unwrap(i any) any { |
||||
if ule, ok := i.(*uncompressedLayerExtender); ok { |
||||
return unwrap(ule.UncompressedLayer) |
||||
} |
||||
if cle, ok := i.(*compressedLayerExtender); ok { |
||||
return unwrap(cle.CompressedLayer) |
||||
} |
||||
if uie, ok := i.(*uncompressedImageExtender); ok { |
||||
return unwrap(uie.UncompressedImageCore) |
||||
} |
||||
if cie, ok := i.(*compressedImageExtender); ok { |
||||
return unwrap(cie.CompressedImageCore) |
||||
} |
||||
return i |
||||
} |
@ -0,0 +1,108 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1 |
||||
|
||||
import ( |
||||
"fmt" |
||||
"sort" |
||||
"strings" |
||||
) |
||||
|
||||
// Platform represents the target os/arch for an image.
type Platform struct {
	Architecture string `json:"architecture"`
	OS           string `json:"os"`
	// OSVersion is the OS-specific version (e.g. used for Windows images).
	OSVersion  string   `json:"os.version,omitempty"`
	OSFeatures []string `json:"os.features,omitempty"`
	// Variant is the CPU variant (e.g. "v7" for arm).
	Variant  string   `json:"variant,omitempty"`
	Features []string `json:"features,omitempty"`
}
||||
|
||||
func (p Platform) String() string { |
||||
if p.OS == "" { |
||||
return "" |
||||
} |
||||
var b strings.Builder |
||||
b.WriteString(p.OS) |
||||
if p.Architecture != "" { |
||||
b.WriteString("/") |
||||
b.WriteString(p.Architecture) |
||||
} |
||||
if p.Variant != "" { |
||||
b.WriteString("/") |
||||
b.WriteString(p.Variant) |
||||
} |
||||
if p.OSVersion != "" { |
||||
b.WriteString(":") |
||||
b.WriteString(p.OSVersion) |
||||
} |
||||
return b.String() |
||||
} |
||||
|
||||
// ParsePlatform parses a string representing a Platform, if possible.
|
||||
func ParsePlatform(s string) (*Platform, error) { |
||||
var p Platform |
||||
parts := strings.Split(strings.TrimSpace(s), ":") |
||||
if len(parts) == 2 { |
||||
p.OSVersion = parts[1] |
||||
} |
||||
parts = strings.Split(parts[0], "/") |
||||
if len(parts) > 0 { |
||||
p.OS = parts[0] |
||||
} |
||||
if len(parts) > 1 { |
||||
p.Architecture = parts[1] |
||||
} |
||||
if len(parts) > 2 { |
||||
p.Variant = parts[2] |
||||
} |
||||
if len(parts) > 3 { |
||||
return nil, fmt.Errorf("too many slashes in platform spec: %s", s) |
||||
} |
||||
return &p, nil |
||||
} |
||||
|
||||
// Equals returns true if the given platform is semantically equivalent to this one.
|
||||
// The order of Features and OSFeatures is not important.
|
||||
func (p Platform) Equals(o Platform) bool { |
||||
return p.OS == o.OS && |
||||
p.Architecture == o.Architecture && |
||||
p.Variant == o.Variant && |
||||
p.OSVersion == o.OSVersion && |
||||
stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && |
||||
stringSliceEqualIgnoreOrder(p.Features, o.Features) |
||||
} |
||||
|
||||
// stringSliceEqual compares 2 string slices and returns if their contents are identical.
// Two slices are equal when they have the same length and the same elements
// in the same positions; nil and empty are treated alike.
func stringSliceEqual(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
||||
|
||||
// stringSliceEqualIgnoreOrder compares 2 string slices and returns if their contents are identical, ignoring order.
//
// Fix: the previous implementation called sort.Strings on the arguments,
// silently mutating the caller's slices. This version compares element
// multisets instead, which gives the same answer (sorted equality ==
// multiset equality) without side effects.
func stringSliceEqualIgnoreOrder(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	counts := make(map[string]int, len(a))
	for _, s := range a {
		counts[s]++
	}
	for _, s := range b {
		counts[s]--
		if counts[s] < 0 {
			return false
		}
	}
	return true
}
@ -0,0 +1,25 @@ |
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v1 |
||||
|
||||
// Update representation of an update of transfer progress. Some functions
// in this module can take a channel to which updates will be sent while a
// transfer is in progress.
// +k8s:deepcopy-gen=false
type Update struct {
	Total    int64 // total bytes expected — TODO confirm against senders
	Complete int64 // bytes completed so far
	Error    error // non-nil when the transfer encountered an error
}
@ -0,0 +1,117 @@ |
||||
# `remote` |
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) |
||||
|
||||
The `remote` package implements a client for accessing a registry, |
||||
per the [OCI distribution spec](https://github.com/opencontainers/distribution-spec/blob/master/spec.md). |
||||
|
||||
It leans heavily on the lower level [`transport`](/pkg/v1/remote/transport) package, which handles the |
||||
authentication handshake and structured errors. |
||||
|
||||
## Usage |
||||
|
||||
```go |
||||
package main |
||||
|
||||
import ( |
||||
"github.com/google/go-containerregistry/pkg/authn" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||
) |
||||
|
||||
func main() { |
||||
ref, err := name.ParseReference("gcr.io/google-containers/pause") |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
// do stuff with img |
||||
} |
||||
``` |
||||
|
||||
## Structure |
||||
|
||||
<p align="center"> |
||||
<img src="/images/remote.dot.svg" /> |
||||
</p> |
||||
|
||||
|
||||
## Background |
||||
|
||||
There are a lot of confusingly similar terms that come up when talking about images in registries. |
||||
|
||||
### Anatomy of an image |
||||
|
||||
In general... |
||||
|
||||
* A tag refers to an image manifest. |
||||
* An image manifest references a config file and an ordered list of _compressed_ layers by sha256 digest.
||||
* A config file references an ordered list of _uncompressed_ layers by sha256 digest and contains runtime configuration. |
||||
* The sha256 digest of the config file is the [image id](https://github.com/opencontainers/image-spec/blob/master/config.md#imageid) for the image. |
||||
|
||||
For example, an image with two layers would look something like this: |
||||
|
||||
![image anatomy](/images/image-anatomy.dot.svg) |
||||
|
||||
### Anatomy of an index |
||||
|
||||
In the normal case, an [index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) is used to represent a multi-platform image. |
||||
This was the original use case for a [manifest |
||||
list](https://docs.docker.com/registry/spec/manifest-v2-2/#manifest-list). |
||||
|
||||
![image index anatomy](/images/index-anatomy.dot.svg) |
||||
|
||||
It is possible for an index to reference another index, per the OCI |
||||
[image-spec](https://github.com/opencontainers/image-spec/blob/master/media-types.md#compatibility-matrix). |
||||
In theory, both an image and image index can reference arbitrary things via |
||||
[descriptors](https://github.com/opencontainers/image-spec/blob/master/descriptor.md), |
||||
e.g. see the [image layout |
||||
example](https://github.com/opencontainers/image-spec/blob/master/image-layout.md#index-example), |
||||
which references an application/xml file from an image index. |
||||
|
||||
That could look something like this: |
||||
|
||||
![strange image index anatomy](/images/index-anatomy-strange.dot.svg) |
||||
|
||||
Using a recursive index like this might not be possible with all registries, |
||||
but this flexibility allows for some interesting applications, e.g. the |
||||
[OCI Artifacts](https://github.com/opencontainers/artifacts) effort. |
||||
|
||||
### Anatomy of an image upload |
||||
|
||||
The structure of an image requires a delicate ordering when uploading an image to a registry. |
||||
Below is a (slightly simplified) figure that describes how an image is prepared for upload |
||||
to a registry and how the data flows between various artifacts: |
||||
|
||||
![upload](/images/upload.dot.svg) |
||||
|
||||
Note that: |
||||
|
||||
* A config file references the uncompressed layer contents by sha256. |
||||
* A manifest references the compressed layer contents by sha256 and the size of the layer. |
||||
* A manifest references the config file contents by sha256 and the size of the file. |
||||
|
||||
It follows that during an upload, we need to upload layers before the config file, |
||||
and we need to upload the config file before the manifest. |
||||
|
||||
Sometimes, we know all of this information ahead of time, (e.g. when copying from remote.Image), |
||||
so the ordering is less important. |
||||
|
||||
In other cases, e.g. when using a [`stream.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream#Layer), |
||||
we can't compute anything until we have already uploaded the layer, so we need to be careful about ordering. |
||||
|
||||
## Caveats |
||||
|
||||
### schema 1 |
||||
|
||||
This package does not support schema 1 images, see [`#377`](https://github.com/google/go-containerregistry/issues/377), |
||||
however, it's possible to do _something_ useful with them via [`remote.Get`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote#Get), |
||||
which doesn't try to interpret what is returned by the registry. |
||||
|
||||
[`crane.Copy`](https://godoc.org/github.com/google/go-containerregistry/pkg/crane#Copy) takes advantage of this to implement support for copying schema 1 images, |
||||
see [here](https://github.com/google/go-containerregistry/blob/main/pkg/internal/legacy/copy.go). |
@ -0,0 +1,154 @@ |
||||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/json" |
||||
"fmt" |
||||
"net/http" |
||||
"net/url" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport" |
||||
) |
||||
|
||||
// catalog models the JSON body returned by the registry's /v2/_catalog endpoint.
type catalog struct {
	Repos []string `json:"repositories"`
}
||||
|
||||
// CatalogPage calls /_catalog, returning the list of repositories on the registry.
|
||||
func CatalogPage(target name.Registry, last string, n int, options ...Option) ([]string, error) { |
||||
o, err := makeOptions(target, options...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
scopes := []string{target.Scope(transport.PullScope)} |
||||
tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
query := fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n) |
||||
|
||||
uri := url.URL{ |
||||
Scheme: target.Scheme(), |
||||
Host: target.RegistryStr(), |
||||
Path: "/v2/_catalog", |
||||
RawQuery: query, |
||||
} |
||||
|
||||
client := http.Client{Transport: tr} |
||||
req, err := http.NewRequest(http.MethodGet, uri.String(), nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
resp, err := client.Do(req.WithContext(o.context)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer resp.Body.Close() |
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
var parsed catalog |
||||
if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return parsed.Repos, nil |
||||
} |
||||
|
||||
// Catalog calls /_catalog, returning the list of repositories on the registry.
|
||||
func Catalog(ctx context.Context, target name.Registry, options ...Option) ([]string, error) { |
||||
o, err := makeOptions(target, options...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
scopes := []string{target.Scope(transport.PullScope)} |
||||
tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
uri := &url.URL{ |
||||
Scheme: target.Scheme(), |
||||
Host: target.RegistryStr(), |
||||
Path: "/v2/_catalog", |
||||
} |
||||
|
||||
if o.pageSize > 0 { |
||||
uri.RawQuery = fmt.Sprintf("n=%d", o.pageSize) |
||||
} |
||||
|
||||
client := http.Client{Transport: tr} |
||||
|
||||
// WithContext overrides the ctx passed directly.
|
||||
if o.context != context.Background() { |
||||
ctx = o.context |
||||
} |
||||
|
||||
var ( |
||||
parsed catalog |
||||
repoList []string |
||||
) |
||||
|
||||
// get responses until there is no next page
|
||||
for { |
||||
select { |
||||
case <-ctx.Done(): |
||||
return nil, ctx.Err() |
||||
default: |
||||
} |
||||
|
||||
req, err := http.NewRequest("GET", uri.String(), nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
req = req.WithContext(ctx) |
||||
|
||||
resp, err := client.Do(req) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { |
||||
return nil, err |
||||
} |
||||
if err := resp.Body.Close(); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
repoList = append(repoList, parsed.Repos...) |
||||
|
||||
uri, err = getNextPageURL(resp) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
// no next page
|
||||
if uri == nil { |
||||
break |
||||
} |
||||
} |
||||
return repoList, nil |
||||
} |
@ -0,0 +1,72 @@ |
||||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"net/http" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport" |
||||
) |
||||
|
||||
// CheckPushPermission returns an error if the given keychain cannot authorize
// a push operation to the given ref.
//
// This can be useful to check whether the caller has permission to push an
// image before doing work to construct the image.
//
// TODO(#412): Remove the need for this method.
func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTripper) error {
	auth, err := kc.Resolve(ref.Context().Registry)
	if err != nil {
		return fmt.Errorf("resolving authorization for %v failed: %w", ref.Context().Registry, err)
	}

	// Request push scope so the transport's token handshake exercises write access.
	scopes := []string{ref.Scope(transport.PushScope)}
	tr, err := transport.NewWithContext(context.TODO(), ref.Context().Registry, auth, t, scopes)
	if err != nil {
		return fmt.Errorf("creating push check transport for %v failed: %w", ref.Context().Registry, err)
	}
	// TODO(jasonhall): Against GCR, just doing the token handshake is
	// enough, but this doesn't extend to Dockerhub
	// (https://github.com/docker/hub-feedback/issues/1771), so we actually
	// need to initiate an upload to tell whether the credentials can
	// authorize a push. Figure out how to return early here when we can,
	// to avoid a roundtrip for spec-compliant registries.
	w := writer{
		repo:   ref.Context(),
		client: &http.Client{Transport: tr},
	}
	loc, _, err := w.initiateUpload(context.Background(), "", "", "")
	if loc != "" {
		// Since we're only initiating the upload to check whether we
		// can, we should attempt to cancel it, in case initiating
		// reserves some resources on the server. We shouldn't wait for
		// cancelling to complete, and we don't care if it fails.
		go w.cancelUpload(loc)
	}
	return err
}
||||
|
||||
func (w *writer) cancelUpload(loc string) { |
||||
req, err := http.NewRequest(http.MethodDelete, loc, nil) |
||||
if err != nil { |
||||
return |
||||
} |
||||
_, _ = w.client.Do(req) |
||||
} |
@ -0,0 +1,57 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net/http" |
||||
"net/url" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport" |
||||
) |
||||
|
||||
// Delete removes the specified image reference from the remote registry.
|
||||
func Delete(ref name.Reference, options ...Option) error { |
||||
o, err := makeOptions(ref.Context(), options...) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
scopes := []string{ref.Scope(transport.DeleteScope)} |
||||
tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
c := &http.Client{Transport: tr} |
||||
|
||||
u := url.URL{ |
||||
Scheme: ref.Context().Registry.Scheme(), |
||||
Host: ref.Context().RegistryStr(), |
||||
Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()), |
||||
} |
||||
|
||||
req, err := http.NewRequest(http.MethodDelete, u.String(), nil) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
resp, err := c.Do(req.WithContext(o.context)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer resp.Body.Close() |
||||
|
||||
return transport.CheckError(resp, http.StatusOK, http.StatusAccepted) |
||||
} |
@ -0,0 +1,455 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"fmt" |
||||
"io" |
||||
"net/http" |
||||
"net/url" |
||||
"strings" |
||||
|
||||
"github.com/google/go-containerregistry/internal/redact" |
||||
"github.com/google/go-containerregistry/internal/verify" |
||||
"github.com/google/go-containerregistry/pkg/logs" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport" |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// ErrSchema1 indicates that we received a schema1 manifest from the registry.
// This library doesn't have plans to support this legacy image format:
// https://github.com/google/go-containerregistry/issues/377
type ErrSchema1 struct {
	// schema records the offending media type for the error message.
	schema string
}
||||
|
||||
// newErrSchema1 returns an ErrSchema1 with the unexpected MediaType.
|
||||
func newErrSchema1(schema types.MediaType) error { |
||||
return &ErrSchema1{ |
||||
schema: string(schema), |
||||
} |
||||
} |
||||
|
||||
// Error implements error.
// The message points at the tracking issue explaining why schema 1 is unsupported.
func (e *ErrSchema1) Error() string {
	return fmt.Sprintf("unsupported MediaType: %q, see https://github.com/google/go-containerregistry/issues/377", e.schema)
}
||||
|
||||
// Descriptor provides access to metadata about remote artifact and accessors
// for efficiently converting it into a v1.Image or v1.ImageIndex.
type Descriptor struct {
	fetcher
	v1.Descriptor
	// Manifest holds the raw manifest bytes as returned by the registry.
	Manifest []byte

	// So we can share this implementation with Image..
	platform v1.Platform
}
||||
|
||||
// RawManifest exists to satisfy the Taggable interface.
// It returns the manifest bytes exactly as fetched from the registry.
func (d *Descriptor) RawManifest() ([]byte, error) {
	return d.Manifest, nil
}
||||
|
||||
// Get returns a remote.Descriptor for the given reference. The response from
|
||||
// the registry is left un-interpreted, for the most part. This is useful for
|
||||
// querying what kind of artifact a reference represents.
|
||||
//
|
||||
// See Head if you don't need the response body.
|
||||
func Get(ref name.Reference, options ...Option) (*Descriptor, error) { |
||||
acceptable := []types.MediaType{ |
||||
// Just to look at them.
|
||||
types.DockerManifestSchema1, |
||||
types.DockerManifestSchema1Signed, |
||||
} |
||||
acceptable = append(acceptable, acceptableImageMediaTypes...) |
||||
acceptable = append(acceptable, acceptableIndexMediaTypes...) |
||||
return get(ref, acceptable, options...) |
||||
} |
||||
|
||||
// Head returns a v1.Descriptor for the given reference by issuing a HEAD
// request.
//
// Note that the server response will not have a body, so any errors encountered
// should be retried with Get to get more details.
func Head(ref name.Reference, options ...Option) (*v1.Descriptor, error) {
	acceptable := []types.MediaType{
		// Just to look at them.
		types.DockerManifestSchema1,
		types.DockerManifestSchema1Signed,
	}
	acceptable = append(acceptable, acceptableImageMediaTypes...)
	acceptable = append(acceptable, acceptableIndexMediaTypes...)

	o, err := makeOptions(ref.Context(), options...)
	if err != nil {
		return nil, err
	}

	f, err := makeFetcher(ref, o)
	if err != nil {
		return nil, err
	}

	return f.headManifest(ref, acceptable)
}
||||
|
||||
// Handle options and fetch the manifest with the acceptable MediaTypes in the
// Accept header. The returned Descriptor embeds the fetcher so the caller can
// lazily resolve it into an Image or ImageIndex later.
func get(ref name.Reference, acceptable []types.MediaType, options ...Option) (*Descriptor, error) {
	o, err := makeOptions(ref.Context(), options...)
	if err != nil {
		return nil, err
	}
	f, err := makeFetcher(ref, o)
	if err != nil {
		return nil, err
	}
	b, desc, err := f.fetchManifest(ref, acceptable)
	if err != nil {
		return nil, err
	}
	return &Descriptor{
		fetcher:    *f,
		Manifest:   b,
		Descriptor: *desc,
		platform:   o.platform,
	}, nil
}
||||
|
||||
// Image converts the Descriptor into a v1.Image.
//
// If the fetched artifact is already an image, it will just return it.
//
// If the fetched artifact is an index, it will attempt to resolve the index to
// a child image with the appropriate platform.
//
// See WithPlatform to set the desired platform.
func (d *Descriptor) Image() (v1.Image, error) {
	switch d.MediaType {
	case types.DockerManifestSchema1, types.DockerManifestSchema1Signed:
		// We don't care to support schema 1 images:
		// https://github.com/google/go-containerregistry/issues/377
		return nil, newErrSchema1(d.MediaType)
	case types.OCIImageIndex, types.DockerManifestList:
		// We want an image but the registry has an index, resolve it to an image.
		return d.remoteIndex().imageByPlatform(d.platform)
	case types.OCIManifestSchema1, types.DockerManifestSchema2:
		// These are expected. Enumerated here to allow a default case.
	default:
		// We could just return an error here, but some registries (e.g. static
		// registries) don't set the Content-Type headers correctly, so instead...
		logs.Warn.Printf("Unexpected media type for Image(): %s", d.MediaType)
	}

	// Wrap the v1.Layers returned by this v1.Image in a hint for downstream
	// remote.Write calls to facilitate cross-repo "mounting".
	imgCore, err := partial.CompressedToImage(d.remoteImage())
	if err != nil {
		return nil, err
	}
	return &mountableImage{
		Image:     imgCore,
		Reference: d.Ref,
	}, nil
}
||||
|
||||
// ImageIndex converts the Descriptor into a v1.ImageIndex.
//
// It fails for schema 1 manifests and for plain image manifests (where Image()
// should be used instead); unknown media types are tolerated with a warning
// because some registries mis-set Content-Type.
func (d *Descriptor) ImageIndex() (v1.ImageIndex, error) {
	switch d.MediaType {
	case types.DockerManifestSchema1, types.DockerManifestSchema1Signed:
		// We don't care to support schema 1 images:
		// https://github.com/google/go-containerregistry/issues/377
		return nil, newErrSchema1(d.MediaType)
	case types.OCIManifestSchema1, types.DockerManifestSchema2:
		// We want an index but the registry has an image, nothing we can do.
		return nil, fmt.Errorf("unexpected media type for ImageIndex(): %s; call Image() instead", d.MediaType)
	case types.OCIImageIndex, types.DockerManifestList:
		// These are expected.
	default:
		// We could just return an error here, but some registries (e.g. static
		// registries) don't set the Content-Type headers correctly, so instead...
		logs.Warn.Printf("Unexpected media type for ImageIndex(): %s", d.MediaType)
	}
	return d.remoteIndex(), nil
}
||||
|
||||
// remoteImage builds a remoteImage view over this Descriptor, reusing the
// already-fetched manifest so no additional manifest request is needed.
func (d *Descriptor) remoteImage() *remoteImage {
	return &remoteImage{
		fetcher:    d.fetcher,
		manifest:   d.Manifest,
		mediaType:  d.MediaType,
		descriptor: &d.Descriptor,
	}
}
||||
|
||||
// remoteIndex builds a remoteIndex view over this Descriptor, reusing the
// already-fetched manifest so no additional manifest request is needed.
func (d *Descriptor) remoteIndex() *remoteIndex {
	return &remoteIndex{
		fetcher:    d.fetcher,
		manifest:   d.Manifest,
		mediaType:  d.MediaType,
		descriptor: &d.Descriptor,
	}
}
||||
|
||||
// https://github.com/docker/hub-feedback/issues/2107#issuecomment-1371293316
//
// DockerHub supports plugins, which look like normal manifests, but will
// return a 401 with an incorrect challenge if you attempt to fetch them.
//
// They require you send, e.g.:
// 'repository(plugin):vieux/sshfs:pull' not 'repository:vieux/sshfs:pull'.
//
// Hack around this by always including the plugin-ified version in the initial
// scopes. The request will succeed with the correct subset, so it is safe to
// have extraneous scopes here.
//
// Only applies to the default registry (DockerHub); other registries get
// their scopes back unchanged.
func fixPluginScopes(ref name.Reference, scopes []string) []string {
	if ref.Context().Registry.String() == name.DefaultRegistry {
		for _, scope := range scopes {
			if strings.HasPrefix(scope, "repository") {
				scopes = append(scopes, strings.Replace(scope, "repository", "repository(plugin)", 1))
			}
		}
	}
	return scopes
}
||||
|
||||
// fetcher implements methods for reading from a registry.
type fetcher struct {
	// Ref is the reference all requests are made relative to.
	Ref name.Reference
	// Client carries the auth transport for this registry.
	Client *http.Client
	// context is used for requests issued by methods that don't take a ctx.
	context context.Context
}
||||
|
||||
// makeFetcher constructs a fetcher for ref, setting up an authenticated
// transport with pull scope (plus the DockerHub plugin-scope workaround).
func makeFetcher(ref name.Reference, o *options) (*fetcher, error) {
	scopes := []string{ref.Scope(transport.PullScope)}
	scopes = fixPluginScopes(ref, scopes)

	tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
	if err != nil {
		return nil, err
	}
	return &fetcher{
		Ref:     ref,
		Client:  &http.Client{Transport: tr},
		context: o.context,
	}, nil
}
||||
|
||||
// url returns a url.Url for the specified path in the context of this remote image reference.
|
||||
func (f *fetcher) url(resource, identifier string) url.URL { |
||||
return url.URL{ |
||||
Scheme: f.Ref.Context().Registry.Scheme(), |
||||
Host: f.Ref.Context().RegistryStr(), |
||||
Path: fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier), |
||||
} |
||||
} |
||||
|
||||
// fetchManifest GETs the manifest for ref, restricted to the given set of
// acceptable media types via the Accept header.
//
// It returns the raw manifest bytes plus a v1.Descriptor whose digest is
// computed over those bytes — except for signed schema 1 manifests, where the
// registry-reported Docker-Content-Digest is preferred when parseable. When
// ref is a digest reference, the resulting digest is validated against it.
func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) {
	u := f.url("manifests", ref.Identifier())
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, nil, err
	}
	accept := []string{}
	for _, mt := range acceptable {
		accept = append(accept, string(mt))
	}
	req.Header.Set("Accept", strings.Join(accept, ","))

	resp, err := f.Client.Do(req.WithContext(f.context))
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()

	if err := transport.CheckError(resp, http.StatusOK); err != nil {
		return nil, nil, err
	}

	manifest, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, nil, err
	}

	digest, size, err := v1.SHA256(bytes.NewReader(manifest))
	if err != nil {
		return nil, nil, err
	}

	mediaType := types.MediaType(resp.Header.Get("Content-Type"))
	contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest"))
	if err == nil && mediaType == types.DockerManifestSchema1Signed {
		// If we can parse the digest from the header, and it's a signed schema 1
		// manifest, let's use that for the digest to appease older registries.
		digest = contentDigest
	}

	// Validate the digest matches what we asked for, if pulling by digest.
	if dgst, ok := ref.(name.Digest); ok {
		if digest.String() != dgst.DigestStr() {
			return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
		}
	}
	// Do nothing for tags; I give up.
	//
	// We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry,
	// but so many registries implement this incorrectly that it's not worth checking.
	//
	// For reference:
	// https://github.com/GoogleContainerTools/kaniko/issues/298

	// Return all this info since we have to calculate it anyway.
	desc := v1.Descriptor{
		Digest:    digest,
		Size:      size,
		MediaType: mediaType,
	}

	return manifest, &desc, nil
}
||||
|
||||
func (f *fetcher) headManifest(ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) { |
||||
u := f.url("manifests", ref.Identifier()) |
||||
req, err := http.NewRequest(http.MethodHead, u.String(), nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
accept := []string{} |
||||
for _, mt := range acceptable { |
||||
accept = append(accept, string(mt)) |
||||
} |
||||
req.Header.Set("Accept", strings.Join(accept, ",")) |
||||
|
||||
resp, err := f.Client.Do(req.WithContext(f.context)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer resp.Body.Close() |
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
mth := resp.Header.Get("Content-Type") |
||||
if mth == "" { |
||||
return nil, fmt.Errorf("HEAD %s: response did not include Content-Type header", u.String()) |
||||
} |
||||
mediaType := types.MediaType(mth) |
||||
|
||||
size := resp.ContentLength |
||||
if size == -1 { |
||||
return nil, fmt.Errorf("GET %s: response did not include Content-Length header", u.String()) |
||||
} |
||||
|
||||
dh := resp.Header.Get("Docker-Content-Digest") |
||||
if dh == "" { |
||||
return nil, fmt.Errorf("HEAD %s: response did not include Docker-Content-Digest header", u.String()) |
||||
} |
||||
digest, err := v1.NewHash(dh) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
// Validate the digest matches what we asked for, if pulling by digest.
|
||||
if dgst, ok := ref.(name.Digest); ok { |
||||
if digest.String() != dgst.DigestStr() { |
||||
return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref) |
||||
} |
||||
} |
||||
|
||||
// Return all this info since we have to calculate it anyway.
|
||||
return &v1.Descriptor{ |
||||
Digest: digest, |
||||
Size: size, |
||||
MediaType: mediaType, |
||||
}, nil |
||||
} |
||||
|
||||
func (f *fetcher) fetchBlob(ctx context.Context, size int64, h v1.Hash) (io.ReadCloser, error) { |
||||
u := f.url("blobs", h.String()) |
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
resp, err := f.Client.Do(req.WithContext(ctx)) |
||||
if err != nil { |
||||
return nil, redact.Error(err) |
||||
} |
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil { |
||||
resp.Body.Close() |
||||
return nil, err |
||||
} |
||||
|
||||
// Do whatever we can.
|
||||
// If we have an expected size and Content-Length doesn't match, return an error.
|
||||
// If we don't have an expected size and we do have a Content-Length, use Content-Length.
|
||||
if hsize := resp.ContentLength; hsize != -1 { |
||||
if size == verify.SizeUnknown { |
||||
size = hsize |
||||
} else if hsize != size { |
||||
return nil, fmt.Errorf("GET %s: Content-Length header %d does not match expected size %d", u.String(), hsize, size) |
||||
} |
||||
} |
||||
|
||||
return verify.ReadCloser(resp.Body, size, h) |
||||
} |
||||
|
||||
// headBlob issues a HEAD request for the blob with digest h and returns the
// raw response. The caller owns the response and must close its Body.
func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) {
	u := f.url("blobs", h.String())
	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
	if err != nil {
		return nil, err
	}

	resp, err := f.Client.Do(req.WithContext(f.context))
	if err != nil {
		return nil, redact.Error(err)
	}

	if err := transport.CheckError(resp, http.StatusOK); err != nil {
		resp.Body.Close()
		return nil, err
	}

	return resp, nil
}
||||
|
||||
// blobExists reports whether the registry has a blob with digest h, via a
// HEAD request. A 404 is not an error — it simply yields false.
func (f *fetcher) blobExists(h v1.Hash) (bool, error) {
	u := f.url("blobs", h.String())
	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
	if err != nil {
		return false, err
	}

	resp, err := f.Client.Do(req.WithContext(f.context))
	if err != nil {
		return false, redact.Error(err)
	}
	defer resp.Body.Close()

	if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
		return false, err
	}

	return resp.StatusCode == http.StatusOK, nil
}
@ -0,0 +1,17 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package remote provides facilities for reading/writing v1.Images from/to
|
||||
// a remote image registry.
|
||||
package remote |
@ -0,0 +1,247 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"bytes" |
||||
"io" |
||||
"net/http" |
||||
"net/url" |
||||
"sync" |
||||
|
||||
"github.com/google/go-containerregistry/internal/redact" |
||||
"github.com/google/go-containerregistry/internal/verify" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport" |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// acceptableImageMediaTypes lists the manifest media types this package can
// interpret as a single image.
var acceptableImageMediaTypes = []types.MediaType{
	types.DockerManifestSchema2,
	types.OCIManifestSchema1,
}
||||
|
||||
// remoteImage accesses an image from a remote registry
type remoteImage struct {
	fetcher
	manifestLock sync.Mutex // Protects manifest
	manifest     []byte
	configLock   sync.Mutex // Protects config
	config       []byte
	mediaType    types.MediaType
	descriptor   *v1.Descriptor
}

// Compile-time check that remoteImage satisfies partial.CompressedImageCore.
var _ partial.CompressedImageCore = (*remoteImage)(nil)
||||
|
||||
// Image provides access to a remote image reference.
func Image(ref name.Reference, options ...Option) (v1.Image, error) {
	desc, err := Get(ref, options...)
	if err != nil {
		return nil, err
	}

	return desc.Image()
}
||||
|
||||
func (r *remoteImage) MediaType() (types.MediaType, error) { |
||||
if string(r.mediaType) != "" { |
||||
return r.mediaType, nil |
||||
} |
||||
return types.DockerManifestSchema2, nil |
||||
} |
||||
|
||||
// RawManifest returns the raw manifest bytes, fetching (and memoizing) them
// on first use. Safe for concurrent use via manifestLock.
func (r *remoteImage) RawManifest() ([]byte, error) {
	r.manifestLock.Lock()
	defer r.manifestLock.Unlock()
	if r.manifest != nil {
		return r.manifest, nil
	}

	// NOTE(jonjohnsonjr): We should never get here because the public entrypoints
	// do type-checking via remote.Descriptor. I've left this here for tests that
	// directly instantiate a remoteImage.
	manifest, desc, err := r.fetchManifest(r.Ref, acceptableImageMediaTypes)
	if err != nil {
		return nil, err
	}

	if r.descriptor == nil {
		r.descriptor = desc
	}
	r.mediaType = desc.MediaType
	r.manifest = manifest
	return r.manifest, nil
}
||||
|
||||
// RawConfigFile returns the raw config blob, fetching (and memoizing) it on
// first use. If the manifest embeds the config via descriptor Data, that is
// verified and used instead of a network fetch.
func (r *remoteImage) RawConfigFile() ([]byte, error) {
	r.configLock.Lock()
	defer r.configLock.Unlock()
	if r.config != nil {
		return r.config, nil
	}

	m, err := partial.Manifest(r)
	if err != nil {
		return nil, err
	}

	if m.Config.Data != nil {
		// Inlined config: verify it matches its descriptor rather than fetching.
		if err := verify.Descriptor(m.Config); err != nil {
			return nil, err
		}
		r.config = m.Config.Data
		return r.config, nil
	}

	body, err := r.fetchBlob(r.context, m.Config.Size, m.Config.Digest)
	if err != nil {
		return nil, err
	}
	defer body.Close()

	r.config, err = io.ReadAll(body)
	if err != nil {
		return nil, err
	}
	return r.config, nil
}
||||
|
||||
// Descriptor retains the original descriptor from an index manifest.
// See partial.Descriptor.
func (r *remoteImage) Descriptor() (*v1.Descriptor, error) {
	// kind of a hack, but RawManifest does appropriate locking/memoization
	// and makes sure r.descriptor is populated.
	_, err := r.RawManifest()
	return r.descriptor, err
}
||||
|
||||
// remoteImageLayer implements partial.CompressedLayer
type remoteImageLayer struct {
	// ri is the parent image this layer belongs to; used for fetching and
	// for manifest/config lookups.
	ri     *remoteImage
	digest v1.Hash
}
||||
|
||||
// Digest implements partial.CompressedLayer
func (rl *remoteImageLayer) Digest() (v1.Hash, error) {
	return rl.digest, nil
}
||||
|
||||
// Compressed implements partial.CompressedLayer
//
// It first tries the registry's own blob endpoint, then any alternative URLs
// declared on the layer's descriptor (foreign layers). Inlined descriptor
// Data short-circuits everything. The returned reader verifies size and
// digest as it is consumed.
func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) {
	urls := []url.URL{rl.ri.url("blobs", rl.digest.String())}

	// Add alternative layer sources from URLs (usually none).
	d, err := partial.BlobDescriptor(rl, rl.digest)
	if err != nil {
		return nil, err
	}

	if d.Data != nil {
		return verify.ReadCloser(io.NopCloser(bytes.NewReader(d.Data)), d.Size, d.Digest)
	}

	// We don't want to log binary layers -- this can break terminals.
	ctx := redact.NewContext(rl.ri.context, "omitting binary blobs from logs")

	for _, s := range d.URLs {
		u, err := url.Parse(s)
		if err != nil {
			return nil, err
		}
		urls = append(urls, *u)
	}

	// The lastErr for most pulls will be the same (the first error), but for
	// foreign layers we'll want to surface the last one, since we try to pull
	// from the registry first, which would often fail.
	// TODO: Maybe we don't want to try pulling from the registry first?
	var lastErr error
	for _, u := range urls {
		req, err := http.NewRequest(http.MethodGet, u.String(), nil)
		if err != nil {
			return nil, err
		}

		resp, err := rl.ri.Client.Do(req.WithContext(ctx))
		if err != nil {
			lastErr = err
			continue
		}

		if err := transport.CheckError(resp, http.StatusOK); err != nil {
			resp.Body.Close()
			lastErr = err
			continue
		}

		return verify.ReadCloser(resp.Body, d.Size, rl.digest)
	}

	return nil, lastErr
}
||||
|
||||
// Manifest implements partial.WithManifest so that we can use partial.BlobSize below.
func (rl *remoteImageLayer) Manifest() (*v1.Manifest, error) {
	return partial.Manifest(rl.ri)
}
||||
|
||||
// MediaType implements v1.Layer by reading the media type recorded for this
// layer's descriptor in the parent manifest.
func (rl *remoteImageLayer) MediaType() (types.MediaType, error) {
	bd, err := partial.BlobDescriptor(rl, rl.digest)
	if err != nil {
		return "", err
	}

	return bd.MediaType, nil
}
||||
|
||||
// Size implements partial.CompressedLayer
func (rl *remoteImageLayer) Size() (int64, error) {
	// Look up the size of this digest in the manifest to avoid a request.
	return partial.BlobSize(rl, rl.digest)
}
||||
|
||||
// ConfigFile implements partial.WithManifestAndConfigFile so that we can use partial.BlobToDiffID below.
func (rl *remoteImageLayer) ConfigFile() (*v1.ConfigFile, error) {
	return partial.ConfigFile(rl.ri)
}
||||
|
||||
// DiffID implements partial.WithDiffID so that we don't recompute a DiffID that we already have
// available in our ConfigFile.
func (rl *remoteImageLayer) DiffID() (v1.Hash, error) {
	return partial.BlobToDiffID(rl, rl.digest)
}
||||
|
||||
// Descriptor retains the original descriptor from an image manifest.
// See partial.Descriptor.
func (rl *remoteImageLayer) Descriptor() (*v1.Descriptor, error) {
	return partial.BlobDescriptor(rl, rl.digest)
}
||||
|
||||
// Exists reports whether the layer's blob is present in the registry.
// See partial.Exists.
func (rl *remoteImageLayer) Exists() (bool, error) {
	return rl.ri.blobExists(rl.digest)
}
||||
|
||||
// LayerByDigest implements partial.CompressedImageCore, returning a lazy
// layer view keyed by digest (no network work happens here).
func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) {
	return &remoteImageLayer{
		ri:     r,
		digest: h,
	}, nil
}
@ -0,0 +1,309 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"sync" |
||||
|
||||
"github.com/google/go-containerregistry/internal/verify" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// acceptableIndexMediaTypes lists the manifest media types this package can
// interpret as an image index (manifest list).
var acceptableIndexMediaTypes = []types.MediaType{
	types.DockerManifestList,
	types.OCIImageIndex,
}
||||
|
||||
// remoteIndex accesses an index from a remote registry
type remoteIndex struct {
	fetcher
	manifestLock sync.Mutex // Protects manifest
	manifest     []byte
	mediaType    types.MediaType
	descriptor   *v1.Descriptor
}
||||
|
||||
// Index provides access to a remote index reference.
func Index(ref name.Reference, options ...Option) (v1.ImageIndex, error) {
	desc, err := get(ref, acceptableIndexMediaTypes, options...)
	if err != nil {
		return nil, err
	}

	return desc.ImageIndex()
}
||||
|
||||
func (r *remoteIndex) MediaType() (types.MediaType, error) { |
||||
if string(r.mediaType) != "" { |
||||
return r.mediaType, nil |
||||
} |
||||
return types.DockerManifestList, nil |
||||
} |
||||
|
||||
// Digest implements v1.ImageIndex via partial, hashing the raw manifest.
func (r *remoteIndex) Digest() (v1.Hash, error) {
	return partial.Digest(r)
}
||||
|
||||
// Size implements v1.ImageIndex via partial, from the raw manifest length.
func (r *remoteIndex) Size() (int64, error) {
	return partial.Size(r)
}
||||
|
||||
// RawManifest returns the raw index manifest bytes, fetching (and memoizing)
// them on first use. Safe for concurrent use via manifestLock.
func (r *remoteIndex) RawManifest() ([]byte, error) {
	r.manifestLock.Lock()
	defer r.manifestLock.Unlock()
	if r.manifest != nil {
		return r.manifest, nil
	}

	// NOTE(jonjohnsonjr): We should never get here because the public entrypoints
	// do type-checking via remote.Descriptor. I've left this here for tests that
	// directly instantiate a remoteIndex.
	manifest, desc, err := r.fetchManifest(r.Ref, acceptableIndexMediaTypes)
	if err != nil {
		return nil, err
	}

	if r.descriptor == nil {
		r.descriptor = desc
	}
	r.mediaType = desc.MediaType
	r.manifest = manifest
	return r.manifest, nil
}
||||
|
||||
// IndexManifest parses the raw manifest into a structured v1.IndexManifest.
func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) {
	b, err := r.RawManifest()
	if err != nil {
		return nil, err
	}
	return v1.ParseIndexManifest(bytes.NewReader(b))
}
||||
|
||||
// Image returns the child of this index with the given digest as a v1.Image.
func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) {
	desc, err := r.childByHash(h)
	if err != nil {
		return nil, err
	}

	// Descriptor.Image will handle coercing nested indexes into an Image.
	return desc.Image()
}
||||
|
||||
// Descriptor retains the original descriptor from an index manifest.
// See partial.Descriptor.
func (r *remoteIndex) Descriptor() (*v1.Descriptor, error) {
	// kind of a hack, but RawManifest does appropriate locking/memoization
	// and makes sure r.descriptor is populated.
	_, err := r.RawManifest()
	return r.descriptor, err
}
||||
|
||||
// ImageIndex returns the child of this index with the given digest as a
// nested v1.ImageIndex.
func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) {
	desc, err := r.childByHash(h)
	if err != nil {
		return nil, err
	}
	return desc.ImageIndex()
}
||||
|
||||
// Layer returns the index child with the given digest as a raw v1.Layer,
// for index entries that are neither images nor indexes.
//
// Workaround for #819.
func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) {
	index, err := r.IndexManifest()
	if err != nil {
		return nil, err
	}
	for _, childDesc := range index.Manifests {
		if h == childDesc.Digest {
			l, err := partial.CompressedToLayer(&remoteLayer{
				fetcher: r.fetcher,
				digest:  h,
			})
			if err != nil {
				return nil, err
			}
			// Wrap in MountableLayer so downstream writes can cross-repo mount.
			return &MountableLayer{
				Layer:     l,
				Reference: r.Ref.Context().Digest(h.String()),
			}, nil
		}
	}
	return nil, fmt.Errorf("layer not found: %s", h)
}
||||
|
||||
// Manifests resolves every child of this index into the appropriate concrete
// type: images, nested indexes, or (for anything else) raw layers.
//
// Experiment with a better API for v1.ImageIndex. We might want to move this
// to partial?
func (r *remoteIndex) Manifests() ([]partial.Describable, error) {
	m, err := r.IndexManifest()
	if err != nil {
		return nil, err
	}
	manifests := []partial.Describable{}
	for _, desc := range m.Manifests {
		switch {
		case desc.MediaType.IsImage():
			img, err := r.Image(desc.Digest)
			if err != nil {
				return nil, err
			}
			manifests = append(manifests, img)
		case desc.MediaType.IsIndex():
			idx, err := r.ImageIndex(desc.Digest)
			if err != nil {
				return nil, err
			}
			manifests = append(manifests, idx)
		default:
			layer, err := r.Layer(desc.Digest)
			if err != nil {
				return nil, err
			}
			manifests = append(manifests, layer)
		}
	}

	return manifests, nil
}
||||
|
||||
// imageByPlatform resolves this index to the child image matching the given
// platform.
func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) {
	desc, err := r.childByPlatform(platform)
	if err != nil {
		return nil, err
	}

	// Descriptor.Image will handle coercing nested indexes into an Image.
	return desc.Image()
}
||||
|
||||
// childByPlatform returns the first index child whose platform matches, as a
// remote.Descriptor carrying the requested platform.
//
// This naively matches the first manifest with matching platform attributes.
//
// We should probably use this instead:
//
//	github.com/containerd/containerd/platforms
//
// But first we'd need to migrate to:
//
//	github.com/opencontainers/image-spec/specs-go/v1
func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) {
	index, err := r.IndexManifest()
	if err != nil {
		return nil, err
	}
	for _, childDesc := range index.Manifests {
		// If platform is missing from child descriptor, assume it's amd64/linux.
		p := defaultPlatform
		if childDesc.Platform != nil {
			p = *childDesc.Platform
		}

		if matchesPlatform(p, platform) {
			return r.childDescriptor(childDesc, platform)
		}
	}
	return nil, fmt.Errorf("no child with platform %+v in index %s", platform, r.Ref)
}
||||
|
||||
// childByHash returns the index child with the given digest as a
// remote.Descriptor (with the default platform attached).
func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) {
	index, err := r.IndexManifest()
	if err != nil {
		return nil, err
	}
	for _, childDesc := range index.Manifests {
		if h == childDesc.Digest {
			return r.childDescriptor(childDesc, defaultPlatform)
		}
	}
	return nil, fmt.Errorf("no child with digest %s in index %s", h, r.Ref)
}
||||
|
||||
// Convert one of this index's child's v1.Descriptor into a remote.Descriptor, with the given platform option.
//
// When the child descriptor carries inlined Data, it is verified against the
// descriptor and used directly; otherwise the child manifest is fetched.
func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) (*Descriptor, error) {
	ref := r.Ref.Context().Digest(child.Digest.String())
	var (
		manifest []byte
		err      error
	)
	if child.Data != nil {
		if err := verify.Descriptor(child); err != nil {
			return nil, err
		}
		manifest = child.Data
	} else {
		manifest, _, err = r.fetchManifest(ref, []types.MediaType{child.MediaType})
		if err != nil {
			return nil, err
		}
	}
	return &Descriptor{
		fetcher: fetcher{
			Ref:     ref,
			Client:  r.Client,
			context: r.context,
		},
		Manifest:   manifest,
		Descriptor: child,
		platform:   platform,
	}, nil
}
||||
|
||||
// matchesPlatform checks if the given platform matches the required platforms.
// The given platform matches the required platform if
// - architecture and OS are identical.
// - OS version and variant are identical if provided.
// - features and OS features of the required platform are subsets of those of the given platform.
func matchesPlatform(given, required v1.Platform) bool {
	// Required fields that must be identical.
	if given.Architecture != required.Architecture || given.OS != required.OS {
		return false
	}

	// Optional fields that may be empty, but must be identical if provided.
	if required.OSVersion != "" && given.OSVersion != required.OSVersion {
		return false
	}
	if required.Variant != "" && given.Variant != required.Variant {
		return false
	}

	// Verify required platform's features are a subset of given platform's features.
	if !isSubset(given.OSFeatures, required.OSFeatures) {
		return false
	}
	if !isSubset(given.Features, required.Features) {
		return false
	}

	return true
}
||||
|
||||
// isSubset reports whether every string in required is also present in lst.
// An empty required slice is trivially a subset of anything.
func isSubset(lst, required []string) bool {
	have := make(map[string]bool, len(lst))
	for _, v := range lst {
		have[v] = true
	}

	for _, want := range required {
		if !have[want] {
			return false
		}
	}

	return true
}
@ -0,0 +1,94 @@ |
||||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"io" |
||||
|
||||
"github.com/google/go-containerregistry/internal/redact" |
||||
"github.com/google/go-containerregistry/internal/verify" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
// remoteLayer implements partial.CompressedLayer, fetching a single layer
// blob from a remote registry by digest via the embedded fetcher.
// (Previous comment had a typo: "remoteImagelayer".)
type remoteLayer struct {
	fetcher
	digest v1.Hash
}
||||
|
||||
// Compressed implements partial.CompressedLayer
|
||||
func (rl *remoteLayer) Compressed() (io.ReadCloser, error) { |
||||
// We don't want to log binary layers -- this can break terminals.
|
||||
ctx := redact.NewContext(rl.context, "omitting binary blobs from logs") |
||||
return rl.fetchBlob(ctx, verify.SizeUnknown, rl.digest) |
||||
} |
||||
|
||||
// Size implements partial.CompressedLayer, reporting the compressed blob
// size from a HEAD request's Content-Length rather than downloading the blob.
// (Previous comment incorrectly said "Compressed implements ...".)
func (rl *remoteLayer) Size() (int64, error) {
	resp, err := rl.headBlob(rl.digest)
	if err != nil {
		return -1, err
	}
	defer resp.Body.Close()
	return resp.ContentLength, nil
}
||||
|
||||
// Digest implements partial.CompressedLayer. The digest is known up front
// (it is how the layer was addressed), so no network call is made.
func (rl *remoteLayer) Digest() (v1.Hash, error) {
	return rl.digest, nil
}
||||
|
||||
// MediaType implements v1.Layer. It unconditionally reports
// types.DockerLayer; presumably the registry blob API gives no way to
// recover the real media type from a bare digest — TODO confirm.
func (rl *remoteLayer) MediaType() (types.MediaType, error) {
	return types.DockerLayer, nil
}
||||
|
||||
// Exists reports whether the blob is present in the registry.
// See partial.Exists.
func (rl *remoteLayer) Exists() (bool, error) {
	return rl.blobExists(rl.digest)
}
||||
|
||||
// Layer reads the given blob reference from a registry as a Layer. A blob
|
||||
// reference here is just a punned name.Digest where the digest portion is the
|
||||
// digest of the blob to be read and the repository portion is the repo where
|
||||
// that blob lives.
|
||||
func Layer(ref name.Digest, options ...Option) (v1.Layer, error) { |
||||
o, err := makeOptions(ref.Context(), options...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
f, err := makeFetcher(ref, o) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
h, err := v1.NewHash(ref.Identifier()) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
l, err := partial.CompressedToLayer(&remoteLayer{ |
||||
fetcher: *f, |
||||
digest: h, |
||||
}) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &MountableLayer{ |
||||
Layer: l, |
||||
Reference: ref, |
||||
}, nil |
||||
} |
@ -0,0 +1,141 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/json" |
||||
"fmt" |
||||
"net/http" |
||||
"net/url" |
||||
"strings" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport" |
||||
) |
||||
|
||||
// tags models the JSON body of a registry /v2/<repo>/tags/list response.
type tags struct {
	Name string `json:"name"`
	Tags []string `json:"tags"`
}
||||
|
||||
// ListWithContext calls List with the given context.
//
// Deprecated: Use List and WithContext. This will be removed in a future release.
func ListWithContext(ctx context.Context, repo name.Repository, options ...Option) ([]string, error) {
	// Appending WithContext last means it wins over any earlier context option.
	return List(repo, append(options, WithContext(ctx))...)
}
||||
|
||||
// List calls /tags/list for the given repository, returning the list of tags
|
||||
// in the "tags" property.
|
||||
func List(repo name.Repository, options ...Option) ([]string, error) { |
||||
o, err := makeOptions(repo, options...) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
scopes := []string{repo.Scope(transport.PullScope)} |
||||
tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
uri := &url.URL{ |
||||
Scheme: repo.Registry.Scheme(), |
||||
Host: repo.Registry.RegistryStr(), |
||||
Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()), |
||||
} |
||||
|
||||
if o.pageSize > 0 { |
||||
uri.RawQuery = fmt.Sprintf("n=%d", o.pageSize) |
||||
} |
||||
|
||||
client := http.Client{Transport: tr} |
||||
tagList := []string{} |
||||
parsed := tags{} |
||||
|
||||
// get responses until there is no next page
|
||||
for { |
||||
select { |
||||
case <-o.context.Done(): |
||||
return nil, o.context.Err() |
||||
default: |
||||
} |
||||
|
||||
req, err := http.NewRequestWithContext(o.context, "GET", uri.String(), nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
resp, err := client.Do(req) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
if err := resp.Body.Close(); err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
tagList = append(tagList, parsed.Tags...) |
||||
|
||||
uri, err = getNextPageURL(resp) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
// no next page
|
||||
if uri == nil { |
||||
break |
||||
} |
||||
} |
||||
|
||||
return tagList, nil |
||||
} |
||||
|
||||
// getNextPageURL checks if there is a Link header in a http.Response which
|
||||
// contains a link to the next page. If yes it returns the url.URL of the next
|
||||
// page otherwise it returns nil.
|
||||
func getNextPageURL(resp *http.Response) (*url.URL, error) { |
||||
link := resp.Header.Get("Link") |
||||
if link == "" { |
||||
return nil, nil |
||||
} |
||||
|
||||
if link[0] != '<' { |
||||
return nil, fmt.Errorf("failed to parse link header: missing '<' in: %s", link) |
||||
} |
||||
|
||||
end := strings.Index(link, ">") |
||||
if end == -1 { |
||||
return nil, fmt.Errorf("failed to parse link header: missing '>' in: %s", link) |
||||
} |
||||
link = link[1:end] |
||||
|
||||
linkURL, err := url.Parse(link) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
if resp.Request == nil || resp.Request.URL == nil { |
||||
return nil, nil |
||||
} |
||||
linkURL = resp.Request.URL.ResolveReference(linkURL) |
||||
return linkURL, nil |
||||
} |
@ -0,0 +1,108 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||
) |
||||
|
||||
// MountableLayer wraps a v1.Layer in a shim that enables the layer to be
// "mounted" when published to another registry.
type MountableLayer struct {
	v1.Layer

	// Reference records where the layer originally lives, so a push to a
	// different repository can attempt a cross-repo mount.
	Reference name.Reference
}
||||
|
||||
// Descriptor retains the original descriptor from an image manifest,
// delegating to the wrapped layer. See partial.Descriptor.
func (ml *MountableLayer) Descriptor() (*v1.Descriptor, error) {
	return partial.Descriptor(ml.Layer)
}
||||
|
||||
// Exists is a hack, forwarding to the wrapped layer. See partial.Exists.
func (ml *MountableLayer) Exists() (bool, error) {
	return partial.Exists(ml.Layer)
}
||||
|
||||
// mountableImage wraps the v1.Layer references returned by the embedded v1.Image
// in MountableLayer's so that remote.Write might attempt to mount them from their
// source repository.
type mountableImage struct {
	v1.Image

	// Reference is the source repository attached to every wrapped layer.
	Reference name.Reference
}
||||
|
||||
// Layers implements v1.Image
|
||||
func (mi *mountableImage) Layers() ([]v1.Layer, error) { |
||||
ls, err := mi.Image.Layers() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
mls := make([]v1.Layer, 0, len(ls)) |
||||
for _, l := range ls { |
||||
mls = append(mls, &MountableLayer{ |
||||
Layer: l, |
||||
Reference: mi.Reference, |
||||
}) |
||||
} |
||||
return mls, nil |
||||
} |
||||
|
||||
// LayerByDigest implements v1.Image
|
||||
func (mi *mountableImage) LayerByDigest(d v1.Hash) (v1.Layer, error) { |
||||
l, err := mi.Image.LayerByDigest(d) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &MountableLayer{ |
||||
Layer: l, |
||||
Reference: mi.Reference, |
||||
}, nil |
||||
} |
||||
|
||||
// LayerByDiffID implements v1.Image
|
||||
func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) { |
||||
l, err := mi.Image.LayerByDiffID(d) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &MountableLayer{ |
||||
Layer: l, |
||||
Reference: mi.Reference, |
||||
}, nil |
||||
} |
||||
|
||||
// Descriptor retains the original descriptor from an index manifest,
// delegating to the wrapped image. See partial.Descriptor.
func (mi *mountableImage) Descriptor() (*v1.Descriptor, error) {
	return partial.Descriptor(mi.Image)
}
||||
|
||||
// ConfigLayer retains the original reference so that it can be mounted.
|
||||
// See partial.ConfigLayer.
|
||||
func (mi *mountableImage) ConfigLayer() (v1.Layer, error) { |
||||
l, err := partial.ConfigLayer(mi.Image) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &MountableLayer{ |
||||
Layer: l, |
||||
Reference: mi.Reference, |
||||
}, nil |
||||
} |
@ -0,0 +1,302 @@ |
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"net/http" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport" |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
"golang.org/x/sync/errgroup" |
||||
) |
||||
|
||||
// MultiWrite writes the given Images or ImageIndexes to the given refs, as
|
||||
// efficiently as possible, by deduping shared layer blobs and uploading layers
|
||||
// in parallel, then uploading all manifests in parallel.
|
||||
//
|
||||
// Current limitations:
|
||||
// - All refs must share the same repository.
|
||||
// - Images cannot consist of stream.Layers.
|
||||
func MultiWrite(m map[name.Reference]Taggable, options ...Option) (rerr error) { |
||||
// Determine the repository being pushed to; if asked to push to
|
||||
// multiple repositories, give up.
|
||||
var repo, zero name.Repository |
||||
for ref := range m { |
||||
if repo == zero { |
||||
repo = ref.Context() |
||||
} else if ref.Context() != repo { |
||||
return fmt.Errorf("MultiWrite can only push to the same repository (saw %q and %q)", repo, ref.Context()) |
||||
} |
||||
} |
||||
|
||||
o, err := makeOptions(repo, options...) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
// Collect unique blobs (layers and config blobs).
|
||||
blobs := map[v1.Hash]v1.Layer{} |
||||
newManifests := []map[name.Reference]Taggable{} |
||||
// Separate originally requested images and indexes, so we can push images first.
|
||||
images, indexes := map[name.Reference]Taggable{}, map[name.Reference]Taggable{} |
||||
for ref, i := range m { |
||||
if img, ok := i.(v1.Image); ok { |
||||
images[ref] = i |
||||
if err := addImageBlobs(img, blobs, o.allowNondistributableArtifacts); err != nil { |
||||
return err |
||||
} |
||||
continue |
||||
} |
||||
if idx, ok := i.(v1.ImageIndex); ok { |
||||
indexes[ref] = i |
||||
newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, 0, o.allowNondistributableArtifacts) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
continue |
||||
} |
||||
return fmt.Errorf("pushable resource was not Image or ImageIndex: %T", i) |
||||
} |
||||
|
||||
// Determine if any of the layers are Mountable, because if so we need
|
||||
// to request Pull scope too.
|
||||
ls := []v1.Layer{} |
||||
for _, l := range blobs { |
||||
ls = append(ls, l) |
||||
} |
||||
scopes := scopesForUploadingImage(repo, ls) |
||||
tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
w := writer{ |
||||
repo: repo, |
||||
client: &http.Client{Transport: tr}, |
||||
backoff: o.retryBackoff, |
||||
predicate: o.retryPredicate, |
||||
} |
||||
|
||||
// Collect the total size of blobs and manifests we're about to write.
|
||||
if o.updates != nil { |
||||
w.progress = &progress{updates: o.updates} |
||||
w.progress.lastUpdate = &v1.Update{} |
||||
defer close(o.updates) |
||||
defer func() { _ = w.progress.err(rerr) }() |
||||
for _, b := range blobs { |
||||
size, err := b.Size() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
w.progress.total(size) |
||||
} |
||||
countManifest := func(t Taggable) error { |
||||
b, err := t.RawManifest() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
w.progress.total(int64(len(b))) |
||||
return nil |
||||
} |
||||
for _, i := range images { |
||||
if err := countManifest(i); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
for _, nm := range newManifests { |
||||
for _, i := range nm { |
||||
if err := countManifest(i); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
} |
||||
for _, i := range indexes { |
||||
if err := countManifest(i); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Upload individual blobs and collect any errors.
|
||||
blobChan := make(chan v1.Layer, 2*o.jobs) |
||||
ctx := o.context |
||||
g, gctx := errgroup.WithContext(o.context) |
||||
for i := 0; i < o.jobs; i++ { |
||||
// Start N workers consuming blobs to upload.
|
||||
g.Go(func() error { |
||||
for b := range blobChan { |
||||
if err := w.uploadOne(gctx, b); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
return nil |
||||
}) |
||||
} |
||||
g.Go(func() error { |
||||
defer close(blobChan) |
||||
for _, b := range blobs { |
||||
select { |
||||
case blobChan <- b: |
||||
case <-gctx.Done(): |
||||
return gctx.Err() |
||||
} |
||||
} |
||||
return nil |
||||
}) |
||||
if err := g.Wait(); err != nil { |
||||
return err |
||||
} |
||||
|
||||
commitMany := func(ctx context.Context, m map[name.Reference]Taggable) error { |
||||
g, ctx := errgroup.WithContext(ctx) |
||||
// With all of the constituent elements uploaded, upload the manifests
|
||||
// to commit the images and indexes, and collect any errors.
|
||||
type task struct { |
||||
i Taggable |
||||
ref name.Reference |
||||
} |
||||
taskChan := make(chan task, 2*o.jobs) |
||||
for i := 0; i < o.jobs; i++ { |
||||
// Start N workers consuming tasks to upload manifests.
|
||||
g.Go(func() error { |
||||
for t := range taskChan { |
||||
if err := w.commitManifest(ctx, t.i, t.ref); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
return nil |
||||
}) |
||||
} |
||||
go func() { |
||||
for ref, i := range m { |
||||
taskChan <- task{i, ref} |
||||
} |
||||
close(taskChan) |
||||
}() |
||||
return g.Wait() |
||||
} |
||||
// Push originally requested image manifests. These have no
|
||||
// dependencies.
|
||||
if err := commitMany(ctx, images); err != nil { |
||||
return err |
||||
} |
||||
// Push new manifests from lowest levels up.
|
||||
for i := len(newManifests) - 1; i >= 0; i-- { |
||||
if err := commitMany(ctx, newManifests[i]); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
// Push originally requested index manifests, which might depend on
|
||||
// newly discovered manifests.
|
||||
|
||||
return commitMany(ctx, indexes) |
||||
} |
||||
|
||||
// addIndexBlobs adds blobs to the set of blobs we intend to upload, and
// returns the latest copy of the ordered collection of manifests to upload.
// lvl is the current nesting depth within the index tree; newManifests[lvl]
// collects the manifests discovered at that depth so callers can push the
// deepest levels first.
func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repository, newManifests []map[name.Reference]Taggable, lvl int, allowNondistributableArtifacts bool) ([]map[name.Reference]Taggable, error) {
	// Grow newManifests lazily so it always has a slot for this depth.
	if lvl > len(newManifests)-1 {
		newManifests = append(newManifests, map[name.Reference]Taggable{})
	}

	im, err := idx.IndexManifest()
	if err != nil {
		return nil, err
	}
	for _, desc := range im.Manifests {
		switch desc.MediaType {
		case types.OCIImageIndex, types.DockerManifestList:
			// Note: this shadows the outer idx with the child index.
			idx, err := idx.ImageIndex(desc.Digest)
			if err != nil {
				return nil, err
			}
			// Recurse one level deeper to pick up the child's blobs first.
			newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, lvl+1, allowNondistributableArtifacts)
			if err != nil {
				return nil, err
			}

			// Also track the sub-index manifest to upload later by digest.
			newManifests[lvl][repo.Digest(desc.Digest.String())] = idx
		case types.OCIManifestSchema1, types.DockerManifestSchema2:
			img, err := idx.Image(desc.Digest)
			if err != nil {
				return nil, err
			}
			if err := addImageBlobs(img, blobs, allowNondistributableArtifacts); err != nil {
				return nil, err
			}

			// Also track the sub-image manifest to upload later by digest.
			newManifests[lvl][repo.Digest(desc.Digest.String())] = img
		default:
			// Workaround for #819: an index may reference a raw blob (not a
			// manifest); if the index implementation exposes Layer, treat
			// the entry as a layer blob.
			if wl, ok := idx.(withLayer); ok {
				layer, err := wl.Layer(desc.Digest)
				if err != nil {
					return nil, err
				}
				if err := addLayerBlob(layer, blobs, allowNondistributableArtifacts); err != nil {
					return nil, err
				}
			} else {
				return nil, fmt.Errorf("unknown media type: %v", desc.MediaType)
			}
		}
	}
	return newManifests, nil
}
||||
|
||||
func addLayerBlob(l v1.Layer, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error { |
||||
// Ignore foreign layers.
|
||||
mt, err := l.MediaType() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if mt.IsDistributable() || allowNondistributableArtifacts { |
||||
d, err := l.Digest() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
blobs[d] = l |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error { |
||||
ls, err := img.Layers() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
// Collect all layers.
|
||||
for _, l := range ls { |
||||
if err := addLayerBlob(l, blobs, allowNondistributableArtifacts); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
// Collect config blob.
|
||||
cl, err := partial.ConfigLayer(img) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
return addLayerBlob(cl, blobs, allowNondistributableArtifacts) |
||||
} |
@ -0,0 +1,305 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"io" |
||||
"net" |
||||
"net/http" |
||||
"syscall" |
||||
"time" |
||||
|
||||
"github.com/google/go-containerregistry/internal/retry" |
||||
"github.com/google/go-containerregistry/pkg/authn" |
||||
"github.com/google/go-containerregistry/pkg/logs" |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport" |
||||
) |
||||
|
||||
// Option is a functional option for remote operations.
type Option func(*options) error

// options is the resolved configuration for a remote operation, populated
// by makeOptions from the defaults plus any supplied Options.
type options struct {
	// auth and keychain are mutually exclusive; makeOptions rejects both.
	auth authn.Authenticator
	keychain authn.Keychain
	transport http.RoundTripper
	platform v1.Platform
	context context.Context
	// jobs bounds the parallelism of operations that support it.
	jobs int
	userAgent string
	allowNondistributableArtifacts bool
	// updates, if non-nil, receives progress updates (see WithProgress).
	updates chan<- v1.Update
	// pageSize is the 'n' query parameter for /tags/list; 0 omits it.
	pageSize int
	retryBackoff Backoff
	retryPredicate retry.Predicate
}
||||
|
||||
// defaultPlatform is used when resolving an index to an image and no
// WithPlatform option is given.
var defaultPlatform = v1.Platform{
	Architecture: "amd64",
	OS: "linux",
}

// Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib
type Backoff = retry.Backoff

// defaultRetryPredicate retries on transient network failures and logs a
// warning for each retried error.
var defaultRetryPredicate retry.Predicate = func(err error) bool {
	// Various failure modes here, as we're often reading from and writing to
	// the network.
	if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) {
		logs.Warn.Printf("retrying %v", err)
		return true
	}
	return false
}

// Try this three times, waiting 1s after first failure, 3s after second.
var defaultRetryBackoff = Backoff{
	Duration: 1.0 * time.Second,
	Factor: 3.0,
	Jitter: 0.1,
	Steps: 3,
}

// Useful for tests
var fastBackoff = Backoff{
	Duration: 1.0 * time.Millisecond,
	Factor: 3.0,
	Jitter: 0.1,
	Steps: 3,
}

// retryableStatusCodes are the HTTP statuses the retrying transport treats
// as transient (see makeOptions).
var retryableStatusCodes = []int{
	http.StatusRequestTimeout,
	http.StatusInternalServerError,
	http.StatusBadGateway,
	http.StatusServiceUnavailable,
	http.StatusGatewayTimeout,
}
||||
|
||||
const (
	// defaultJobs is the default parallelism for remote operations.
	defaultJobs = 4

	// ECR returns an error if n > 1000:
	// https://github.com/google/go-containerregistry/issues/1091
	defaultPageSize = 1000
)

// DefaultTransport is based on http.DefaultTransport with modifications
// documented inline below. Note it sets dial/handshake timeouts but no
// overall request timeout; cancellation is expected to come from the
// per-operation context.
var DefaultTransport http.RoundTripper = &http.Transport{
	Proxy: http.ProxyFromEnvironment,
	DialContext: (&net.Dialer{
		Timeout: 30 * time.Second,
		KeepAlive: 30 * time.Second,
	}).DialContext,
	ForceAttemptHTTP2: true,
	MaxIdleConns: 100,
	IdleConnTimeout: 90 * time.Second,
	TLSHandshakeTimeout: 10 * time.Second,
	ExpectContinueTimeout: 1 * time.Second,
}
||||
|
||||
// makeOptions applies the given Options on top of the defaults, resolves
// authentication (keychain lookup against target, or anonymous), and wraps
// the transport with logging, retry, and user-agent layers unless the caller
// supplied a *transport.Wrapper.
func makeOptions(target authn.Resource, opts ...Option) (*options, error) {
	o := &options{
		transport: DefaultTransport,
		platform: defaultPlatform,
		context: context.Background(),
		jobs: defaultJobs,
		pageSize: defaultPageSize,
		retryPredicate: defaultRetryPredicate,
		retryBackoff: defaultRetryBackoff,
	}

	for _, option := range opts {
		if err := option(o); err != nil {
			return nil, err
		}
	}

	switch {
	case o.auth != nil && o.keychain != nil:
		// It is a better experience to explicitly tell a caller their auth is misconfigured
		// than potentially fail silently when the correct auth is overridden by option misuse.
		return nil, errors.New("provide an option for either authn.Authenticator or authn.Keychain, not both")
	case o.keychain != nil:
		auth, err := o.keychain.Resolve(target)
		if err != nil {
			return nil, err
		}
		o.auth = auth
	case o.auth == nil:
		o.auth = authn.Anonymous
	}

	// transport.Wrapper is a signal that consumers are opt-ing into providing their own transport without any additional wrapping.
	// This is to allow consumers full control over the transports logic, such as providing retry logic.
	if _, ok := o.transport.(*transport.Wrapper); !ok {
		// Wrap the transport in something that logs requests and responses.
		// It's expensive to generate the dumps, so skip it if we're writing
		// to nothing.
		if logs.Enabled(logs.Debug) {
			o.transport = transport.NewLogger(o.transport)
		}

		// Wrap the transport in something that can retry network flakes.
		// NOTE(review): this uses defaultRetryPredicate rather than
		// o.retryPredicate; o.retryPredicate appears to be consumed by the
		// writer instead — confirm this asymmetry is intentional.
		o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(retryableStatusCodes...))

		// Wrap this last to prevent transport.New from double-wrapping.
		if o.userAgent != "" {
			o.transport = transport.NewUserAgent(o.transport, o.userAgent)
		}
	}

	return o, nil
}
||||
|
||||
// WithTransport is a functional option for overriding the default transport
// for remote operations.
// If transport.Wrapper is provided, this signals that the consumer does *not* want any further wrapping to occur.
// i.e. logging, retry and useragent
//
// The default transport is DefaultTransport.
func WithTransport(t http.RoundTripper) Option {
	return func(o *options) error {
		o.transport = t
		return nil
	}
}

// WithAuth is a functional option for overriding the default authenticator
// for remote operations.
// It is an error to use both WithAuth and WithAuthFromKeychain in the same Option set.
//
// The default authenticator is authn.Anonymous.
func WithAuth(auth authn.Authenticator) Option {
	return func(o *options) error {
		o.auth = auth
		return nil
	}
}

// WithAuthFromKeychain is a functional option for overriding the default
// authenticator for remote operations, using an authn.Keychain to find
// credentials. The keychain is resolved against the target resource in
// makeOptions.
// It is an error to use both WithAuth and WithAuthFromKeychain in the same Option set.
//
// The default authenticator is authn.Anonymous.
func WithAuthFromKeychain(keys authn.Keychain) Option {
	return func(o *options) error {
		o.keychain = keys
		return nil
	}
}

// WithPlatform is a functional option for overriding the default platform
// that Image and Descriptor.Image use for resolving an index to an image.
//
// The default platform is amd64/linux.
func WithPlatform(p v1.Platform) Option {
	return func(o *options) error {
		o.platform = p
		return nil
	}
}

// WithContext is a functional option for setting the context in http requests
// performed by a given function. Note that this context is used for _all_
// http requests, not just the initial volley. E.g., for remote.Image, the
// context will be set on http requests generated by subsequent calls to
// RawConfigFile() and even methods on layers returned by Layers().
//
// The default context is context.Background().
func WithContext(ctx context.Context) Option {
	return func(o *options) error {
		o.context = ctx
		return nil
	}
}

// WithJobs is a functional option for setting the parallelism of remote
// operations performed by a given function. Note that not all remote
// operations support parallelism.
//
// The default value is 4. Non-positive values are rejected.
func WithJobs(jobs int) Option {
	return func(o *options) error {
		if jobs <= 0 {
			return errors.New("jobs must be greater than zero")
		}
		o.jobs = jobs
		return nil
	}
}

// WithUserAgent adds the given string to the User-Agent header for any HTTP
// requests. This header will also include "go-containerregistry/${version}".
//
// If you want to completely overwrite the User-Agent header, use WithTransport.
func WithUserAgent(ua string) Option {
	return func(o *options) error {
		o.userAgent = ua
		return nil
	}
}

// WithNondistributable includes non-distributable (foreign) layers
// when writing images, see:
// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers
//
// The default behaviour is to skip these layers.
// Note: unlike the other constructors here, this function itself satisfies
// the Option signature — pass it directly, without calling it.
func WithNondistributable(o *options) error {
	o.allowNondistributableArtifacts = true
	return nil
}

// WithProgress takes a channel that will receive progress updates as bytes are written.
//
// Sending updates to an unbuffered channel will block writes, so callers
// should provide a buffered channel to avoid potential deadlocks.
func WithProgress(updates chan<- v1.Update) Option {
	return func(o *options) error {
		o.updates = updates
		return nil
	}
}

// WithPageSize sets the given size as the value of parameter 'n' in the request.
//
// To omit the `n` parameter entirely, use WithPageSize(0).
// The default value is 1000.
func WithPageSize(size int) Option {
	return func(o *options) error {
		o.pageSize = size
		return nil
	}
}

// WithRetryBackoff sets the httpBackoff for retry HTTP operations.
func WithRetryBackoff(backoff Backoff) Option {
	return func(o *options) error {
		o.retryBackoff = backoff
		return nil
	}
}

// WithRetryPredicate sets the predicate for retry HTTP operations.
func WithRetryPredicate(predicate retry.Predicate) Option {
	return func(o *options) error {
		o.retryPredicate = predicate
		return nil
	}
}
@ -0,0 +1,69 @@ |
||||
// Copyright 2022 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"io" |
||||
"sync" |
||||
"sync/atomic" |
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
) |
||||
|
||||
type progress struct { |
||||
sync.Mutex |
||||
updates chan<- v1.Update |
||||
lastUpdate *v1.Update |
||||
} |
||||
|
||||
func (p *progress) total(delta int64) { |
||||
atomic.AddInt64(&p.lastUpdate.Total, delta) |
||||
} |
||||
|
||||
func (p *progress) complete(delta int64) { |
||||
p.Lock() |
||||
defer p.Unlock() |
||||
p.updates <- v1.Update{ |
||||
Total: p.lastUpdate.Total, |
||||
Complete: atomic.AddInt64(&p.lastUpdate.Complete, delta), |
||||
} |
||||
} |
||||
|
||||
func (p *progress) err(err error) error { |
||||
if err != nil && p.updates != nil { |
||||
p.updates <- v1.Update{Error: err} |
||||
} |
||||
return err |
||||
} |
||||
|
||||
type progressReader struct { |
||||
rc io.ReadCloser |
||||
|
||||
count *int64 // number of bytes this reader has read, to support resetting on retry.
|
||||
progress *progress |
||||
} |
||||
|
||||
func (r *progressReader) Read(b []byte) (int, error) { |
||||
n, err := r.rc.Read(b) |
||||
if err != nil { |
||||
return n, err |
||||
} |
||||
atomic.AddInt64(r.count, int64(n)) |
||||
// TODO: warn/debug log if sending takes too long, or if sending is blocked while context is canceled.
|
||||
r.progress.complete(int64(n)) |
||||
return n, nil |
||||
} |
||||
|
||||
func (r *progressReader) Close() error { return r.rc.Close() } |
129
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md
generated
vendored
129
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md
generated
vendored
@ -0,0 +1,129 @@ |
||||
# `transport` |
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport) |
||||
|
||||
The [distribution protocol](https://github.com/opencontainers/distribution-spec) is fairly simple, but correctly [implementing authentication](../../../authn/README.md) is **hard**. |
||||
|
||||
This package [implements](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#New) an [`http.RoundTripper`](https://godoc.org/net/http#RoundTripper) |
||||
that transparently performs: |
||||
* [Token |
||||
Authentication](https://docs.docker.com/registry/spec/auth/token/) and |
||||
* [OAuth2 |
||||
Authentication](https://docs.docker.com/registry/spec/auth/oauth/) |
||||
|
||||
for registry clients. |
||||
|
||||
## Raison d'ĂŞtre |
||||
|
||||
> Why not just use the [`docker/distribution`](https://godoc.org/github.com/docker/distribution/registry/client/auth) client? |
||||
|
||||
Great question! Mostly, because I don't want to depend on [`prometheus/client_golang`](https://github.com/prometheus/client_golang). |
||||
|
||||
As a performance optimization, that client uses [a cache](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/client/repository.go#L173) to keep track of a mapping between blob digests and their [descriptors](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/blobs.go#L57-L86). Unfortunately, the cache [uses prometheus](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/storage/cache/cachedblobdescriptorstore.go#L44) to track hits and misses, so if you want to use that client you have to pull in all of prometheus, which is pretty large. |
||||
|
||||
![docker/distribution](../../../../images/docker.dot.svg) |
||||
|
||||
> Why does it matter if you depend on prometheus? Who cares? |
||||
|
||||
It's generally polite to your downstream to reduce the number of dependencies your package requires: |
||||
|
||||
* Downloading your package is faster, which helps our Australian friends and people on airplanes. |
||||
* There is less code to compile, which speeds up builds and saves the planet from global warming. |
||||
* You reduce the likelihood of inflicting dependency hell upon your consumers. |
||||
* [Tim Hockin](https://twitter.com/thockin/status/958606077456654336) prefers it based on his experience working on Kubernetes, and he's a pretty smart guy. |
||||
|
||||
> Okay, what about [`containerd/containerd`](https://godoc.org/github.com/containerd/containerd/remotes/docker)? |
||||
|
||||
Similar reasons! That ends up pulling in grpc, protobuf, and logrus. |
||||
|
||||
![containerd/containerd](../../../../images/containerd.dot.svg) |
||||
|
||||
> Well... what about [`containers/image`](https://godoc.org/github.com/containers/image/docker)? |
||||
|
||||
That just uses the the `docker/distribution` client... and more! |
||||
|
||||
![containers/image](../../../../images/containers.dot.svg) |
||||
|
||||
> Wow, what about this package? |
||||
|
||||
Of course, this package isn't perfect either. `transport` depends on `authn`, |
||||
which in turn depends on docker's config file parsing and handling package, |
||||
which you don't strictly need but almost certainly want if you're going to be |
||||
interacting with a registry. |
||||
|
||||
![google/go-containerregistry](../../../../images/ggcr.dot.svg) |
||||
|
||||
*These graphs were generated by |
||||
[`kisielk/godepgraph`](https://github.com/kisielk/godepgraph).* |
||||
|
||||
## Usage |
||||
|
||||
This is heavily used by the |
||||
[`remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) |
||||
package, which implements higher level image-centric functionality, but this |
||||
package is useful if you want to interact directly with the registry to do |
||||
something that `remote` doesn't support, e.g. [to handle with schema 1 |
||||
images](https://github.com/google/go-containerregistry/pull/509). |
||||
|
||||
This package also includes some [error |
||||
handling](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#errors) |
||||
facilities in the form of |
||||
[`CheckError`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#CheckError), |
||||
which will parse the response body into a structured error for unexpected http |
||||
status codes. |
||||
|
||||
Here's a "simple" program that writes the result of |
||||
[listing tags](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#tags) |
||||
for [`gcr.io/google-containers/pause`](https://gcr.io/google-containers/pause) |
||||
to stdout. |
||||
|
||||
```go |
||||
package main |
||||
|
||||
import ( |
||||
"io" |
||||
"net/http" |
||||
"os" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport" |
||||
) |
||||
|
||||
func main() { |
||||
repo, err := name.NewRepository("gcr.io/google-containers/pause") |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
// Fetch credentials based on your docker config file, which is $HOME/.docker/config.json or $DOCKER_CONFIG. |
||||
auth, err := authn.DefaultKeychain.Resolve(repo.Registry) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
// Construct an http.Client that is authorized to pull from gcr.io/google-containers/pause. |
||||
scopes := []string{repo.Scope(transport.PullScope)} |
||||
t, err := transport.New(repo.Registry, auth, http.DefaultTransport, scopes) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
client := &http.Client{Transport: t} |
||||
|
||||
// Make the actual request. |
||||
resp, err := client.Get("https://gcr.io/v2/google-containers/pause/tags/list") |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
// Assert that we get a 200, otherwise attempt to parse body as a structured error. |
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
// Write the response to stdout. |
||||
if _, err := io.Copy(os.Stdout, resp.Body); err != nil { |
||||
panic(err) |
||||
} |
||||
} |
||||
``` |
62
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go
generated
vendored
62
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go
generated
vendored
@ -0,0 +1,62 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport |
||||
|
||||
import ( |
||||
"encoding/base64" |
||||
"fmt" |
||||
"net/http" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn" |
||||
) |
||||
|
||||
type basicTransport struct { |
||||
inner http.RoundTripper |
||||
auth authn.Authenticator |
||||
target string |
||||
} |
||||
|
||||
var _ http.RoundTripper = (*basicTransport)(nil) |
||||
|
||||
// RoundTrip implements http.RoundTripper
|
||||
func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) { |
||||
if bt.auth != authn.Anonymous { |
||||
auth, err := bt.auth.Authorization() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
// http.Client handles redirects at a layer above the http.RoundTripper
|
||||
// abstraction, so to avoid forwarding Authorization headers to places
|
||||
// we are redirected, only set it when the authorization header matches
|
||||
// the host with which we are interacting.
|
||||
// In case of redirect http.Client can use an empty Host, check URL too.
|
||||
if in.Host == bt.target || in.URL.Host == bt.target { |
||||
if bearer := auth.RegistryToken; bearer != "" { |
||||
hdr := fmt.Sprintf("Bearer %s", bearer) |
||||
in.Header.Set("Authorization", hdr) |
||||
} else if user, pass := auth.Username, auth.Password; user != "" && pass != "" { |
||||
delimited := fmt.Sprintf("%s:%s", user, pass) |
||||
encoded := base64.StdEncoding.EncodeToString([]byte(delimited)) |
||||
hdr := fmt.Sprintf("Basic %s", encoded) |
||||
in.Header.Set("Authorization", hdr) |
||||
} else if token := auth.Auth; token != "" { |
||||
hdr := fmt.Sprintf("Basic %s", token) |
||||
in.Header.Set("Authorization", hdr) |
||||
} |
||||
} |
||||
} |
||||
return bt.inner.RoundTrip(in) |
||||
} |
320
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go
generated
vendored
320
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go
generated
vendored
@ -0,0 +1,320 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/json" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"net" |
||||
"net/http" |
||||
"net/url" |
||||
"strings" |
||||
|
||||
authchallenge "github.com/docker/distribution/registry/client/auth/challenge" |
||||
"github.com/google/go-containerregistry/internal/redact" |
||||
"github.com/google/go-containerregistry/pkg/authn" |
||||
"github.com/google/go-containerregistry/pkg/logs" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
) |
||||
|
||||
type bearerTransport struct { |
||||
// Wrapped by bearerTransport.
|
||||
inner http.RoundTripper |
||||
// Basic credentials that we exchange for bearer tokens.
|
||||
basic authn.Authenticator |
||||
// Holds the bearer response from the token service.
|
||||
bearer authn.AuthConfig |
||||
// Registry to which we send bearer tokens.
|
||||
registry name.Registry |
||||
// See https://tools.ietf.org/html/rfc6750#section-3
|
||||
realm string |
||||
// See https://docs.docker.com/registry/spec/auth/token/
|
||||
service string |
||||
scopes []string |
||||
// Scheme we should use, determined by ping response.
|
||||
scheme string |
||||
} |
||||
|
||||
var _ http.RoundTripper = (*bearerTransport)(nil) |
||||
|
||||
var portMap = map[string]string{ |
||||
"http": "80", |
||||
"https": "443", |
||||
} |
||||
|
||||
func stringSet(ss []string) map[string]struct{} { |
||||
set := make(map[string]struct{}) |
||||
for _, s := range ss { |
||||
set[s] = struct{}{} |
||||
} |
||||
return set |
||||
} |
||||
|
||||
// RoundTrip implements http.RoundTripper
|
||||
func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { |
||||
sendRequest := func() (*http.Response, error) { |
||||
// http.Client handles redirects at a layer above the http.RoundTripper
|
||||
// abstraction, so to avoid forwarding Authorization headers to places
|
||||
// we are redirected, only set it when the authorization header matches
|
||||
// the registry with which we are interacting.
|
||||
// In case of redirect http.Client can use an empty Host, check URL too.
|
||||
if matchesHost(bt.registry, in, bt.scheme) { |
||||
hdr := fmt.Sprintf("Bearer %s", bt.bearer.RegistryToken) |
||||
in.Header.Set("Authorization", hdr) |
||||
} |
||||
return bt.inner.RoundTrip(in) |
||||
} |
||||
|
||||
res, err := sendRequest() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
// If we hit a WWW-Authenticate challenge, it might be due to expired tokens or insufficient scope.
|
||||
if challenges := authchallenge.ResponseChallenges(res); len(challenges) != 0 { |
||||
// close out old response, since we will not return it.
|
||||
res.Body.Close() |
||||
|
||||
newScopes := []string{} |
||||
for _, wac := range challenges { |
||||
// TODO(jonjohnsonjr): Should we also update "realm" or "service"?
|
||||
if want, ok := wac.Parameters["scope"]; ok { |
||||
// Add any scopes that we don't already request.
|
||||
got := stringSet(bt.scopes) |
||||
if _, ok := got[want]; !ok { |
||||
newScopes = append(newScopes, want) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Some registries seem to only look at the first scope parameter during a token exchange.
|
||||
// If a request fails because it's missing a scope, we should put those at the beginning,
|
||||
// otherwise the registry might just ignore it :/
|
||||
newScopes = append(newScopes, bt.scopes...) |
||||
bt.scopes = newScopes |
||||
|
||||
// TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge.
|
||||
|
||||
// Retry the request to attempt to get a valid token.
|
||||
if err = bt.refresh(in.Context()); err != nil { |
||||
return nil, err |
||||
} |
||||
return sendRequest() |
||||
} |
||||
|
||||
return res, err |
||||
} |
||||
|
||||
// It's unclear which authentication flow to use based purely on the protocol,
|
||||
// so we rely on heuristics and fallbacks to support as many registries as possible.
|
||||
// The basic token exchange is attempted first, falling back to the oauth flow.
|
||||
// If the IdentityToken is set, this indicates that we should start with the oauth flow.
|
||||
func (bt *bearerTransport) refresh(ctx context.Context) error { |
||||
auth, err := bt.basic.Authorization() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if auth.RegistryToken != "" { |
||||
bt.bearer.RegistryToken = auth.RegistryToken |
||||
return nil |
||||
} |
||||
|
||||
var content []byte |
||||
if auth.IdentityToken != "" { |
||||
// If the secret being stored is an identity token,
|
||||
// the Username should be set to <token>, which indicates
|
||||
// we are using an oauth flow.
|
||||
content, err = bt.refreshOauth(ctx) |
||||
var terr *Error |
||||
if errors.As(err, &terr) && terr.StatusCode == http.StatusNotFound { |
||||
// Note: Not all token servers implement oauth2.
|
||||
// If the request to the endpoint returns 404 using the HTTP POST method,
|
||||
// refer to Token Documentation for using the HTTP GET method supported by all token servers.
|
||||
content, err = bt.refreshBasic(ctx) |
||||
} |
||||
} else { |
||||
content, err = bt.refreshBasic(ctx) |
||||
} |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
// Some registries don't have "token" in the response. See #54.
|
||||
type tokenResponse struct { |
||||
Token string `json:"token"` |
||||
AccessToken string `json:"access_token"` |
||||
RefreshToken string `json:"refresh_token"` |
||||
// TODO: handle expiry?
|
||||
} |
||||
|
||||
var response tokenResponse |
||||
if err := json.Unmarshal(content, &response); err != nil { |
||||
return err |
||||
} |
||||
|
||||
// Some registries set access_token instead of token.
|
||||
if response.AccessToken != "" { |
||||
response.Token = response.AccessToken |
||||
} |
||||
|
||||
// Find a token to turn into a Bearer authenticator
|
||||
if response.Token != "" { |
||||
bt.bearer.RegistryToken = response.Token |
||||
} else { |
||||
return fmt.Errorf("no token in bearer response:\n%s", content) |
||||
} |
||||
|
||||
// If we obtained a refresh token from the oauth flow, use that for refresh() now.
|
||||
if response.RefreshToken != "" { |
||||
bt.basic = authn.FromConfig(authn.AuthConfig{ |
||||
IdentityToken: response.RefreshToken, |
||||
}) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
func matchesHost(reg name.Registry, in *http.Request, scheme string) bool { |
||||
canonicalHeaderHost := canonicalAddress(in.Host, scheme) |
||||
canonicalURLHost := canonicalAddress(in.URL.Host, scheme) |
||||
canonicalRegistryHost := canonicalAddress(reg.RegistryStr(), scheme) |
||||
return canonicalHeaderHost == canonicalRegistryHost || canonicalURLHost == canonicalRegistryHost |
||||
} |
||||
|
||||
func canonicalAddress(host, scheme string) (address string) { |
||||
// The host may be any one of:
|
||||
// - hostname
|
||||
// - hostname:port
|
||||
// - ipv4
|
||||
// - ipv4:port
|
||||
// - ipv6
|
||||
// - [ipv6]:port
|
||||
// As net.SplitHostPort returns an error if the host does not contain a port, we should only attempt
|
||||
// to call it when we know that the address contains a port
|
||||
if strings.Count(host, ":") == 1 || (strings.Count(host, ":") >= 2 && strings.Contains(host, "]:")) { |
||||
hostname, port, err := net.SplitHostPort(host) |
||||
if err != nil { |
||||
return host |
||||
} |
||||
if port == "" { |
||||
port = portMap[scheme] |
||||
} |
||||
|
||||
return net.JoinHostPort(hostname, port) |
||||
} |
||||
|
||||
return net.JoinHostPort(host, portMap[scheme]) |
||||
} |
||||
|
||||
// https://docs.docker.com/registry/spec/auth/oauth/
|
||||
func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) { |
||||
auth, err := bt.basic.Authorization() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
u, err := url.Parse(bt.realm) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
v := url.Values{} |
||||
v.Set("scope", strings.Join(bt.scopes, " ")) |
||||
if bt.service != "" { |
||||
v.Set("service", bt.service) |
||||
} |
||||
v.Set("client_id", defaultUserAgent) |
||||
if auth.IdentityToken != "" { |
||||
v.Set("grant_type", "refresh_token") |
||||
v.Set("refresh_token", auth.IdentityToken) |
||||
} else if auth.Username != "" && auth.Password != "" { |
||||
// TODO(#629): This is unreachable.
|
||||
v.Set("grant_type", "password") |
||||
v.Set("username", auth.Username) |
||||
v.Set("password", auth.Password) |
||||
v.Set("access_type", "offline") |
||||
} |
||||
|
||||
client := http.Client{Transport: bt.inner} |
||||
req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") |
||||
|
||||
// We don't want to log credentials.
|
||||
ctx = redact.NewContext(ctx, "oauth token response contains credentials") |
||||
|
||||
resp, err := client.Do(req.WithContext(ctx)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer resp.Body.Close() |
||||
|
||||
if err := CheckError(resp, http.StatusOK); err != nil { |
||||
if bt.basic == authn.Anonymous { |
||||
logs.Warn.Printf("No matching credentials were found for %q", bt.registry) |
||||
} |
||||
return nil, err |
||||
} |
||||
|
||||
return io.ReadAll(resp.Body) |
||||
} |
||||
|
||||
// https://docs.docker.com/registry/spec/auth/token/
|
||||
func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) { |
||||
u, err := url.Parse(bt.realm) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
b := &basicTransport{ |
||||
inner: bt.inner, |
||||
auth: bt.basic, |
||||
target: u.Host, |
||||
} |
||||
client := http.Client{Transport: b} |
||||
|
||||
v := u.Query() |
||||
v["scope"] = bt.scopes |
||||
v.Set("service", bt.service) |
||||
u.RawQuery = v.Encode() |
||||
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
// We don't want to log credentials.
|
||||
ctx = redact.NewContext(ctx, "basic token response contains credentials") |
||||
|
||||
resp, err := client.Do(req.WithContext(ctx)) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer resp.Body.Close() |
||||
|
||||
if err := CheckError(resp, http.StatusOK); err != nil { |
||||
if bt.basic == authn.Anonymous { |
||||
logs.Warn.Printf("No matching credentials were found for %q", bt.registry) |
||||
} |
||||
return nil, err |
||||
} |
||||
|
||||
return io.ReadAll(resp.Body) |
||||
} |
@ -0,0 +1,18 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package transport provides facilities for setting up an authenticated
|
||||
// http.RoundTripper given an Authenticator and base RoundTripper. See
|
||||
// transport.New for more information.
|
||||
package transport |
173
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
generated
vendored
173
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
generated
vendored
@ -0,0 +1,173 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"io" |
||||
"net/http" |
||||
"strings" |
||||
|
||||
"github.com/google/go-containerregistry/internal/redact" |
||||
) |
||||
|
||||
// Error implements error to support the following error specification:
|
||||
// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
|
||||
type Error struct { |
||||
Errors []Diagnostic `json:"errors,omitempty"` |
||||
// The http status code returned.
|
||||
StatusCode int |
||||
// The request that failed.
|
||||
Request *http.Request |
||||
// The raw body if we couldn't understand it.
|
||||
rawBody string |
||||
} |
||||
|
||||
// Check that Error implements error
|
||||
var _ error = (*Error)(nil) |
||||
|
||||
// Error implements error
|
||||
func (e *Error) Error() string { |
||||
prefix := "" |
||||
if e.Request != nil { |
||||
prefix = fmt.Sprintf("%s %s: ", e.Request.Method, redact.URL(e.Request.URL)) |
||||
} |
||||
return prefix + e.responseErr() |
||||
} |
||||
|
||||
func (e *Error) responseErr() string { |
||||
switch len(e.Errors) { |
||||
case 0: |
||||
if len(e.rawBody) == 0 { |
||||
if e.Request != nil && e.Request.Method == http.MethodHead { |
||||
return fmt.Sprintf("unexpected status code %d %s (HEAD responses have no body, use GET for details)", e.StatusCode, http.StatusText(e.StatusCode)) |
||||
} |
||||
return fmt.Sprintf("unexpected status code %d %s", e.StatusCode, http.StatusText(e.StatusCode)) |
||||
} |
||||
return fmt.Sprintf("unexpected status code %d %s: %s", e.StatusCode, http.StatusText(e.StatusCode), e.rawBody) |
||||
case 1: |
||||
return e.Errors[0].String() |
||||
default: |
||||
var errors []string |
||||
for _, d := range e.Errors { |
||||
errors = append(errors, d.String()) |
||||
} |
||||
return fmt.Sprintf("multiple errors returned: %s", |
||||
strings.Join(errors, "; ")) |
||||
} |
||||
} |
||||
|
||||
// Temporary returns whether the request that preceded the error is temporary.
|
||||
func (e *Error) Temporary() bool { |
||||
if len(e.Errors) == 0 { |
||||
_, ok := temporaryStatusCodes[e.StatusCode] |
||||
return ok |
||||
} |
||||
for _, d := range e.Errors { |
||||
if _, ok := temporaryErrorCodes[d.Code]; !ok { |
||||
return false |
||||
} |
||||
} |
||||
return true |
||||
} |
||||
|
||||
// Diagnostic represents a single error returned by a Docker registry interaction.
|
||||
type Diagnostic struct { |
||||
Code ErrorCode `json:"code"` |
||||
Message string `json:"message,omitempty"` |
||||
Detail any `json:"detail,omitempty"` |
||||
} |
||||
|
||||
// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail]
|
||||
func (d Diagnostic) String() string { |
||||
msg := fmt.Sprintf("%s: %s", d.Code, d.Message) |
||||
if d.Detail != nil { |
||||
msg = fmt.Sprintf("%s; %v", msg, d.Detail) |
||||
} |
||||
return msg |
||||
} |
||||
|
||||
// ErrorCode is an enumeration of supported error codes.
|
||||
type ErrorCode string |
||||
|
||||
// The set of error conditions a registry may return:
|
||||
// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2
|
||||
const ( |
||||
BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN" |
||||
BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID" |
||||
BlobUploadUnknownErrorCode ErrorCode = "BLOB_UPLOAD_UNKNOWN" |
||||
DigestInvalidErrorCode ErrorCode = "DIGEST_INVALID" |
||||
ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN" |
||||
ManifestInvalidErrorCode ErrorCode = "MANIFEST_INVALID" |
||||
ManifestUnknownErrorCode ErrorCode = "MANIFEST_UNKNOWN" |
||||
ManifestUnverifiedErrorCode ErrorCode = "MANIFEST_UNVERIFIED" |
||||
NameInvalidErrorCode ErrorCode = "NAME_INVALID" |
||||
NameUnknownErrorCode ErrorCode = "NAME_UNKNOWN" |
||||
SizeInvalidErrorCode ErrorCode = "SIZE_INVALID" |
||||
TagInvalidErrorCode ErrorCode = "TAG_INVALID" |
||||
UnauthorizedErrorCode ErrorCode = "UNAUTHORIZED" |
||||
DeniedErrorCode ErrorCode = "DENIED" |
||||
UnsupportedErrorCode ErrorCode = "UNSUPPORTED" |
||||
TooManyRequestsErrorCode ErrorCode = "TOOMANYREQUESTS" |
||||
UnknownErrorCode ErrorCode = "UNKNOWN" |
||||
|
||||
// This isn't defined by either docker or OCI spec, but is defined by docker/distribution:
|
||||
// https://github.com/distribution/distribution/blob/6a977a5a754baa213041443f841705888107362a/registry/api/errcode/register.go#L60
|
||||
UnavailableErrorCode ErrorCode = "UNAVAILABLE" |
||||
) |
||||
|
||||
// TODO: Include other error types.
|
||||
var temporaryErrorCodes = map[ErrorCode]struct{}{ |
||||
BlobUploadInvalidErrorCode: {}, |
||||
TooManyRequestsErrorCode: {}, |
||||
UnknownErrorCode: {}, |
||||
UnavailableErrorCode: {}, |
||||
} |
||||
|
||||
var temporaryStatusCodes = map[int]struct{}{ |
||||
http.StatusRequestTimeout: {}, |
||||
http.StatusInternalServerError: {}, |
||||
http.StatusBadGateway: {}, |
||||
http.StatusServiceUnavailable: {}, |
||||
http.StatusGatewayTimeout: {}, |
||||
} |
||||
|
||||
// CheckError returns a structured error if the response status is not in codes.
|
||||
func CheckError(resp *http.Response, codes ...int) error { |
||||
for _, code := range codes { |
||||
if resp.StatusCode == code { |
||||
// This is one of the supported status codes.
|
||||
return nil |
||||
} |
||||
} |
||||
b, err := io.ReadAll(resp.Body) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
|
||||
structuredError := &Error{} |
||||
|
||||
// This can fail if e.g. the response body is not valid JSON. That's fine,
|
||||
// we'll construct an appropriate error string from the body and status code.
|
||||
_ = json.Unmarshal(b, structuredError) |
||||
|
||||
structuredError.rawBody = string(b) |
||||
structuredError.StatusCode = resp.StatusCode |
||||
structuredError.Request = resp.Request |
||||
|
||||
return structuredError |
||||
} |
91
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go
generated
vendored
91
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go
generated
vendored
@ -0,0 +1,91 @@ |
||||
// Copyright 2020 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net/http" |
||||
"net/http/httputil" |
||||
"time" |
||||
|
||||
"github.com/google/go-containerregistry/internal/redact" |
||||
"github.com/google/go-containerregistry/pkg/logs" |
||||
) |
||||
|
||||
// logTransport wraps an http.RoundTripper and logs each request/response
// pair (with sensitive parts redacted) to the package's debug logger.
type logTransport struct {
	inner http.RoundTripper
}
||||
|
||||
// NewLogger returns a transport that logs requests and responses to
// github.com/google/go-containerregistry/pkg/logs.Debug.
func NewLogger(inner http.RoundTripper) http.RoundTripper {
	return &logTransport{inner}
}
||||
|
||||
// RoundTrip implements http.RoundTripper. It logs the outgoing request
// (with the Authorization header redacted), delegates to the inner
// transport, then logs the response and its duration.
func (t *logTransport) RoundTrip(in *http.Request) (out *http.Response, err error) {
	// Inspired by: github.com/motemen/go-loghttp

	// We redact token responses and binary blobs in response/request.
	omitBody, reason := redact.FromContext(in.Context())
	if omitBody {
		logs.Debug.Printf("--> %s %s [body redacted: %s]", in.Method, in.URL, reason)
	} else {
		logs.Debug.Printf("--> %s %s", in.Method, in.URL)
	}

	// Save these headers so we can redact Authorization.
	savedHeaders := in.Header.Clone()
	if in.Header != nil && in.Header.Get("authorization") != "" {
		in.Header.Set("authorization", "<redacted>")
	}

	b, err := httputil.DumpRequestOut(in, !omitBody)
	if err == nil {
		logs.Debug.Println(string(b))
	} else {
		logs.Debug.Printf("Failed to dump request %s %s: %v", in.Method, in.URL, err)
	}

	// Restore the non-redacted headers.
	in.Header = savedHeaders

	start := time.Now()
	out, err = t.inner.RoundTrip(in)
	duration := time.Since(start)
	if err != nil {
		logs.Debug.Printf("<-- %v %s %s (%s)", err, in.Method, in.URL, duration)
	}
	// Both out and err may be non-nil per RoundTripper semantics of wrapped
	// transports; log whatever response we got regardless of err.
	if out != nil {
		msg := fmt.Sprintf("<-- %d", out.StatusCode)
		if out.Request != nil {
			msg = fmt.Sprintf("%s %s", msg, out.Request.URL)
		}
		msg = fmt.Sprintf("%s (%s)", msg, duration)

		if omitBody {
			msg = fmt.Sprintf("%s [body redacted: %s]", msg, reason)
		}

		logs.Debug.Print(msg)

		b, err := httputil.DumpResponse(out, !omitBody)
		if err == nil {
			logs.Debug.Println(string(b))
		} else {
			logs.Debug.Printf("Failed to dump response %s %s: %v", in.Method, in.URL, err)
		}
	}
	return
}
227
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go
generated
vendored
227
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go
generated
vendored
@ -0,0 +1,227 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"net/http" |
||||
"strings" |
||||
"time" |
||||
|
||||
authchallenge "github.com/docker/distribution/registry/client/auth/challenge" |
||||
"github.com/google/go-containerregistry/pkg/logs" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
) |
||||
|
||||
// challenge is the auth scheme advertised by the registry's
// WWW-Authenticate response header (compared in lowercase form).
type challenge string

const (
	anonymous challenge = "anonymous"
	basic     challenge = "basic"
	bearer    challenge = "bearer"
)

// 300ms is the default fallback period for go's DNS dialer but we could make this configurable.
var fallbackDelay = 300 * time.Millisecond

// pingResp captures the outcome of pinging a registry's /v2/ endpoint.
type pingResp struct {
	challenge challenge

	// Following the challenge there are often key/value pairs
	// e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz"
	parameters map[string]string

	// The registry's scheme to use. Communicates whether we fell back to http.
	scheme string
}
||||
|
||||
func (c challenge) Canonical() challenge { |
||||
return challenge(strings.ToLower(string(c))) |
||||
} |
||||
|
||||
// ping probes the registry's /v2/ endpoint to discover its authentication
// challenge and working scheme. HTTPS is always attempted; HTTP is raced as
// a fallback only when the registry was configured as insecure.
func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingResp, error) {
	// This first attempts to use "https" for every request, falling back to http
	// if the registry matches our localhost heuristic or if it is intentionally
	// set to insecure via name.NewInsecureRegistry.
	schemes := []string{"https"}
	if reg.Scheme() == "http" {
		schemes = append(schemes, "http")
	}
	if len(schemes) == 1 {
		return pingSingle(ctx, reg, t, schemes[0])
	}
	return pingParallel(ctx, reg, t, schemes)
}
||||
|
||||
// pingSingle GETs {scheme}://{registry}/v2/ and classifies the
// authentication challenge: anonymous on 200, the advertised scheme on 401,
// and an error for any other status.
func pingSingle(ctx context.Context, reg name.Registry, t http.RoundTripper, scheme string) (*pingResp, error) {
	client := http.Client{Transport: t}
	url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name())
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := client.Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	defer func() {
		// By draining the body, make sure to reuse the connection made by
		// the ping for the following access to the registry
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}()

	switch resp.StatusCode {
	case http.StatusOK:
		// If we get a 200, then no authentication is needed.
		return &pingResp{
			challenge: anonymous,
			scheme:    scheme,
		}, nil
	case http.StatusUnauthorized:
		if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 {
			// If we hit more than one, let's try to find one that we know how to handle.
			wac := pickFromMultipleChallenges(challenges)
			return &pingResp{
				challenge:  challenge(wac.Scheme).Canonical(),
				parameters: wac.Parameters,
				scheme:     scheme,
			}, nil
		}
		// Otherwise, just return the challenge without parameters.
		return &pingResp{
			challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(),
			scheme:    scheme,
		}, nil
	default:
		// Any other status is unexpected; turn it into a structured error.
		return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized)
	}
}
||||
|
||||
// Based on the golang happy eyeballs dialParallel impl in net/dial.go.
//
// pingParallel races pingSingle over the given schemes: the primary (https,
// schemes[0]) starts immediately and the fallback (schemes[1]) starts after
// fallbackDelay. The first success wins; if both fail, both errors are
// returned combined.
func pingParallel(ctx context.Context, reg name.Registry, t http.RoundTripper, schemes []string) (*pingResp, error) {
	// Closed on return so losing racers unblock instead of leaking.
	returned := make(chan struct{})
	defer close(returned)

	type pingResult struct {
		*pingResp
		error
		primary bool
		done    bool
	}

	results := make(chan pingResult)

	startRacer := func(ctx context.Context, scheme string) {
		pr, err := pingSingle(ctx, reg, t, scheme)
		select {
		case results <- pingResult{pingResp: pr, error: err, primary: scheme == "https", done: true}:
		case <-returned:
			// The other racer already won and pingParallel returned.
			if pr != nil {
				logs.Debug.Printf("%s lost race", scheme)
			}
		}
	}

	var primary, fallback pingResult

	primaryCtx, primaryCancel := context.WithCancel(ctx)
	defer primaryCancel()
	go startRacer(primaryCtx, schemes[0])

	fallbackTimer := time.NewTimer(fallbackDelay)
	defer fallbackTimer.Stop()

	for {
		select {
		case <-fallbackTimer.C:
			// Primary didn't finish within fallbackDelay: start the fallback.
			// The timer fires at most once, so only one fallback is started.
			fallbackCtx, fallbackCancel := context.WithCancel(ctx)
			defer fallbackCancel()
			go startRacer(fallbackCtx, schemes[1])

		case res := <-results:
			if res.error == nil {
				return res.pingResp, nil
			}
			if res.primary {
				primary = res
			} else {
				fallback = res
			}
			if primary.done && fallback.done {
				return nil, multierrs([]error{primary.error, fallback.error})
			}
			if res.primary && fallbackTimer.Stop() {
				// Primary failed and we haven't started the fallback,
				// reset time to start fallback immediately.
				fallbackTimer.Reset(0)
			}
		}
	}
}
||||
|
||||
func pickFromMultipleChallenges(challenges []authchallenge.Challenge) authchallenge.Challenge { |
||||
// It might happen there are multiple www-authenticate headers, e.g. `Negotiate` and `Basic`.
|
||||
// Picking simply the first one could result eventually in `unrecognized challenge` error,
|
||||
// that's why we're looping through the challenges in search for one that can be handled.
|
||||
allowedSchemes := []string{"basic", "bearer"} |
||||
|
||||
for _, wac := range challenges { |
||||
currentScheme := strings.ToLower(wac.Scheme) |
||||
for _, allowed := range allowedSchemes { |
||||
if allowed == currentScheme { |
||||
return wac |
||||
} |
||||
} |
||||
} |
||||
|
||||
return challenges[0] |
||||
} |
||||
|
||||
type multierrs []error |
||||
|
||||
func (m multierrs) Error() string { |
||||
var b strings.Builder |
||||
hasWritten := false |
||||
for _, err := range m { |
||||
if hasWritten { |
||||
b.WriteString("; ") |
||||
} |
||||
hasWritten = true |
||||
b.WriteString(err.Error()) |
||||
} |
||||
return b.String() |
||||
} |
||||
|
||||
func (m multierrs) As(target any) bool { |
||||
for _, err := range m { |
||||
if errors.As(err, target) { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
func (m multierrs) Is(target error) bool { |
||||
for _, err := range m { |
||||
if errors.Is(err, target) { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
111
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go
generated
vendored
111
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go
generated
vendored
@ -0,0 +1,111 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport |
||||
|
||||
import ( |
||||
"net/http" |
||||
"time" |
||||
|
||||
"github.com/google/go-containerregistry/internal/retry" |
||||
) |
||||
|
||||
// Sleep for 0.1 then 0.3 seconds. This should cover networking blips.
// (Steps=3 with Factor=3.0 yields delays of roughly 100ms, 300ms, 900ms,
// each perturbed by up to 10% jitter.)
var defaultBackoff = retry.Backoff{
	Duration: 100 * time.Millisecond,
	Factor:   3.0,
	Jitter:   0.1,
	Steps:    3,
}
||||
|
||||
// Compile-time check that retryTransport satisfies http.RoundTripper.
var _ http.RoundTripper = (*retryTransport)(nil)

// retryTransport wraps a RoundTripper and retries temporary network errors.
type retryTransport struct {
	inner     http.RoundTripper // the transport being wrapped
	backoff   retry.Backoff     // delay schedule between attempts
	predicate retry.Predicate   // decides whether an error is retryable
	codes     []int             // response status codes treated as retryable
}
||||
|
||||
// Option is a functional option for retryTransport.
type Option func(*options)

// options collects the configurable knobs applied by Option values
// before constructing a retryTransport.
type options struct {
	backoff   retry.Backoff
	predicate retry.Predicate
	codes     []int
}

// Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib
type Backoff = retry.Backoff
||||
|
||||
// WithRetryBackoff sets the backoff for retry operations.
|
||||
func WithRetryBackoff(backoff Backoff) Option { |
||||
return func(o *options) { |
||||
o.backoff = backoff |
||||
} |
||||
} |
||||
|
||||
// WithRetryPredicate sets the predicate for retry operations.
|
||||
func WithRetryPredicate(predicate func(error) bool) Option { |
||||
return func(o *options) { |
||||
o.predicate = predicate |
||||
} |
||||
} |
||||
|
||||
// WithRetryStatusCodes sets which http response codes will be retried.
|
||||
func WithRetryStatusCodes(codes ...int) Option { |
||||
return func(o *options) { |
||||
o.codes = codes |
||||
} |
||||
} |
||||
|
||||
// NewRetry returns a transport that retries errors.
|
||||
func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper { |
||||
o := &options{ |
||||
backoff: defaultBackoff, |
||||
predicate: retry.IsTemporary, |
||||
} |
||||
|
||||
for _, opt := range opts { |
||||
opt(o) |
||||
} |
||||
|
||||
return &retryTransport{ |
||||
inner: inner, |
||||
backoff: o.backoff, |
||||
predicate: o.predicate, |
||||
codes: o.codes, |
||||
} |
||||
} |
||||
|
||||
// RoundTrip implements http.RoundTripper, retrying the inner round trip
// according to the configured predicate, backoff, and status codes.
func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err error) {
	roundtrip := func() error {
		out, err = t.inner.RoundTrip(in)
		if !retry.Ever(in.Context()) {
			// Retries are disabled for this request's context; return the
			// first attempt's result as-is.
			return nil
		}
		if out != nil {
			for _, code := range t.codes {
				if out.StatusCode == code {
					// Surface a configured status code as an error so the
					// retry predicate can decide to retry it.
					return CheckError(out)
				}
			}
		}
		return err
	}
	// The retry loop's return value is deliberately ignored: out and err are
	// captured by the closure and already hold the final attempt's result.
	retry.Retry(roundtrip, t.predicate, t.backoff)
	return
}
44
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go
generated
vendored
44
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go
generated
vendored
@ -0,0 +1,44 @@ |
||||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport |
||||
|
||||
import ( |
||||
"net/http" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
) |
||||
|
||||
// schemeTransport rewrites request URLs to use the scheme (http/https)
// that the initial registry ping determined to be working.
type schemeTransport struct {
	// Scheme we should use, determined by ping response.
	scheme string

	// Registry we're talking to.
	registry name.Registry

	// Wrapped by schemeTransport.
	inner http.RoundTripper
}
||||
|
||||
// RoundTrip implements http.RoundTripper
func (st *schemeTransport) RoundTrip(in *http.Request) (*http.Response, error) {
	// When we ping() the registry, we determine whether to use http or https
	// based on which scheme was successful. That is only valid for the
	// registry server and not e.g. a separate token server or blob storage,
	// so we should only override the scheme if the host is the registry.
	if matchesHost(st.registry, in, st.scheme) {
		in.URL.Scheme = st.scheme
	}
	return st.inner.RoundTrip(in)
}
24
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go
generated
vendored
24
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go
generated
vendored
@ -0,0 +1,24 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport |
||||
|
||||
// Scopes suitable to qualify each Repository
const (
	PullScope string = "pull"
	PushScope string = "push,pull"
	// For now DELETE is PUSH, which is the read/write ACL.
	DeleteScope  string = PushScope
	CatalogScope string = "catalog"
)
116
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
generated
vendored
116
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
generated
vendored
@ -0,0 +1,116 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"net/http" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
) |
||||
|
||||
// New returns a new RoundTripper based on the provided RoundTripper that has been
// setup to authenticate with the remote registry "reg", in the capacity
// laid out by the specified scopes.
//
// Deprecated: Use NewWithContext.
func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
	return NewWithContext(context.Background(), reg, auth, t, scopes)
}
||||
|
||||
// NewWithContext returns a new RoundTripper based on the provided RoundTripper that has been
|
||||
// set up to authenticate with the remote registry "reg", in the capacity
|
||||
// laid out by the specified scopes.
|
||||
// In case the RoundTripper is already of the type Wrapper it assumes
|
||||
// authentication was already done prior to this call, so it just returns
|
||||
// the provided RoundTripper without further action
|
||||
func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) { |
||||
// When the transport provided is of the type Wrapper this function assumes that the caller already
|
||||
// executed the necessary login and check.
|
||||
switch t.(type) { |
||||
case *Wrapper: |
||||
return t, nil |
||||
} |
||||
// The handshake:
|
||||
// 1. Use "t" to ping() the registry for the authentication challenge.
|
||||
//
|
||||
// 2a. If we get back a 200, then simply use "t".
|
||||
//
|
||||
// 2b. If we get back a 401 with a Basic challenge, then use a transport
|
||||
// that just attachs auth each roundtrip.
|
||||
//
|
||||
// 2c. If we get back a 401 with a Bearer challenge, then use a transport
|
||||
// that attaches a bearer token to each request, and refreshes is on 401s.
|
||||
// Perform an initial refresh to seed the bearer token.
|
||||
|
||||
// First we ping the registry to determine the parameters of the authentication handshake
|
||||
// (if one is even necessary).
|
||||
pr, err := ping(ctx, reg, t) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
// Wrap t with a useragent transport unless we already have one.
|
||||
if _, ok := t.(*userAgentTransport); !ok { |
||||
t = NewUserAgent(t, "") |
||||
} |
||||
|
||||
// Wrap t in a transport that selects the appropriate scheme based on the ping response.
|
||||
t = &schemeTransport{ |
||||
scheme: pr.scheme, |
||||
registry: reg, |
||||
inner: t, |
||||
} |
||||
|
||||
switch pr.challenge.Canonical() { |
||||
case anonymous, basic: |
||||
return &Wrapper{&basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}}, nil |
||||
case bearer: |
||||
// We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth.
|
||||
realm, ok := pr.parameters["realm"] |
||||
if !ok { |
||||
return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters) |
||||
} |
||||
service := pr.parameters["service"] |
||||
bt := &bearerTransport{ |
||||
inner: t, |
||||
basic: auth, |
||||
realm: realm, |
||||
registry: reg, |
||||
service: service, |
||||
scopes: scopes, |
||||
scheme: pr.scheme, |
||||
} |
||||
if err := bt.refresh(ctx); err != nil { |
||||
return nil, err |
||||
} |
||||
return &Wrapper{bt}, nil |
||||
default: |
||||
return nil, fmt.Errorf("unrecognized challenge: %s", pr.challenge) |
||||
} |
||||
} |
||||
|
||||
// Wrapper results in *not* wrapping supplied transport with additional logic such as retries, useragent and debug logging
// Consumers are opt-ing into providing their own transport without any additional wrapping.
type Wrapper struct {
	inner http.RoundTripper
}

// RoundTrip delegates to the inner RoundTripper
func (w *Wrapper) RoundTrip(in *http.Request) (*http.Response, error) {
	return w.inner.RoundTrip(in)
}
94
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
generated
vendored
94
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
generated
vendored
@ -0,0 +1,94 @@ |
||||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net/http" |
||||
"runtime/debug" |
||||
) |
||||
|
||||
var (
	// Version can be set via:
	// -ldflags="-X 'github.com/google/go-containerregistry/pkg/v1/remote/transport.Version=$TAG'"
	Version string

	// ggcrVersion is the User-Agent fragment for this module; init() extends
	// it with the build-time version when one can be determined.
	ggcrVersion = defaultUserAgent
)

const (
	defaultUserAgent = "go-containerregistry"
	moduleName       = "github.com/google/go-containerregistry"
)
||||
|
||||
// userAgentTransport sets a fixed User-Agent header on every request
// before delegating to the wrapped transport.
type userAgentTransport struct {
	inner http.RoundTripper
	ua    string
}
||||
|
||||
// init resolves the module version once at startup so every transport
// shares the same "go-containerregistry/vX.Y.Z" User-Agent fragment.
func init() {
	if v := version(); v != "" {
		ggcrVersion = fmt.Sprintf("%s/%s", defaultUserAgent, v)
	}
}
||||
|
||||
func version() string { |
||||
if Version != "" { |
||||
// Version was set via ldflags, just return it.
|
||||
return Version |
||||
} |
||||
|
||||
info, ok := debug.ReadBuildInfo() |
||||
if !ok { |
||||
return "" |
||||
} |
||||
|
||||
// Happens for crane and gcrane.
|
||||
if info.Main.Path == moduleName { |
||||
return info.Main.Version |
||||
} |
||||
|
||||
// Anything else.
|
||||
for _, dep := range info.Deps { |
||||
if dep.Path == moduleName { |
||||
return dep.Version |
||||
} |
||||
} |
||||
|
||||
return "" |
||||
} |
||||
|
||||
// NewUserAgent returns an http.Roundtripper that sets the user agent to
|
||||
// The provided string plus additional go-containerregistry information,
|
||||
// e.g. if provided "crane/v0.1.4" and this modules was built at v0.1.4:
|
||||
//
|
||||
// User-Agent: crane/v0.1.4 go-containerregistry/v0.1.4
|
||||
func NewUserAgent(inner http.RoundTripper, ua string) http.RoundTripper { |
||||
if ua == "" { |
||||
ua = ggcrVersion |
||||
} else { |
||||
ua = fmt.Sprintf("%s %s", ua, ggcrVersion) |
||||
} |
||||
return &userAgentTransport{ |
||||
inner: inner, |
||||
ua: ua, |
||||
} |
||||
} |
||||
|
||||
// RoundTrip implements http.RoundTripper
func (ut *userAgentTransport) RoundTrip(in *http.Request) (*http.Response, error) {
	// Unconditionally overwrite any caller-provided User-Agent header.
	in.Header.Set("User-Agent", ut.ua)
	return ut.inner.RoundTrip(in)
}
@ -0,0 +1,876 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"net/http" |
||||
"net/url" |
||||
"strings" |
||||
|
||||
"github.com/google/go-containerregistry/internal/redact" |
||||
"github.com/google/go-containerregistry/internal/retry" |
||||
"github.com/google/go-containerregistry/pkg/logs" |
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/partial" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport" |
||||
"github.com/google/go-containerregistry/pkg/v1/stream" |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
"golang.org/x/sync/errgroup" |
||||
) |
||||
|
||||
// Taggable is an interface that enables a manifest PUT (e.g. for tagging).
type Taggable interface {
	// RawManifest returns the serialized manifest bytes to push.
	RawManifest() ([]byte, error)
}
||||
|
||||
// Write pushes the provided img to the specified image reference.
// When progress updates were requested via options, the total upload size
// is computed up front and the updates channel is closed on return; the
// named return rerr lets the deferred p.err report the final outcome.
func Write(ref name.Reference, img v1.Image, options ...Option) (rerr error) {
	o, err := makeOptions(ref.Context(), options...)
	if err != nil {
		return err
	}

	var p *progress
	if o.updates != nil {
		p = &progress{updates: o.updates}
		p.lastUpdate = &v1.Update{}
		p.lastUpdate.Total, err = countImage(img, o.allowNondistributableArtifacts)
		if err != nil {
			return err
		}
		// LIFO defer order: the error is reported before the channel closes.
		defer close(o.updates)
		defer func() { _ = p.err(rerr) }()
	}
	return writeImage(o.context, ref, img, o, p)
}
||||
|
||||
// writeImage uploads img's layers (concurrently, deduplicated by digest),
// then its config blob, and finally commits the manifest to ref.
func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *options, progress *progress) error {
	ls, err := img.Layers()
	if err != nil {
		return err
	}
	scopes := scopesForUploadingImage(ref.Context(), ls)
	tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
	if err != nil {
		return err
	}
	w := writer{
		repo:      ref.Context(),
		client:    &http.Client{Transport: tr},
		progress:  progress,
		backoff:   o.retryBackoff,
		predicate: o.retryPredicate,
	}

	// Upload individual blobs and collect any errors.
	blobChan := make(chan v1.Layer, 2*o.jobs)
	g, gctx := errgroup.WithContext(ctx)
	for i := 0; i < o.jobs; i++ {
		// Start N workers consuming blobs to upload.
		g.Go(func() error {
			for b := range blobChan {
				if err := w.uploadOne(gctx, b); err != nil {
					return err
				}
			}
			return nil
		})
	}

	// Upload individual layers in goroutines and collect any errors.
	// If we can dedupe by the layer digest, try to do so. If we can't determine
	// the digest for whatever reason, we can't dedupe and might re-upload.
	g.Go(func() error {
		// Closing blobChan lets the worker goroutines above terminate.
		defer close(blobChan)
		uploaded := map[v1.Hash]bool{}
		for _, l := range ls {
			l := l

			// Handle foreign layers.
			mt, err := l.MediaType()
			if err != nil {
				return err
			}
			if !mt.IsDistributable() && !o.allowNondistributableArtifacts {
				continue
			}

			// Streaming layers calculate their digests while uploading them. Assume
			// an error here indicates we need to upload the layer.
			h, err := l.Digest()
			if err == nil {
				// If we can determine the layer's digest ahead of
				// time, use it to dedupe uploads.
				if uploaded[h] {
					continue // Already uploading.
				}
				uploaded[h] = true
			}
			select {
			case blobChan <- l:
			case <-gctx.Done():
				return gctx.Err()
			}
		}
		return nil
	})

	if l, err := partial.ConfigLayer(img); err != nil {
		// We can't read the ConfigLayer, possibly because of streaming layers,
		// since the layer DiffIDs haven't been calculated yet. Attempt to wait
		// for the other layers to be uploaded, then try the config again.
		if err := g.Wait(); err != nil {
			return err
		}

		// Now that all the layers are uploaded, try to upload the config file blob.
		l, err := partial.ConfigLayer(img)
		if err != nil {
			return err
		}
		if err := w.uploadOne(ctx, l); err != nil {
			return err
		}
	} else {
		// We *can* read the ConfigLayer, so upload it concurrently with the layers.
		g.Go(func() error {
			return w.uploadOne(gctx, l)
		})

		// Wait for the layers + config.
		if err := g.Wait(); err != nil {
			return err
		}
	}

	// With all of the constituent elements uploaded, upload the manifest
	// to commit the image.
	return w.commitManifest(ctx, img, ref)
}
||||
|
||||
// writer writes the elements of an image to a remote image reference.
type writer struct {
	repo   name.Repository // target repository
	client *http.Client    // client whose transport was set up by writeImage

	progress  *progress       // optional upload progress reporting (may be nil)
	backoff   Backoff         // retry backoff for blob uploads
	predicate retry.Predicate // decides which upload errors are retried
}
||||
|
||||
// url returns a url.Url for the specified path in the context of this remote image reference.
func (w *writer) url(path string) url.URL {
	return url.URL{
		Scheme: w.repo.Registry.Scheme(),
		Host:   w.repo.RegistryStr(),
		Path:   path,
	}
}
||||
|
||||
// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence.
|
||||
func (w *writer) nextLocation(resp *http.Response) (string, error) { |
||||
loc := resp.Header.Get("Location") |
||||
if len(loc) == 0 { |
||||
return "", errors.New("missing Location header") |
||||
} |
||||
u, err := url.Parse(loc) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
|
||||
// If the location header returned is just a url path, then fully qualify it.
|
||||
// We cannot simply call w.url, since there might be an embedded query string.
|
||||
return resp.Request.URL.ResolveReference(u).String(), nil |
||||
} |
||||
|
||||
// checkExistingBlob checks if a blob exists already in the repository by making a
|
||||
// HEAD request to the blob store API. GCR performs an existence check on the
|
||||
// initiation if "mount" is specified, even if no "from" sources are specified.
|
||||
// However, this is not broadly applicable to all registries, e.g. ECR.
|
||||
func (w *writer) checkExistingBlob(ctx context.Context, h v1.Hash) (bool, error) { |
||||
u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.repo.RepositoryStr(), h.String())) |
||||
|
||||
req, err := http.NewRequest(http.MethodHead, u.String(), nil) |
||||
if err != nil { |
||||
return false, err |
||||
} |
||||
|
||||
resp, err := w.client.Do(req.WithContext(ctx)) |
||||
if err != nil { |
||||
return false, err |
||||
} |
||||
defer resp.Body.Close() |
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { |
||||
return false, err |
||||
} |
||||
|
||||
return resp.StatusCode == http.StatusOK, nil |
||||
} |
||||
|
||||
// checkExistingManifest checks if a manifest exists already in the repository
|
||||
// by making a HEAD request to the manifest API.
|
||||
func (w *writer) checkExistingManifest(ctx context.Context, h v1.Hash, mt types.MediaType) (bool, error) { |
||||
u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), h.String())) |
||||
|
||||
req, err := http.NewRequest(http.MethodHead, u.String(), nil) |
||||
if err != nil { |
||||
return false, err |
||||
} |
||||
req.Header.Set("Accept", string(mt)) |
||||
|
||||
resp, err := w.client.Do(req.WithContext(ctx)) |
||||
if err != nil { |
||||
return false, err |
||||
} |
||||
defer resp.Body.Close() |
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { |
||||
return false, err |
||||
} |
||||
|
||||
return resp.StatusCode == http.StatusOK, nil |
||||
} |
||||
|
||||
// initiateUpload initiates the blob upload, which starts with a POST that can
// optionally include the hash of the layer and a list of repositories from
// which that layer might be read. On failure, an error is returned.
// On success, the layer was either mounted (nothing more to do) or a blob
// upload was initiated and the body of that blob should be sent to the returned
// location.
func (w *writer) initiateUpload(ctx context.Context, from, mount, origin string) (location string, mounted bool, err error) {
	u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.repo.RepositoryStr()))
	uv := url.Values{}
	if mount != "" && from != "" {
		// Quay will fail if we specify a "mount" without a "from".
		uv.Set("mount", mount)
		uv.Set("from", from)
		if origin != "" {
			uv.Set("origin", origin)
		}
	}
	u.RawQuery = uv.Encode()

	// Make the request to initiate the blob upload.
	req, err := http.NewRequest(http.MethodPost, u.String(), nil)
	if err != nil {
		return "", false, err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := w.client.Do(req.WithContext(ctx))
	if err != nil {
		return "", false, err
	}
	defer resp.Body.Close()

	if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil {
		if origin != "" && origin != w.repo.RegistryStr() {
			// Some registries reject cross-registry mount attempts outright;
			// retry the whole initiation without any mount parameters.
			// https://github.com/google/go-containerregistry/issues/1404
			logs.Warn.Printf("retrying without mount: %v", err)
			return w.initiateUpload(ctx, "", "", "")
		}
		return "", false, err
	}

	// Check the response code to determine the result.
	switch resp.StatusCode {
	case http.StatusCreated:
		// We're done, we were able to fast-path: the blob was mounted from
		// another repository and needs no upload.
		return "", true, nil
	case http.StatusAccepted:
		// Proceed to PATCH, upload has begun.
		loc, err := w.nextLocation(resp)
		return loc, false, err
	default:
		// CheckError above only admits 201/202, so this cannot be reached.
		panic("Unreachable: initiateUpload")
	}
}
||||
|
||||
// streamBlob streams the contents of the blob to the specified location.
// On failure, this will return an error. On success, this will return the location
// header indicating how to commit the streamed blob.
func (w *writer) streamBlob(ctx context.Context, layer v1.Layer, streamLocation string) (commitLocation string, rerr error) {
	// On any error, roll back progress reported so far so a retried upload
	// does not double-count bytes. reset is a no-op unless progress is set.
	reset := func() {}
	defer func() {
		if rerr != nil {
			reset()
		}
	}()
	blob, err := layer.Compressed()
	if err != nil {
		return "", err
	}

	// getBody lets net/http re-obtain the body for transparent retries
	// (e.g. redirects); by default it re-opens the compressed stream.
	getBody := layer.Compressed
	if w.progress != nil {
		// Wrap the body so reads are counted; count is shared with the
		// getBody re-open path and with reset for rollback.
		var count int64
		blob = &progressReader{rc: blob, progress: w.progress, count: &count}
		getBody = func() (io.ReadCloser, error) {
			blob, err := layer.Compressed()
			if err != nil {
				return nil, err
			}
			return &progressReader{rc: blob, progress: w.progress, count: &count}, nil
		}
		reset = func() {
			w.progress.complete(-count)
		}
	}

	req, err := http.NewRequest(http.MethodPatch, streamLocation, blob)
	if err != nil {
		return "", err
	}
	if _, ok := layer.(*stream.Layer); !ok {
		// We can't retry streaming layers: their contents can only be read once.
		req.GetBody = getBody
	}
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := w.client.Do(req.WithContext(ctx))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil {
		return "", err
	}

	// The blob has been uploaded, return the location header indicating
	// how to commit this layer.
	return w.nextLocation(resp)
}
||||
|
||||
// commitBlob commits this blob by sending a PUT to the location returned from
|
||||
// streaming the blob.
|
||||
func (w *writer) commitBlob(ctx context.Context, location, digest string) error { |
||||
u, err := url.Parse(location) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
v := u.Query() |
||||
v.Set("digest", digest) |
||||
u.RawQuery = v.Encode() |
||||
|
||||
req, err := http.NewRequest(http.MethodPut, u.String(), nil) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
req.Header.Set("Content-Type", "application/octet-stream") |
||||
|
||||
resp, err := w.client.Do(req.WithContext(ctx)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
defer resp.Body.Close() |
||||
|
||||
return transport.CheckError(resp, http.StatusCreated) |
||||
} |
||||
|
||||
// incrProgress increments and sends a progress update, if WithProgress is used.
|
||||
func (w *writer) incrProgress(written int64) { |
||||
if w.progress == nil { |
||||
return |
||||
} |
||||
w.progress.complete(written) |
||||
} |
||||
|
||||
// uploadOne performs a complete upload of a single layer: existence check,
// upload initiation (possibly a cross-repo mount), streaming, and commit.
// The whole sequence is retried according to w.predicate and w.backoff.
func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error {
	tryUpload := func() error {
		// Retries happen at this function's level; disable nested retries.
		ctx := retry.Never(ctx)
		var from, mount, origin string
		if h, err := l.Digest(); err == nil {
			// If we know the digest, this isn't a streaming layer. Do an existence
			// check so we can skip uploading the layer if possible.
			existing, err := w.checkExistingBlob(ctx, h)
			if err != nil {
				return err
			}
			if existing {
				size, err := l.Size()
				if err != nil {
					return err
				}
				w.incrProgress(size)
				logs.Progress.Printf("existing blob: %v", h)
				return nil
			}

			// Ask the registry to try mounting this digest during initiation.
			mount = h.String()
		}
		if ml, ok := l.(*MountableLayer); ok {
			// The layer knows another repository that already holds it;
			// offer that as a cross-repo mount source.
			from = ml.Reference.Context().RepositoryStr()
			origin = ml.Reference.Context().RegistryStr()
		}

		location, mounted, err := w.initiateUpload(ctx, from, mount, origin)
		if err != nil {
			return err
		} else if mounted {
			// Mounted: nothing to stream, but still account for the bytes.
			size, err := l.Size()
			if err != nil {
				return err
			}
			w.incrProgress(size)
			h, err := l.Digest()
			if err != nil {
				return err
			}
			logs.Progress.Printf("mounted blob: %s", h.String())
			return nil
		}

		// Only log layers with +json or +yaml. We can let through other stuff if it becomes popular.
		// TODO(opencontainers/image-spec#791): Would be great to have an actual parser.
		mt, err := l.MediaType()
		if err != nil {
			return err
		}
		smt := string(mt)
		if !(strings.HasSuffix(smt, "+json") || strings.HasSuffix(smt, "+yaml")) {
			ctx = redact.NewContext(ctx, "omitting binary blobs from logs")
		}

		location, err = w.streamBlob(ctx, l, location)
		if err != nil {
			return err
		}

		// Digest is re-queried here because streaming layers only know their
		// digest after their contents have been consumed by streamBlob.
		h, err := l.Digest()
		if err != nil {
			return err
		}
		digest := h.String()

		if err := w.commitBlob(ctx, location, digest); err != nil {
			return err
		}
		logs.Progress.Printf("pushed blob: %s", digest)
		return nil
	}

	return retry.Retry(tryUpload, w.predicate, w.backoff)
}
||||
|
||||
// withLayer is implemented by indexes that can also resolve raw (non-image)
// blobs by digest; used as a workaround for #819.
type withLayer interface {
	Layer(v1.Hash) (v1.Layer, error)
}
||||
|
||||
// writeIndex pushes every manifest referenced by ii — recursing into
// sub-indexes, writing images, and uploading bare layers (workaround for
// #819) — skipping anything that already exists remotely, and then commits
// the index manifest itself under ref.
func (w *writer) writeIndex(ctx context.Context, ref name.Reference, ii v1.ImageIndex, options ...Option) error {
	index, err := ii.IndexManifest()
	if err != nil {
		return err
	}

	o, err := makeOptions(ref.Context(), options...)
	if err != nil {
		return err
	}

	// TODO(#803): Pipe through remote.WithJobs and upload these in parallel.
	for _, desc := range index.Manifests {
		// Shadow ref with a by-digest reference for this child manifest.
		ref := ref.Context().Digest(desc.Digest.String())
		exists, err := w.checkExistingManifest(ctx, desc.Digest, desc.MediaType)
		if err != nil {
			return err
		}
		if exists {
			logs.Progress.Print("existing manifest: ", desc.Digest)
			continue
		}

		switch desc.MediaType {
		case types.OCIImageIndex, types.DockerManifestList:
			// Recurse into nested indexes.
			ii, err := ii.ImageIndex(desc.Digest)
			if err != nil {
				return err
			}
			if err := w.writeIndex(ctx, ref, ii, options...); err != nil {
				return err
			}
		case types.OCIManifestSchema1, types.DockerManifestSchema2:
			img, err := ii.Image(desc.Digest)
			if err != nil {
				return err
			}
			if err := writeImage(ctx, ref, img, o, w.progress); err != nil {
				return err
			}
		default:
			// Workaround for #819: the descriptor is neither an index nor an
			// image; if the index can hand us a raw layer, upload it as a blob.
			if wl, ok := ii.(withLayer); ok {
				layer, err := wl.Layer(desc.Digest)
				if err != nil {
					return err
				}
				if err := w.uploadOne(ctx, layer); err != nil {
					return err
				}
			}
		}
	}

	// With all of the constituent elements uploaded, upload the manifest
	// to commit the image.
	return w.commitManifest(ctx, ii, ref)
}
||||
|
||||
// withMediaType is implemented by Taggables that can report their own media
// type, which is used as the Content-Type when PUTting manifests.
type withMediaType interface {
	MediaType() (types.MediaType, error)
}
||||
|
||||
// This is really silly, but go interfaces don't let me satisfy remote.Taggable
|
||||
// with remote.Descriptor because of name collisions between method names and
|
||||
// struct fields.
|
||||
//
|
||||
// Use reflection to either pull the v1.Descriptor out of remote.Descriptor or
|
||||
// create a descriptor based on the RawManifest and (optionally) MediaType.
|
||||
func unpackTaggable(t Taggable) ([]byte, *v1.Descriptor, error) { |
||||
if d, ok := t.(*Descriptor); ok { |
||||
return d.Manifest, &d.Descriptor, nil |
||||
} |
||||
b, err := t.RawManifest() |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
|
||||
// A reasonable default if Taggable doesn't implement MediaType.
|
||||
mt := types.DockerManifestSchema2 |
||||
|
||||
if wmt, ok := t.(withMediaType); ok { |
||||
m, err := wmt.MediaType() |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
mt = m |
||||
} |
||||
|
||||
h, sz, err := v1.SHA256(bytes.NewReader(b)) |
||||
if err != nil { |
||||
return nil, nil, err |
||||
} |
||||
|
||||
return b, &v1.Descriptor{ |
||||
MediaType: mt, |
||||
Size: sz, |
||||
Digest: h, |
||||
}, nil |
||||
} |
||||
|
||||
// commitManifest does a PUT of the image's manifest under ref, retrying
// according to w.predicate and w.backoff.
func (w *writer) commitManifest(ctx context.Context, t Taggable, ref name.Reference) error {
	tryUpload := func() error {
		// Retries happen at this function's level; disable nested retries.
		ctx := retry.Never(ctx)
		raw, desc, err := unpackTaggable(t)
		if err != nil {
			return err
		}

		// ref.Identifier() is either a tag or a digest string.
		u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), ref.Identifier()))

		// Make the request to PUT the serialized manifest
		req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(raw))
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", string(desc.MediaType))

		resp, err := w.client.Do(req.WithContext(ctx))
		if err != nil {
			return err
		}
		defer resp.Body.Close()

		if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil {
			return err
		}

		// The image was successfully pushed!
		logs.Progress.Printf("%v: digest: %v size: %d", ref, desc.Digest, desc.Size)
		// Count the manifest bytes toward overall progress.
		w.incrProgress(int64(len(raw)))
		return nil
	}

	return retry.Retry(tryUpload, w.predicate, w.backoff)
}
||||
|
||||
func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string { |
||||
// use a map as set to remove duplicates scope strings
|
||||
scopeSet := map[string]struct{}{} |
||||
|
||||
for _, l := range layers { |
||||
if ml, ok := l.(*MountableLayer); ok { |
||||
// we will add push scope for ref.Context() after the loop.
|
||||
// for now we ask pull scope for references of the same registry
|
||||
if ml.Reference.Context().String() != repo.String() && ml.Reference.Context().Registry.String() == repo.Registry.String() { |
||||
scopeSet[ml.Reference.Scope(transport.PullScope)] = struct{}{} |
||||
} |
||||
} |
||||
} |
||||
|
||||
scopes := make([]string, 0) |
||||
// Push scope should be the first element because a few registries just look at the first scope to determine access.
|
||||
scopes = append(scopes, repo.Scope(transport.PushScope)) |
||||
|
||||
for scope := range scopeSet { |
||||
scopes = append(scopes, scope) |
||||
} |
||||
|
||||
return scopes |
||||
} |
||||
|
||||
// WriteIndex pushes the provided ImageIndex to the specified image reference.
// WriteIndex will attempt to push all of the referenced manifests before
// attempting to push the ImageIndex, to retain referential integrity.
func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr error) {
	o, err := makeOptions(ref.Context(), options...)
	if err != nil {
		return err
	}

	scopes := []string{ref.Scope(transport.PushScope)}
	tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
	if err != nil {
		return err
	}
	w := writer{
		repo:      ref.Context(),
		client:    &http.Client{Transport: tr},
		backoff:   o.retryBackoff,
		predicate: o.retryPredicate,
	}

	if o.updates != nil {
		w.progress = &progress{updates: o.updates}
		w.progress.lastUpdate = &v1.Update{}

		// Defers run LIFO: the final error is reported to the progress
		// channel before the updates channel is closed.
		defer close(o.updates)
		defer func() { w.progress.err(rerr) }()

		// Pre-compute the total byte count so progress updates carry a total.
		w.progress.lastUpdate.Total, err = countIndex(ii, o.allowNondistributableArtifacts)
		if err != nil {
			return err
		}
	}

	return w.writeIndex(o.context, ref, ii, options...)
}
||||
|
||||
// countImage counts the total size of all layers + config blob + manifest for
|
||||
// an image. It de-dupes duplicate layers.
|
||||
func countImage(img v1.Image, allowNondistributableArtifacts bool) (int64, error) { |
||||
var total int64 |
||||
ls, err := img.Layers() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
seen := map[v1.Hash]bool{} |
||||
for _, l := range ls { |
||||
// Handle foreign layers.
|
||||
mt, err := l.MediaType() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
if !mt.IsDistributable() && !allowNondistributableArtifacts { |
||||
continue |
||||
} |
||||
|
||||
// TODO: support streaming layers which update the total count as they write.
|
||||
if _, ok := l.(*stream.Layer); ok { |
||||
return 0, errors.New("cannot use stream.Layer and WithProgress") |
||||
} |
||||
|
||||
// Dedupe layers.
|
||||
d, err := l.Digest() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
if seen[d] { |
||||
continue |
||||
} |
||||
seen[d] = true |
||||
|
||||
size, err := l.Size() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
total += size |
||||
} |
||||
b, err := img.RawConfigFile() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
total += int64(len(b)) |
||||
size, err := img.Size() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
total += size |
||||
return total, nil |
||||
} |
||||
|
||||
// countIndex counts the total size of all images + sub-indexes for an index.
|
||||
// It does not attempt to de-dupe duplicate images, etc.
|
||||
func countIndex(idx v1.ImageIndex, allowNondistributableArtifacts bool) (int64, error) { |
||||
var total int64 |
||||
mf, err := idx.IndexManifest() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
|
||||
for _, desc := range mf.Manifests { |
||||
switch desc.MediaType { |
||||
case types.OCIImageIndex, types.DockerManifestList: |
||||
sidx, err := idx.ImageIndex(desc.Digest) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
size, err := countIndex(sidx, allowNondistributableArtifacts) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
total += size |
||||
case types.OCIManifestSchema1, types.DockerManifestSchema2: |
||||
simg, err := idx.Image(desc.Digest) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
size, err := countImage(simg, allowNondistributableArtifacts) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
total += size |
||||
default: |
||||
// Workaround for #819.
|
||||
if wl, ok := idx.(withLayer); ok { |
||||
layer, err := wl.Layer(desc.Digest) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
size, err := layer.Size() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
total += size |
||||
} |
||||
} |
||||
} |
||||
|
||||
size, err := idx.Size() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
total += size |
||||
return total, nil |
||||
} |
||||
|
||||
// WriteLayer uploads the provided Layer to the specified repo.
func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr error) {
	o, err := makeOptions(repo, options...)
	if err != nil {
		return err
	}
	scopes := scopesForUploadingImage(repo, []v1.Layer{layer})
	tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
	if err != nil {
		return err
	}
	w := writer{
		repo:      repo,
		client:    &http.Client{Transport: tr},
		backoff:   o.retryBackoff,
		predicate: o.retryPredicate,
	}

	if o.updates != nil {
		w.progress = &progress{updates: o.updates}
		w.progress.lastUpdate = &v1.Update{}

		// Defers run LIFO: the final error is reported to the progress
		// channel before the updates channel is closed.
		defer close(o.updates)
		defer func() { w.progress.err(rerr) }()

		// TODO: support streaming layers which update the total count as they write.
		if _, ok := layer.(*stream.Layer); ok {
			return errors.New("cannot use stream.Layer and WithProgress")
		}
		size, err := layer.Size()
		if err != nil {
			return err
		}
		w.progress.total(size)
	}
	return w.uploadOne(o.context, layer)
}
||||
|
||||
// Tag adds a tag to the given Taggable via PUT /v2/.../manifests/<tag>
//
// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and
// remote.Descriptor.
//
// If t implements MediaType, we will use that for the Content-Type, otherwise
// we will default to types.DockerManifestSchema2.
//
// Tag does not attempt to write anything other than the manifest, so callers
// should ensure that all blobs or manifests that are referenced by t exist
// in the target registry.
func Tag(tag name.Tag, t Taggable, options ...Option) error {
	// A name.Tag is just a specific kind of Reference, so Tag is a thin
	// alias for Put.
	return Put(tag, t, options...)
}
||||
|
||||
// Put adds a manifest from the given Taggable via PUT /v1/.../manifest/<ref>
|
||||
//
|
||||
// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and
|
||||
// remote.Descriptor.
|
||||
//
|
||||
// If t implements MediaType, we will use that for the Content-Type, otherwise
|
||||
// we will default to types.DockerManifestSchema2.
|
||||
//
|
||||
// Put does not attempt to write anything other than the manifest, so callers
|
||||
// should ensure that all blobs or manifests that are referenced by t exist
|
||||
// in the target registry.
|
||||
func Put(ref name.Reference, t Taggable, options ...Option) error { |
||||
o, err := makeOptions(ref.Context(), options...) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
scopes := []string{ref.Scope(transport.PushScope)} |
||||
|
||||
// TODO: This *always* does a token exchange. For some registries,
|
||||
// that's pretty slow. Some ideas;
|
||||
// * Tag could take a list of tags.
|
||||
// * Allow callers to pass in a transport.Transport, typecheck
|
||||
// it to allow them to reuse the transport across multiple calls.
|
||||
// * WithTag option to do multiple manifest PUTs in commitManifest.
|
||||
tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
w := writer{ |
||||
repo: ref.Context(), |
||||
client: &http.Client{Transport: tr}, |
||||
backoff: o.retryBackoff, |
||||
predicate: o.retryPredicate, |
||||
} |
||||
|
||||
return w.commitManifest(o.context, t, ref) |
||||
} |
@ -0,0 +1,68 @@ |
||||
# `stream` |
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream) |
||||
|
||||
The `stream` package contains an implementation of |
||||
[`v1.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1#Layer) |
||||
that supports _streaming_ access, i.e. the layer contents are read once and not |
||||
buffered. |
||||
|
||||
## Usage |
||||
|
||||
```go |
||||
package main |
||||
|
||||
import ( |
||||
"os" |
||||
|
||||
"github.com/google/go-containerregistry/pkg/name" |
||||
"github.com/google/go-containerregistry/pkg/v1/remote" |
||||
"github.com/google/go-containerregistry/pkg/v1/stream" |
||||
) |
||||
|
||||
// upload the contents of stdin as a layer to a local registry |
||||
func main() { |
||||
repo, err := name.NewRepository("localhost:5000/stream") |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
layer := stream.NewLayer(os.Stdin) |
||||
|
||||
if err := remote.WriteLayer(repo, layer); err != nil { |
||||
panic(err) |
||||
} |
||||
} |
||||
``` |
||||
|
||||
## Structure |
||||
|
||||
This implements the layer portion of an [image |
||||
upload](/pkg/v1/remote#anatomy-of-an-image-upload). We launch a goroutine that |
||||
is responsible for hashing the uncompressed contents to compute the `DiffID`, |
||||
gzipping them to produce the `Compressed` contents, and hashing/counting the |
||||
bytes to produce the `Digest`/`Size`. This goroutine writes to an |
||||
`io.PipeWriter`, which blocks until `Compressed` reads the gzipped contents from |
||||
the corresponding `io.PipeReader`. |
||||
|
||||
<p align="center"> |
||||
<img src="/images/stream.dot.svg" /> |
||||
</p> |
||||
|
||||
## Caveats |
||||
|
||||
This assumes that you have an uncompressed layer (i.e. a tarball) and would like |
||||
to compress it. Calling `Uncompressed` is always an error. Likewise, other |
||||
methods are invalid until the contents of `Compressed` have been completely |
||||
consumed and `Close`d. |
||||
|
||||
Using a `stream.Layer` will likely not work without careful consideration. For |
||||
example, in the `mutate` package, we defer computing the manifest and config |
||||
file until they are actually called. This allows you to `mutate.Append` a |
||||
streaming layer to an image without accidentally consuming it. Similarly, in |
||||
`remote.Write`, if calling `Digest` on a layer fails, we attempt to upload the |
||||
layer anyway, understanding that we may be dealing with a `stream.Layer` whose |
||||
contents need to be uploaded before we can upload the config file. |
||||
|
||||
Given the [structure](#structure) of how this is implemented, forgetting to |
||||
`Close` a `stream.Layer` will leak a goroutine. |
@ -0,0 +1,273 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package stream implements a single-pass streaming v1.Layer.
|
||||
package stream |
||||
|
||||
import ( |
||||
"bufio" |
||||
"compress/gzip" |
||||
"crypto/sha256" |
||||
"encoding/hex" |
||||
"errors" |
||||
"hash" |
||||
"io" |
||||
"os" |
||||
"sync" |
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1" |
||||
"github.com/google/go-containerregistry/pkg/v1/types" |
||||
) |
||||
|
||||
var (
	// ErrNotComputed is returned when the requested value is not yet
	// computed because the stream has not been consumed yet. Compare with
	// errors.Is.
	ErrNotComputed = errors.New("value not computed until stream is consumed")

	// ErrConsumed is returned by Compressed when the underlying stream has
	// already been consumed and closed; a streaming layer can only be read
	// once.
	ErrConsumed = errors.New("stream was already consumed")
)
||||
|
||||
// Layer is a streaming implementation of v1.Layer.
type Layer struct {
	// blob is the raw (uncompressed) content; it can be read only once.
	blob io.ReadCloser
	// consumed is set once the stream has been fully read and finalized.
	consumed bool
	// compression is the gzip level used when compressing blob.
	compression int

	// mu guards the computed fields below, which are populated by finalize
	// once the stream has been consumed.
	mu             sync.Mutex
	digest, diffID *v1.Hash
	size           int64
	mediaType      types.MediaType
}

// Compile-time check that *Layer satisfies v1.Layer.
var _ v1.Layer = (*Layer)(nil)
||||
|
||||
// LayerOption is a functional option applied to a Layer at construction time.
type LayerOption func(*Layer)

// WithCompressionLevel sets the gzip compression level.
// See `gzip.NewWriterLevel` for possible values.
func WithCompressionLevel(level int) LayerOption {
	return func(l *Layer) {
		l.compression = level
	}
}

// WithMediaType is a functional option for overriding the layer's media type.
func WithMediaType(mt types.MediaType) LayerOption {
	return func(l *Layer) {
		l.mediaType = mt
	}
}
||||
|
||||
// NewLayer creates a Layer from an io.ReadCloser.
|
||||
func NewLayer(rc io.ReadCloser, opts ...LayerOption) *Layer { |
||||
layer := &Layer{ |
||||
blob: rc, |
||||
compression: gzip.BestSpeed, |
||||
// We use DockerLayer for now as uncompressed layers
|
||||
// are unimplemented
|
||||
mediaType: types.DockerLayer, |
||||
} |
||||
|
||||
for _, opt := range opts { |
||||
opt(layer) |
||||
} |
||||
|
||||
return layer |
||||
} |
||||
|
||||
// Digest implements v1.Layer.
|
||||
func (l *Layer) Digest() (v1.Hash, error) { |
||||
l.mu.Lock() |
||||
defer l.mu.Unlock() |
||||
if l.digest == nil { |
||||
return v1.Hash{}, ErrNotComputed |
||||
} |
||||
return *l.digest, nil |
||||
} |
||||
|
||||
// DiffID implements v1.Layer.
|
||||
func (l *Layer) DiffID() (v1.Hash, error) { |
||||
l.mu.Lock() |
||||
defer l.mu.Unlock() |
||||
if l.diffID == nil { |
||||
return v1.Hash{}, ErrNotComputed |
||||
} |
||||
return *l.diffID, nil |
||||
} |
||||
|
||||
// Size implements v1.Layer.
|
||||
func (l *Layer) Size() (int64, error) { |
||||
l.mu.Lock() |
||||
defer l.mu.Unlock() |
||||
if l.size == 0 { |
||||
return 0, ErrNotComputed |
||||
} |
||||
return l.size, nil |
||||
} |
||||
|
||||
// MediaType implements v1.Layer
|
||||
func (l *Layer) MediaType() (types.MediaType, error) { |
||||
return l.mediaType, nil |
||||
} |
||||
|
||||
// Uncompressed implements v1.Layer.
|
||||
func (l *Layer) Uncompressed() (io.ReadCloser, error) { |
||||
return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented") |
||||
} |
||||
|
||||
// Compressed implements v1.Layer.
|
||||
func (l *Layer) Compressed() (io.ReadCloser, error) { |
||||
if l.consumed { |
||||
return nil, ErrConsumed |
||||
} |
||||
return newCompressedReader(l) |
||||
} |
||||
|
||||
// finalize sets the layer to consumed and computes all hash and size values.
|
||||
func (l *Layer) finalize(uncompressed, compressed hash.Hash, size int64) error { |
||||
l.mu.Lock() |
||||
defer l.mu.Unlock() |
||||
|
||||
diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(uncompressed.Sum(nil))) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
l.diffID = &diffID |
||||
|
||||
digest, err := v1.NewHash("sha256:" + hex.EncodeToString(compressed.Sum(nil))) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
l.digest = &digest |
||||
|
||||
l.size = size |
||||
l.consumed = true |
||||
return nil |
||||
} |
||||
|
||||
// compressedReader is the io.ReadCloser returned by Layer.Compressed: reads
// are served from the gzip pipe, and closer finalizes the layer's
// digest/size values.
type compressedReader struct {
	pr     io.Reader
	closer func() error
}
||||
|
||||
// newCompressedReader wires up a streaming compression pipeline for l:
// the layer's blob is copied through a gzip writer into an io.Pipe,
// hashing the uncompressed bytes (diffID) and the compressed bytes
// (digest) and counting the compressed size along the way. Closing the
// returned reader finalizes the layer with those values.
func newCompressedReader(l *Layer) (*compressedReader, error) {
	// Collect digests of compressed and uncompressed stream and size of
	// compressed stream.
	h := sha256.New()  // hashes the raw (uncompressed) contents -> diffID
	zh := sha256.New() // hashes the compressed stream -> digest
	count := &countWriter{}

	// gzip.Writer writes to the output stream via pipe, a hasher to
	// capture compressed digest, and a countWriter to capture compressed
	// size.
	pr, pw := io.Pipe()

	// Write compressed bytes to be read by the pipe.Reader, hashed by zh, and counted by count.
	mw := io.MultiWriter(pw, zh, count)

	// Buffer the output of the gzip writer so we don't have to wait on pr to keep writing.
	// 2<<16 = 128KiB ought to be small enough for anybody.
	bw := bufio.NewWriterSize(mw, 2<<16)
	zw, err := gzip.NewWriterLevel(bw, l.compression)
	if err != nil {
		return nil, err
	}

	// Closed by the copying goroutine once h/zh/count hold final values,
	// so the closer below never finalizes with partial digests.
	doneDigesting := make(chan struct{})

	cr := &compressedReader{
		pr: pr,
		closer: func() error {
			// Immediately close pw without error. There are three ways to get
			// here.
			//
			// 1. There was a copy error due from the underlying reader, in which
			//    case the error will not be overwritten.
			// 2. Copying from the underlying reader completed successfully.
			// 3. Close has been called before the underlying reader has been
			//    fully consumed. In this case pw must be closed in order to
			//    keep the flush of bw from blocking indefinitely.
			//
			// NOTE: pw.Close never returns an error. The signature is only to
			// implement io.Closer.
			_ = pw.Close()

			// Close the inner ReadCloser.
			//
			// NOTE: net/http will call close on success, so if we've already
			// closed the inner rc, it's not an error.
			if err := l.blob.Close(); err != nil && !errors.Is(err, os.ErrClosed) {
				return err
			}

			// Finalize layer with its digest and size values.
			<-doneDigesting
			return l.finalize(h, zh, count.n)
		},
	}
	go func() {
		// Copy blob into the gzip writer, which also hashes and counts the
		// size of the compressed output, and hasher of the raw contents.
		_, copyErr := io.Copy(io.MultiWriter(h, zw), l.blob)

		// Close the gzip writer once copying is done. If this is done in the
		// Close method of compressedReader instead, then it can cause a panic
		// when the compressedReader is closed before the blob is fully
		// consumed and io.Copy in this goroutine is still blocking.
		closeErr := zw.Close()

		// Check errors from writing and closing streams.
		if copyErr != nil {
			close(doneDigesting)
			pw.CloseWithError(copyErr)
			return
		}
		if closeErr != nil {
			close(doneDigesting)
			pw.CloseWithError(closeErr)
			return
		}

		// Flush the buffer once all writes are complete to the gzip writer.
		if err := bw.Flush(); err != nil {
			close(doneDigesting)
			pw.CloseWithError(err)
			return
		}

		// Notify closer that digests are done being written.
		close(doneDigesting)

		// Close the compressed reader to calculate digest/diffID/size. This
		// will cause pr to return EOF which will cause readers of the
		// Compressed stream to finish reading.
		pw.CloseWithError(cr.Close())
	}()

	return cr, nil
}
||||
|
||||
// Read implements io.Reader by delegating to the pipe's read side.
func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) }
||||
|
||||
// Close implements io.Closer by running the pipeline-teardown closure
// installed in newCompressedReader.
func (cr *compressedReader) Close() error { return cr.closer() }
||||
|
||||
// countWriter is an io.Writer that discards its input and tracks the
// total number of bytes it has been asked to write.
type countWriter struct{ n int64 }

// Write adds len(buf) to the running total and reports the full buffer
// as written; it never fails.
func (w *countWriter) Write(buf []byte) (int, error) {
	size := len(buf)
	w.n += int64(size)
	return size, nil
}
@ -0,0 +1,73 @@ |
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package types holds common OCI media types.
|
||||
package types |
||||
|
||||
// MediaType is an enumeration of the supported mime types that an element of an image might have.
type MediaType string

// The collection of known MediaType values.
const (
	OCIContentDescriptor           MediaType = "application/vnd.oci.descriptor.v1+json"
	OCIImageIndex                  MediaType = "application/vnd.oci.image.index.v1+json"
	OCIManifestSchema1             MediaType = "application/vnd.oci.image.manifest.v1+json"
	OCIConfigJSON                  MediaType = "application/vnd.oci.image.config.v1+json"
	OCILayer                       MediaType = "application/vnd.oci.image.layer.v1.tar+gzip"
	OCILayerZStd                   MediaType = "application/vnd.oci.image.layer.v1.tar+zstd"
	OCIRestrictedLayer             MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
	OCIUncompressedLayer           MediaType = "application/vnd.oci.image.layer.v1.tar"
	OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar"

	DockerManifestSchema1       MediaType = "application/vnd.docker.distribution.manifest.v1+json"
	DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
	DockerManifestSchema2       MediaType = "application/vnd.docker.distribution.manifest.v2+json"
	DockerManifestList          MediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
	DockerLayer                 MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
	DockerConfigJSON            MediaType = "application/vnd.docker.container.image.v1+json"
	DockerPluginConfig          MediaType = "application/vnd.docker.plugin.v1+json"
	DockerForeignLayer          MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
	DockerUncompressedLayer     MediaType = "application/vnd.docker.image.rootfs.diff.tar"

	OCIVendorPrefix    = "vnd.oci"
	DockerVendorPrefix = "vnd.docker"
)

// IsDistributable returns true if a layer is distributable, see:
// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers
func (m MediaType) IsDistributable() bool {
	// Only the foreign / non-distributable layer media types are excluded;
	// every other media type is considered distributable.
	return m != DockerForeignLayer &&
		m != OCIRestrictedLayer &&
		m != OCIUncompressedRestrictedLayer
}

// IsImage returns true if the mediaType represents an image manifest, as opposed to something else, like an index.
func (m MediaType) IsImage() bool {
	return m == OCIManifestSchema1 || m == DockerManifestSchema2
}

// IsIndex returns true if the mediaType represents an index, as opposed to something else, like an image.
func (m MediaType) IsIndex() bool {
	return m == OCIImageIndex || m == DockerManifestList
}
324
vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go
generated
vendored
324
vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go
generated
vendored
@ -0,0 +1,324 @@ |
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v1 |
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Config) DeepCopyInto(out *Config) {
	*out = *in
	if in.Cmd != nil {
		in, out := &in.Cmd, &out.Cmd
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Healthcheck != nil {
		in, out := &in.Healthcheck, &out.Healthcheck
		*out = new(HealthConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.Entrypoint != nil {
		in, out := &in.Entrypoint, &out.Entrypoint
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.OnBuild != nil {
		in, out := &in.OnBuild, &out.OnBuild
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make(map[string]struct{}, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.ExposedPorts != nil {
		in, out := &in.ExposedPorts, &out.ExposedPorts
		*out = make(map[string]struct{}, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Shell != nil {
		in, out := &in.Shell, &out.Shell
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
func (in *Config) DeepCopy() *Config {
	if in == nil {
		return nil
	}
	out := new(Config)
	in.DeepCopyInto(out)
	return out
}
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigFile) DeepCopyInto(out *ConfigFile) {
	*out = *in
	in.Created.DeepCopyInto(&out.Created)
	if in.History != nil {
		in, out := &in.History, &out.History
		*out = make([]History, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	in.RootFS.DeepCopyInto(&out.RootFS)
	in.Config.DeepCopyInto(&out.Config)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile.
func (in *ConfigFile) DeepCopy() *ConfigFile {
	if in == nil {
		return nil
	}
	out := new(ConfigFile)
	in.DeepCopyInto(out)
	return out
}
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Descriptor) DeepCopyInto(out *Descriptor) {
	*out = *in
	out.Digest = in.Digest
	if in.Data != nil {
		in, out := &in.Data, &out.Data
		*out = make([]byte, len(*in))
		copy(*out, *in)
	}
	if in.URLs != nil {
		in, out := &in.URLs, &out.URLs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Platform != nil {
		in, out := &in.Platform, &out.Platform
		*out = new(Platform)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor.
func (in *Descriptor) DeepCopy() *Descriptor {
	if in == nil {
		return nil
	}
	out := new(Descriptor)
	in.DeepCopyInto(out)
	return out
}
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Hash) DeepCopyInto(out *Hash) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash.
func (in *Hash) DeepCopy() *Hash {
	if in == nil {
		return nil
	}
	out := new(Hash)
	in.DeepCopyInto(out)
	return out
}
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HealthConfig) DeepCopyInto(out *HealthConfig) {
	*out = *in
	if in.Test != nil {
		in, out := &in.Test, &out.Test
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig.
func (in *HealthConfig) DeepCopy() *HealthConfig {
	if in == nil {
		return nil
	}
	out := new(HealthConfig)
	in.DeepCopyInto(out)
	return out
}
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *History) DeepCopyInto(out *History) {
	*out = *in
	in.Created.DeepCopyInto(&out.Created)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History.
func (in *History) DeepCopy() *History {
	if in == nil {
		return nil
	}
	out := new(History)
	in.DeepCopyInto(out)
	return out
}
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IndexManifest) DeepCopyInto(out *IndexManifest) {
	*out = *in
	if in.Manifests != nil {
		in, out := &in.Manifests, &out.Manifests
		*out = make([]Descriptor, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest.
func (in *IndexManifest) DeepCopy() *IndexManifest {
	if in == nil {
		return nil
	}
	out := new(IndexManifest)
	in.DeepCopyInto(out)
	return out
}
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Manifest) DeepCopyInto(out *Manifest) {
	*out = *in
	in.Config.DeepCopyInto(&out.Config)
	if in.Layers != nil {
		in, out := &in.Layers, &out.Layers
		*out = make([]Descriptor, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest.
func (in *Manifest) DeepCopy() *Manifest {
	if in == nil {
		return nil
	}
	out := new(Manifest)
	in.DeepCopyInto(out)
	return out
}
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
	*out = *in
	if in.OSFeatures != nil {
		in, out := &in.OSFeatures, &out.OSFeatures
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Features != nil {
		in, out := &in.Features, &out.Features
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
func (in *Platform) DeepCopy() *Platform {
	if in == nil {
		return nil
	}
	out := new(Platform)
	in.DeepCopyInto(out)
	return out
}
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RootFS) DeepCopyInto(out *RootFS) {
	*out = *in
	if in.DiffIDs != nil {
		in, out := &in.DiffIDs, &out.DiffIDs
		*out = make([]Hash, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS.
func (in *RootFS) DeepCopy() *RootFS {
	if in == nil {
		return nil
	}
	out := new(RootFS)
	in.DeepCopyInto(out)
	return out
}
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time.
func (in *Time) DeepCopy() *Time {
	if in == nil {
		return nil
	}
	out := new(Time)
	in.DeepCopyInto(out)
	return out
}
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue