mirror of https://github.com/k3d-io/k3d
chore: upgrade docker dependency and adjust for deprecations (#1460)
parent
2d44b3186d
commit
41049a26e3
@ -0,0 +1,25 @@ |
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects) |
||||
*.o |
||||
*.a |
||||
*.so |
||||
|
||||
# Folders |
||||
_obj |
||||
_test |
||||
|
||||
# Architecture specific extensions/prefixes |
||||
*.[568vq] |
||||
[568vq].out |
||||
|
||||
*.cgo1.go |
||||
*.cgo2.c |
||||
_cgo_defun.c |
||||
_cgo_gotypes.go |
||||
_cgo_export.* |
||||
|
||||
_testmain.go |
||||
|
||||
*.exe |
||||
|
||||
# IDEs |
||||
.idea/ |
@ -0,0 +1,20 @@ |
||||
The MIT License (MIT) |
||||
|
||||
Copyright (c) 2014 Cenk Altı |
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of |
||||
this software and associated documentation files (the "Software"), to deal in |
||||
the Software without restriction, including without limitation the rights to |
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of |
||||
the Software, and to permit persons to whom the Software is furnished to do so, |
||||
subject to the following conditions: |
||||
|
||||
The above copyright notice and this permission notice shall be included in all |
||||
copies or substantial portions of the Software. |
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS |
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR |
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER |
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
@ -0,0 +1,30 @@ |
||||
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls] |
||||
|
||||
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. |
||||
|
||||
[Exponential backoff][exponential backoff wiki] |
||||
is an algorithm that uses feedback to multiplicatively decrease the rate of some process, |
||||
in order to gradually find an acceptable rate. |
||||
The retries exponentially increase and stop increasing when a certain threshold is met. |
||||
|
||||
## Usage |
||||
|
||||
Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. |
||||
|
||||
Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. |
||||
|
||||
## Contributing |
||||
|
||||
* I would like to keep this library as small as possible. |
||||
* Please don't send a PR without opening an issue and discussing it first. |
||||
* If proposed change is not a common use case, I will probably not accept it. |
||||
|
||||
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 |
||||
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png |
||||
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master |
||||
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master |
||||
|
||||
[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java |
||||
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff |
||||
|
||||
[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples |
@ -0,0 +1,66 @@ |
||||
// Package backoff implements backoff algorithms for retrying operations.
|
||||
//
|
||||
// Use Retry function for retrying operations that may fail.
|
||||
// If Retry does not meet your needs,
|
||||
// copy/paste the function into your project and modify as you wish.
|
||||
//
|
||||
// There is also Ticker type similar to time.Ticker.
|
||||
// You can use it if you need to work with channels.
|
||||
//
|
||||
// See Examples section below for usage examples.
|
||||
package backoff |
||||
|
||||
import "time" |
||||
|
||||
// BackOff is a backoff policy for retrying an operation.
type BackOff interface {
	// NextBackOff returns the duration to wait before retrying the operation,
	// or backoff. Stop to indicate that no more retries should be made.
	//
	// Example usage:
	//
	// duration := backoff.NextBackOff();
	// if (duration == backoff.Stop) {
	// 	// Do not retry operation.
	// } else {
	// 	// Sleep for duration and retry operation.
	// }
	//
	NextBackOff() time.Duration

	// Reset to initial state. Implementations use this to restart their
	// interval/elapsed-time bookkeeping before a fresh retry sequence.
	Reset()
}

// Stop indicates that no more retries should be made for use in NextBackOff().
// Callers compare the returned duration against this sentinel with ==.
const Stop time.Duration = -1
||||
|
||||
// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
// meaning that the operation is retried immediately without waiting, indefinitely.
type ZeroBackOff struct{}

// Reset is a no-op: ZeroBackOff carries no state.
func (b *ZeroBackOff) Reset() {}

// NextBackOff always returns a zero wait, so the caller retries immediately.
func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }

// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
// NextBackOff(), meaning that the operation should never be retried.
type StopBackOff struct{}

// Reset is a no-op: StopBackOff carries no state.
func (b *StopBackOff) Reset() {}

// NextBackOff always returns Stop, so the operation is attempted only once.
func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
||||
|
||||
// ConstantBackOff is a backoff policy that always returns the same backoff delay.
// This is in contrast to an exponential backoff policy,
// which returns a delay that grows longer as you call NextBackOff() over and over again.
type ConstantBackOff struct {
	Interval time.Duration
}

// Reset is a no-op: a constant policy has no state to restore.
func (b *ConstantBackOff) Reset() {}

// NextBackOff always reports the fixed Interval.
func (b *ConstantBackOff) NextBackOff() time.Duration {
	return b.Interval
}

// NewConstantBackOff creates a ConstantBackOff with the given wait interval d.
func NewConstantBackOff(d time.Duration) *ConstantBackOff {
	return &ConstantBackOff{Interval: d}
}
@ -0,0 +1,62 @@ |
||||
package backoff |
||||
|
||||
import ( |
||||
"context" |
||||
"time" |
||||
) |
||||
|
||||
// BackOffContext is a backoff policy that stops retrying after the context
// is canceled.
type BackOffContext interface { // nolint: golint
	BackOff
	// Context returns the context that bounds this policy's retries.
	Context() context.Context
}

// backOffContext decorates an arbitrary BackOff with a context; its
// NextBackOff returns Stop once ctx is done.
type backOffContext struct {
	BackOff
	ctx context.Context
}
||||
|
||||
// WithContext returns a BackOffContext with context ctx
|
||||
//
|
||||
// ctx must not be nil
|
||||
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
|
||||
if ctx == nil { |
||||
panic("nil context") |
||||
} |
||||
|
||||
if b, ok := b.(*backOffContext); ok { |
||||
return &backOffContext{ |
||||
BackOff: b.BackOff, |
||||
ctx: ctx, |
||||
} |
||||
} |
||||
|
||||
return &backOffContext{ |
||||
BackOff: b, |
||||
ctx: ctx, |
||||
} |
||||
} |
||||
|
||||
func getContext(b BackOff) context.Context { |
||||
if cb, ok := b.(BackOffContext); ok { |
||||
return cb.Context() |
||||
} |
||||
if tb, ok := b.(*backOffTries); ok { |
||||
return getContext(tb.delegate) |
||||
} |
||||
return context.Background() |
||||
} |
||||
|
||||
// Context returns the context associated with this policy.
func (b *backOffContext) Context() context.Context {
	return b.ctx
}

// NextBackOff returns Stop when the context has been canceled or has
// expired; otherwise it defers to the wrapped policy. The default branch
// makes the cancellation check non-blocking.
func (b *backOffContext) NextBackOff() time.Duration {
	select {
	case <-b.ctx.Done():
		return Stop
	default:
		return b.BackOff.NextBackOff()
	}
}
@ -0,0 +1,216 @@ |
||||
package backoff |
||||
|
||||
import ( |
||||
"math/rand" |
||||
"time" |
||||
) |
||||
|
||||
/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.

NextBackOff() is calculated using the following formula:

	randomized interval =
	    RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])

In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.

For example, given the following parameters:

	RetryInterval = 2
	RandomizationFactor = 0.5
	Multiplier = 2

the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.

Note: MaxInterval caps the RetryInterval and not the randomized interval.

If the time elapsed since an ExponentialBackOff instance is created goes past the
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.

The elapsed time can be reset by calling Reset().

Example: Given the following default arguments, for 10 tries the sequence will be,
and assuming we go over the MaxElapsedTime on the 10th try:

	Request #  RetryInterval (seconds)  Randomized Interval (seconds)

	1          0.5                      [0.25,   0.75]
	2          0.75                     [0.375,  1.125]
	3          1.125                    [0.562,  1.687]
	4          1.687                    [0.8435, 2.53]
	5          2.53                     [1.265,  3.795]
	6          3.795                    [1.897,  5.692]
	7          5.692                    [2.846,  8.538]
	8          8.538                    [4.269, 12.807]
	9          12.807                   [6.403, 19.210]
	10         19.210                   backoff.Stop

Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
	InitialInterval     time.Duration // first interval; seeds currentInterval on Reset
	RandomizationFactor float64       // jitter as a fraction of the current interval
	Multiplier          float64       // growth factor applied after each NextBackOff call
	MaxInterval         time.Duration // cap for the (pre-randomization) retry interval
	// After MaxElapsedTime the ExponentialBackOff returns Stop.
	// It never stops if MaxElapsedTime == 0.
	MaxElapsedTime time.Duration
	Stop           time.Duration // value returned once retries should cease (Stop by default)
	Clock          Clock         // time source; replaceable via WithClockProvider

	currentInterval time.Duration // next (pre-randomization) interval to hand out
	startTime       time.Time     // set by Reset; basis for GetElapsedTime
}
||||
|
||||
// Clock is an interface that returns current time for BackOff.
type Clock interface {
	Now() time.Time
}

// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
type ExponentialBackOffOpts func(*ExponentialBackOff)

// Default values for ExponentialBackOff. These populate the corresponding
// fields in NewExponentialBackOff before any options are applied.
const (
	DefaultInitialInterval     = 500 * time.Millisecond
	DefaultRandomizationFactor = 0.5
	DefaultMultiplier          = 1.5
	DefaultMaxInterval         = 60 * time.Second
	DefaultMaxElapsedTime      = 15 * time.Minute
)
||||
|
||||
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
|
||||
func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { |
||||
b := &ExponentialBackOff{ |
||||
InitialInterval: DefaultInitialInterval, |
||||
RandomizationFactor: DefaultRandomizationFactor, |
||||
Multiplier: DefaultMultiplier, |
||||
MaxInterval: DefaultMaxInterval, |
||||
MaxElapsedTime: DefaultMaxElapsedTime, |
||||
Stop: Stop, |
||||
Clock: SystemClock, |
||||
} |
||||
for _, fn := range opts { |
||||
fn(b) |
||||
} |
||||
b.Reset() |
||||
return b |
||||
} |
||||
|
||||
// WithInitialInterval sets the initial interval between retries.
// The value seeds currentInterval when Reset runs.
func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.InitialInterval = duration
	}
}

// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
// A factor of 0 disables jitter entirely (see getRandomValueFromInterval).
func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.RandomizationFactor = randomizationFactor
	}
}

// WithMultiplier sets the multiplier for increasing the interval after each retry.
func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.Multiplier = multiplier
	}
}

// WithMaxInterval sets the maximum interval between retries.
// Note: this caps the pre-randomization interval, not the jittered value.
func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.MaxInterval = duration
	}
}

// WithMaxElapsedTime sets the maximum total time for retries.
// A value of 0 means NextBackOff never returns Stop for elapsed time.
func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.MaxElapsedTime = duration
	}
}

// WithRetryStopDuration sets the duration after which retries should stop.
// This overrides the sentinel value NextBackOff returns when giving up.
func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.Stop = duration
	}
}

// WithClockProvider sets the clock used to measure time.
func WithClockProvider(clock Clock) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.Clock = clock
	}
}
||||
|
||||
// systemClock implements Clock using the real wall clock.
type systemClock struct{}

// Now returns the current wall-clock time.
func (t systemClock) Now() time.Time {
	return time.Now()
}

// SystemClock implements Clock interface that uses time.Now().
var SystemClock = systemClock{}

// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
	b.currentInterval = b.InitialInterval
	b.startTime = b.Clock.Now()
}
||||
|
||||
// NextBackOff calculates the next backoff interval using the formula:
//
//	Randomized interval = RetryInterval * (1 ± RandomizationFactor)
//
// It returns b.Stop once elapsed time plus the next interval would exceed
// MaxElapsedTime; a MaxElapsedTime of 0 disables that limit.
func (b *ExponentialBackOff) NextBackOff() time.Duration {
	// Make sure we have not gone over the maximum elapsed time.
	elapsed := b.GetElapsedTime()
	next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
	// The interval grows even when we end up returning Stop below.
	b.incrementCurrentInterval()
	if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
		return b.Stop
	}
	return next
}
||||
|
||||
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
// is created and is reset when Reset() is called.
//
// The elapsed time is computed using the configured Clock (the system
// clock by default). It is safe to call even while the backoff policy
// is used by a running ticker.
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
	return b.Clock.Now().Sub(b.startTime)
}
||||
|
||||
// Increments the current interval by multiplying it with the multiplier.
|
||||
func (b *ExponentialBackOff) incrementCurrentInterval() { |
||||
// Check for overflow, if overflow is detected set the current interval to the max interval.
|
||||
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { |
||||
b.currentInterval = b.MaxInterval |
||||
} else { |
||||
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) |
||||
} |
||||
} |
||||
|
||||
// Returns a random value from the following interval:
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
	// With no jitter configured, skip the arithmetic so the result is
	// exactly the current interval.
	if randomizationFactor == 0 {
		return currentInterval
	}

	delta := randomizationFactor * float64(currentInterval)
	lowerBound := float64(currentInterval) - delta
	upperBound := float64(currentInterval) + delta

	// Pick a value in [lowerBound, upperBound]. The +1 keeps the upper
	// bound reachable: for bounds 1 and 3, each of 1, 2 and 3 should have
	// an equal (33%) chance of being selected.
	return time.Duration(lowerBound + (random * (upperBound - lowerBound + 1)))
}
@ -0,0 +1,146 @@ |
||||
package backoff |
||||
|
||||
import ( |
||||
"errors" |
||||
"time" |
||||
) |
||||
|
||||
// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData().
// The operation will be retried using a backoff policy if it returns an error.
type OperationWithData[T any] func() (T, error)

// An Operation is executing by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error

// withEmptyData adapts an Operation to an OperationWithData so the single
// generic retry loop (doRetryNotify) can serve both entry points; the
// struct{}{} result is discarded by the caller.
func (o Operation) withEmptyData() OperationWithData[struct{}] {
	return func() (struct{}, error) {
		return struct{}{}, o()
	}
}

// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy stated to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)
||||
|
||||
// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error {
	return RetryNotify(o, b, nil)
}

// RetryWithData is like Retry but returns data in the response too.
func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
	return RetryNotifyWithData(o, b, nil)
}

// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
	return RetryNotifyWithTimer(operation, b, notify, nil)
}

// RetryNotifyWithData is like RetryNotify but returns data in the response too.
func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
	return doRetryNotify(operation, b, notify, nil)
}

// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
// for each failed attempt before sleep.
// A default timer that uses system timer is used when nil is passed.
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
	// The empty struct{} result from the adapted operation is discarded.
	_, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
	return err
}

// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
	return doRetryNotify(operation, b, notify, t)
}
||||
|
||||
// doRetryNotify is the single retry loop backing all Retry* entry points.
// It runs operation at least once, sleeping between failed attempts for
// the duration supplied by b, until the operation succeeds, b returns
// Stop, or the context obtained from b is canceled.
func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
	var (
		err  error
		next time.Duration
		res  T
	)
	if t == nil {
		t = &defaultTimer{}
	}

	// Release timer resources no matter how the loop exits.
	defer func() {
		t.Stop()
	}()

	ctx := getContext(b)

	b.Reset()
	for {
		res, err = operation()
		if err == nil {
			return res, nil
		}

		// A *PermanentError anywhere in the chain aborts retrying and
		// surfaces the wrapped error rather than the wrapper.
		var permanent *PermanentError
		if errors.As(err, &permanent) {
			return res, permanent.Err
		}

		if next = b.NextBackOff(); next == Stop {
			// If the context is already canceled, report its error
			// instead of the operation's.
			if cerr := ctx.Err(); cerr != nil {
				return res, cerr
			}

			return res, err
		}

		if notify != nil {
			notify(err, next)
		}

		t.Start(next)

		// Sleep for the backoff interval, waking early on cancellation.
		select {
		case <-ctx.Done():
			return res, ctx.Err()
		case <-t.C():
		}
	}
}
||||
|
||||
// PermanentError signals that the operation should not be retried.
type PermanentError struct {
	Err error
}

// Error returns the message of the wrapped error.
func (e *PermanentError) Error() string {
	return e.Err.Error()
}

// Unwrap exposes the wrapped error for errors.Is / errors.As chains.
func (e *PermanentError) Unwrap() error {
	return e.Err
}

// Is reports whether target is a *PermanentError, regardless of which
// error it wraps.
func (e *PermanentError) Is(target error) bool {
	_, ok := target.(*PermanentError)
	return ok
}

// Permanent wraps the given err in a *PermanentError.
// A nil err is passed through unchanged so a success is never wrapped.
func Permanent(err error) error {
	if err == nil {
		return nil
	}
	return &PermanentError{Err: err}
}
@ -0,0 +1,97 @@ |
||||
package backoff |
||||
|
||||
import ( |
||||
"context" |
||||
"sync" |
||||
"time" |
||||
) |
||||
|
||||
// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
//
// Ticks will continue to arrive when the previous operation is still running,
// so operations that take a while to fail could run in quick succession.
type Ticker struct {
	C        <-chan time.Time // receive-only view of c for consumers
	c        chan time.Time   // send side used by the run goroutine
	b        BackOff          // policy supplying the interval between ticks
	ctx      context.Context  // obtained from b; cancels the run goroutine
	timer    Timer            // timer used to wait between ticks
	stop     chan struct{}    // closed by Stop to end ticking
	stopOnce sync.Once        // makes Stop idempotent
}
||||
|
||||
// NewTicker returns a new Ticker containing a channel that will send
// the time at times specified by the BackOff argument. Ticker is
// guaranteed to tick at least once. The channel is closed when Stop
// method is called or BackOff stops. It is not safe to manipulate the
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
	return NewTickerWithTimer(b, &defaultTimer{})
}

// NewTickerWithTimer returns a new Ticker with a custom timer.
// A default timer that uses system timer is used when nil is passed.
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
	if timer == nil {
		timer = &defaultTimer{}
	}
	c := make(chan time.Time)
	t := &Ticker{
		C:     c,
		c:     c,
		b:     b,
		ctx:   getContext(b),
		timer: timer,
		stop:  make(chan struct{}),
	}
	// Reset the policy so the first NextBackOff starts from a clean state,
	// then produce ticks on a dedicated goroutine.
	t.b.Reset()
	go t.run()
	return t
}
||||
|
||||
// Stop turns off a ticker. After Stop, no more ticks will be sent.
// The sync.Once makes Stop safe to call more than once.
func (t *Ticker) Stop() {
	t.stopOnce.Do(func() { close(t.stop) })
}
||||
|
||||
// run is the ticker's producer goroutine: it sends an immediate first tick,
// then forwards timer ticks until the ticker is stopped, the policy returns
// Stop, or the context is done.
func (t *Ticker) run() {
	c := t.c
	// Close the consumer channel on exit so receivers observe termination.
	defer close(c)

	// Ticker is guaranteed to tick at least once.
	afterC := t.send(time.Now())

	for {
		// send returns nil once ticking should end.
		if afterC == nil {
			return
		}

		select {
		case tick := <-afterC:
			afterC = t.send(tick)
		case <-t.stop:
			t.c = nil // Prevent future ticks from being sent to the channel.
			return
		case <-t.ctx.Done():
			return
		}
	}
}
||||
|
||||
// send delivers tick to the consumer channel (unless the ticker is stopped
// first), then arms the timer for the next interval. It returns the timer's
// channel, or nil when ticking should end (stopped, or the policy returned
// Stop).
func (t *Ticker) send(tick time.Time) <-chan time.Time {
	select {
	case t.c <- tick:
	case <-t.stop:
		return nil
	}

	next := t.b.NextBackOff()
	if next == Stop {
		t.Stop()
		return nil
	}

	t.timer.Start(next)
	return t.timer.C()
}
@ -0,0 +1,35 @@ |
||||
package backoff |
||||
|
||||
import "time" |
||||
|
||||
// Timer abstracts a single resettable timer so callers (retry loops and
// tickers) can supply a custom implementation instead of the system timer.
type Timer interface {
	// Start arms the timer to fire after duration; may be called repeatedly.
	Start(duration time.Duration)
	// Stop releases resources when the timer is no longer needed.
	Stop()
	// C returns the channel that receives the time when the timer fires.
	C() <-chan time.Time
}
||||
|
||||
// defaultTimer implements Timer interface using time.Timer
type defaultTimer struct {
	timer *time.Timer
}

// C returns the timers channel which receives the current time when the timer fires.
func (t *defaultTimer) C() <-chan time.Time {
	return t.timer.C
}

// Start starts the timer to fire after the given duration. The underlying
// time.Timer is allocated on first use and reset on later calls.
func (t *defaultTimer) Start(duration time.Duration) {
	if t.timer != nil {
		t.timer.Reset(duration)
		return
	}
	t.timer = time.NewTimer(duration)
}

// Stop is called when the timer is not used anymore and resources may be freed.
// It is safe to call even if Start was never invoked.
func (t *defaultTimer) Stop() {
	if t.timer == nil {
		return
	}
	t.timer.Stop()
}
@ -0,0 +1,38 @@ |
||||
package backoff |
||||
|
||||
import "time" |
||||
|
||||
/* |
||||
WithMaxRetries creates a wrapper around another BackOff, which will |
||||
return Stop if NextBackOff() has been called too many times since |
||||
the last time Reset() was called |
||||
|
||||
Note: Implementation is not thread-safe. |
||||
*/ |
||||
func WithMaxRetries(b BackOff, max uint64) BackOff { |
||||
return &backOffTries{delegate: b, maxTries: max} |
||||
} |
||||
|
||||
type backOffTries struct { |
||||
delegate BackOff |
||||
maxTries uint64 |
||||
numTries uint64 |
||||
} |
||||
|
||||
func (b *backOffTries) NextBackOff() time.Duration { |
||||
if b.maxTries == 0 { |
||||
return Stop |
||||
} |
||||
if b.maxTries > 0 { |
||||
if b.maxTries <= b.numTries { |
||||
return Stop |
||||
} |
||||
b.numTries++ |
||||
} |
||||
return b.delegate.NextBackOff() |
||||
} |
||||
|
||||
func (b *backOffTries) Reset() { |
||||
b.numTries = 0 |
||||
b.delegate.Reset() |
||||
} |
@ -1,51 +0,0 @@ |
||||
package command |
||||
|
||||
import ( |
||||
"sync" |
||||
|
||||
"github.com/docker/docker/api/types/events" |
||||
"github.com/sirupsen/logrus" |
||||
) |
||||
|
||||
// EventHandler is abstract interface for user to customize
// own handle functions of each type of events
//
// Deprecated: EventHandler is no longer used, and will be removed in the next release.
type EventHandler interface {
	// Handle registers h as the callback to run for the given event action.
	Handle(action events.Action, h func(events.Message))
	// Watch consumes messages from c, dispatching to registered handlers.
	Watch(c <-chan events.Message)
}

// InitEventHandler initializes and returns an EventHandler
//
// Deprecated: InitEventHandler is no longer used, and will be removed in the next release.
func InitEventHandler() EventHandler {
	return &eventHandler{handlers: make(map[events.Action]func(events.Message))}
}

// eventHandler dispatches events to per-action handler functions.
type eventHandler struct {
	handlers map[events.Action]func(events.Message) // action -> callback
	mu       sync.Mutex                             // guards handlers
}

// Handle registers h for action, replacing any previously registered handler.
func (w *eventHandler) Handle(action events.Action, h func(events.Message)) {
	w.mu.Lock()
	w.handlers[action] = h
	w.mu.Unlock()
}

// Watch ranges over the passed in event chan and processes the events based on the
// handlers created for a given action.
// To stop watching, close the event chan.
func (w *eventHandler) Watch(c <-chan events.Message) {
	for e := range c {
		w.mu.Lock()
		h, exists := w.handlers[e.Action]
		w.mu.Unlock()
		if !exists {
			continue
		}
		logrus.Debugf("event handler: received event: %v", e)
		// Handlers run on their own goroutines; Watch does not wait for them.
		go h(e)
	}
}
@ -0,0 +1,218 @@ |
||||
package command |
||||
|
||||
import ( |
||||
"context" |
||||
"os" |
||||
"path/filepath" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/docker/distribution/uuid" |
||||
"go.opentelemetry.io/otel" |
||||
"go.opentelemetry.io/otel/metric" |
||||
sdkmetric "go.opentelemetry.io/otel/sdk/metric" |
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata" |
||||
"go.opentelemetry.io/otel/sdk/resource" |
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace" |
||||
semconv "go.opentelemetry.io/otel/semconv/v1.21.0" |
||||
"go.opentelemetry.io/otel/trace" |
||||
) |
||||
|
||||
// exportTimeout bounds telemetry export operations; its use is outside
// this excerpt — NOTE(review): confirm against the exporter code.
const exportTimeout = 50 * time.Millisecond

// TracerProvider is an extension of the trace.TracerProvider interface for CLI programs.
type TracerProvider interface {
	trace.TracerProvider
	// ForceFlush exports any buffered spans immediately.
	ForceFlush(ctx context.Context) error
	// Shutdown flushes and releases the provider's resources.
	Shutdown(ctx context.Context) error
}

// MeterProvider is an extension of the metric.MeterProvider interface for CLI programs.
type MeterProvider interface {
	metric.MeterProvider
	// ForceFlush exports any buffered metrics immediately.
	ForceFlush(ctx context.Context) error
	// Shutdown flushes and releases the provider's resources.
	Shutdown(ctx context.Context) error
}

// TelemetryClient provides the methods for using OTEL tracing or metrics.
type TelemetryClient interface {
	// Resource returns the OTEL Resource configured with this TelemetryClient.
	// This resource may be created lazily, but the resource should be the same
	// each time this function is invoked.
	Resource() *resource.Resource

	// TracerProvider returns the currently initialized TracerProvider. This TracerProvider will be configured
	// with the default tracing components for a CLI program
	TracerProvider() trace.TracerProvider

	// MeterProvider returns the currently initialized MeterProvider. This MeterProvider will be configured
	// with the default metric components for a CLI program
	MeterProvider() metric.MeterProvider
}
||||
|
||||
// Resource implements TelemetryClient; the resource is built lazily on
// first use (see telemetryResource.Get).
func (cli *DockerCli) Resource() *resource.Resource {
	return cli.res.Get()
}

// TracerProvider implements TelemetryClient by returning the process-global
// OTEL tracer provider.
func (cli *DockerCli) TracerProvider() trace.TracerProvider {
	return otel.GetTracerProvider()
}

// MeterProvider implements TelemetryClient by returning the process-global
// OTEL meter provider.
func (cli *DockerCli) MeterProvider() metric.MeterProvider {
	return otel.GetMeterProvider()
}
||||
|
||||
// WithResourceOptions configures additional options for the default resource. The default
// resource will continue to include its default options.
// Options added after the resource has already been set or built are
// ignored (see telemetryResource.AppendOptions).
func WithResourceOptions(opts ...resource.Option) CLIOption {
	return func(cli *DockerCli) error {
		cli.res.AppendOptions(opts...)
		return nil
	}
}

// WithResource overwrites the default resource and prevents its creation.
func WithResource(res *resource.Resource) CLIOption {
	return func(cli *DockerCli) error {
		cli.res.Set(res)
		return nil
	}
}
||||
|
||||
// telemetryResource lazily builds the OTEL resource describing this CLI
// invocation. The zero value is ready to use.
type telemetryResource struct {
	res  *resource.Resource // explicit or lazily-built resource
	opts []resource.Option  // extra options merged into the default set by init
	once sync.Once          // guards the one-time init in Get
}

// Set overrides the resource, preventing lazy construction in init.
func (r *telemetryResource) Set(res *resource.Resource) {
	r.res = res
}

// Get returns the resource, building it on the first call.
func (r *telemetryResource) Get() *resource.Resource {
	r.once.Do(r.init)
	return r.res
}

// init materializes the resource exactly once (invoked via Get). A resource
// previously supplied with Set wins over lazy construction.
func (r *telemetryResource) init() {
	if r.res != nil {
		r.opts = nil
		return
	}

	opts := append(defaultResourceOptions(), r.opts...)
	res, err := resource.New(context.Background(), opts...)
	if err != nil {
		// Report through the OTEL error handler; res may still be partially
		// usable, so it is assigned regardless.
		otel.Handle(err)
	}
	r.res = res

	// Clear the resource options since they'll never be used again and to allow
	// the garbage collector to retrieve that memory.
	r.opts = nil
}
||||
|
||||
// createGlobalMeterProvider creates a new MeterProvider from the initialized DockerCli struct
// with the given options and sets it as the global meter provider
func (cli *DockerCli) createGlobalMeterProvider(ctx context.Context, opts ...sdkmetric.Option) {
	// Resource and exporter defaults first; caller-supplied opts appended last.
	allOpts := make([]sdkmetric.Option, 0, len(opts)+2)
	allOpts = append(allOpts, sdkmetric.WithResource(cli.Resource()))
	allOpts = append(allOpts, dockerMetricExporter(ctx, cli)...)
	allOpts = append(allOpts, opts...)
	mp := sdkmetric.NewMeterProvider(allOpts...)
	otel.SetMeterProvider(mp)
}

// createGlobalTracerProvider creates a new TracerProvider from the initialized DockerCli struct
// with the given options and sets it as the global tracer provider
func (cli *DockerCli) createGlobalTracerProvider(ctx context.Context, opts ...sdktrace.TracerProviderOption) {
	// Resource and exporter defaults first; caller-supplied opts appended last.
	allOpts := make([]sdktrace.TracerProviderOption, 0, len(opts)+2)
	allOpts = append(allOpts, sdktrace.WithResource(cli.Resource()))
	allOpts = append(allOpts, dockerSpanExporter(ctx, cli)...)
	allOpts = append(allOpts, opts...)
	tp := sdktrace.NewTracerProvider(allOpts...)
	otel.SetTracerProvider(tp)
}
||||
|
||||
func defaultResourceOptions() []resource.Option { |
||||
return []resource.Option{ |
||||
resource.WithDetectors(serviceNameDetector{}), |
||||
resource.WithAttributes( |
||||
// Use a unique instance id so OTEL knows that each invocation
|
||||
// of the CLI is its own instance. Without this, downstream
|
||||
// OTEL processors may think the same process is restarting
|
||||
// continuously.
|
||||
semconv.ServiceInstanceID(uuid.Generate().String()), |
||||
), |
||||
resource.WithFromEnv(), |
||||
resource.WithTelemetrySDK(), |
||||
} |
||||
} |
||||
|
||||
func (r *telemetryResource) AppendOptions(opts ...resource.Option) { |
||||
if r.res != nil { |
||||
return |
||||
} |
||||
r.opts = append(r.opts, opts...) |
||||
} |
||||
|
||||
type serviceNameDetector struct{} |
||||
|
||||
func (serviceNameDetector) Detect(ctx context.Context) (*resource.Resource, error) { |
||||
return resource.StringDetector( |
||||
semconv.SchemaURL, |
||||
semconv.ServiceNameKey, |
||||
func() (string, error) { |
||||
return filepath.Base(os.Args[0]), nil |
||||
}, |
||||
).Detect(ctx) |
||||
} |
||||
|
||||
// cliReader is an implementation of Reader that will automatically
|
||||
// report to a designated Exporter when Shutdown is called.
|
||||
type cliReader struct { |
||||
sdkmetric.Reader |
||||
exporter sdkmetric.Exporter |
||||
} |
||||
|
||||
func newCLIReader(exp sdkmetric.Exporter) sdkmetric.Reader { |
||||
reader := sdkmetric.NewManualReader( |
||||
sdkmetric.WithTemporalitySelector(deltaTemporality), |
||||
) |
||||
return &cliReader{ |
||||
Reader: reader, |
||||
exporter: exp, |
||||
} |
||||
} |
||||
|
||||
func (r *cliReader) Shutdown(ctx context.Context) error { |
||||
// Place a pretty tight constraint on the actual reporting.
|
||||
// We don't want CLI metrics to prevent the CLI from exiting
|
||||
// so if there's some kind of issue we need to abort pretty
|
||||
// quickly.
|
||||
ctx, cancel := context.WithTimeout(ctx, exportTimeout) |
||||
defer cancel() |
||||
|
||||
return r.ForceFlush(ctx) |
||||
} |
||||
|
||||
func (r *cliReader) ForceFlush(ctx context.Context) error { |
||||
var rm metricdata.ResourceMetrics |
||||
if err := r.Reader.Collect(ctx, &rm); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return r.exporter.Export(ctx, &rm) |
||||
} |
||||
|
||||
// deltaTemporality sets the Temporality of every instrument to delta.
|
||||
//
|
||||
// This isn't really needed since we create a unique resource on each invocation,
|
||||
// but it can help with cardinality concerns for downstream processors since they can
|
||||
// perform aggregation for a time interval and then discard the data once that time
|
||||
// period has passed. Cumulative temporality would imply to the downstream processor
|
||||
// that they might receive a successive point and they may unnecessarily keep state
|
||||
// they really shouldn't.
|
||||
func deltaTemporality(_ sdkmetric.InstrumentKind) metricdata.Temporality { |
||||
return metricdata.DeltaTemporality |
||||
} |
@ -0,0 +1,137 @@ |
||||
// FIXME(jsternberg): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
|
||||
//go:build go1.21
|
||||
|
||||
package command |
||||
|
||||
import ( |
||||
"context" |
||||
"net/url" |
||||
"os" |
||||
"path" |
||||
|
||||
"github.com/pkg/errors" |
||||
"go.opentelemetry.io/otel" |
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" |
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" |
||||
sdkmetric "go.opentelemetry.io/otel/sdk/metric" |
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace" |
||||
) |
||||
|
||||
const ( |
||||
otelContextFieldName string = "otel" |
||||
otelExporterOTLPEndpoint string = "OTEL_EXPORTER_OTLP_ENDPOINT" |
||||
debugEnvVarPrefix string = "DOCKER_CLI_" |
||||
) |
||||
|
||||
// dockerExporterOTLPEndpoint retrieves the OTLP endpoint used for the docker reporter
|
||||
// from the current context.
|
||||
func dockerExporterOTLPEndpoint(cli Cli) (endpoint string, secure bool) { |
||||
meta, err := cli.ContextStore().GetMetadata(cli.CurrentContext()) |
||||
if err != nil { |
||||
otel.Handle(err) |
||||
return "", false |
||||
} |
||||
|
||||
var otelCfg any |
||||
switch m := meta.Metadata.(type) { |
||||
case DockerContext: |
||||
otelCfg = m.AdditionalFields[otelContextFieldName] |
||||
case map[string]any: |
||||
otelCfg = m[otelContextFieldName] |
||||
} |
||||
|
||||
if otelCfg != nil { |
||||
otelMap, ok := otelCfg.(map[string]any) |
||||
if !ok { |
||||
otel.Handle(errors.Errorf( |
||||
"unexpected type for field %q: %T (expected: %T)", |
||||
otelContextFieldName, |
||||
otelCfg, |
||||
otelMap, |
||||
)) |
||||
} |
||||
// keys from https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/
|
||||
endpoint, _ = otelMap[otelExporterOTLPEndpoint].(string) |
||||
} |
||||
|
||||
// Override with env var value if it exists AND IS SET
|
||||
// (ignore otel defaults for this override when the key exists but is empty)
|
||||
if override := os.Getenv(debugEnvVarPrefix + otelExporterOTLPEndpoint); override != "" { |
||||
endpoint = override |
||||
} |
||||
|
||||
if endpoint == "" { |
||||
return "", false |
||||
} |
||||
|
||||
// Parse the endpoint. The docker config expects the endpoint to be
|
||||
// in the form of a URL to match the environment variable, but this
|
||||
// option doesn't correspond directly to WithEndpoint.
|
||||
//
|
||||
// We pretend we're the same as the environment reader.
|
||||
u, err := url.Parse(endpoint) |
||||
if err != nil { |
||||
otel.Handle(errors.Errorf("docker otel endpoint is invalid: %s", err)) |
||||
return "", false |
||||
} |
||||
|
||||
switch u.Scheme { |
||||
case "unix": |
||||
// Unix sockets are a bit weird. OTEL seems to imply they
|
||||
// can be used as an environment variable and are handled properly,
|
||||
// but they don't seem to be as the behavior of the environment variable
|
||||
// is to strip the scheme from the endpoint, but the underlying implementation
|
||||
// needs the scheme to use the correct resolver.
|
||||
//
|
||||
// We'll just handle this in a special way and add the unix:// back to the endpoint.
|
||||
endpoint = "unix://" + path.Join(u.Host, u.Path) |
||||
case "https": |
||||
secure = true |
||||
fallthrough |
||||
case "http": |
||||
endpoint = path.Join(u.Host, u.Path) |
||||
} |
||||
return endpoint, secure |
||||
} |
||||
|
||||
func dockerSpanExporter(ctx context.Context, cli Cli) []sdktrace.TracerProviderOption { |
||||
endpoint, secure := dockerExporterOTLPEndpoint(cli) |
||||
if endpoint == "" { |
||||
return nil |
||||
} |
||||
|
||||
opts := []otlptracegrpc.Option{ |
||||
otlptracegrpc.WithEndpoint(endpoint), |
||||
} |
||||
if !secure { |
||||
opts = append(opts, otlptracegrpc.WithInsecure()) |
||||
} |
||||
|
||||
exp, err := otlptracegrpc.New(ctx, opts...) |
||||
if err != nil { |
||||
otel.Handle(err) |
||||
return nil |
||||
} |
||||
return []sdktrace.TracerProviderOption{sdktrace.WithBatcher(exp, sdktrace.WithExportTimeout(exportTimeout))} |
||||
} |
||||
|
||||
func dockerMetricExporter(ctx context.Context, cli Cli) []sdkmetric.Option { |
||||
endpoint, secure := dockerExporterOTLPEndpoint(cli) |
||||
if endpoint == "" { |
||||
return nil |
||||
} |
||||
|
||||
opts := []otlpmetricgrpc.Option{ |
||||
otlpmetricgrpc.WithEndpoint(endpoint), |
||||
} |
||||
if !secure { |
||||
opts = append(opts, otlpmetricgrpc.WithInsecure()) |
||||
} |
||||
|
||||
exp, err := otlpmetricgrpc.New(ctx, opts...) |
||||
if err != nil { |
||||
otel.Handle(err) |
||||
return nil |
||||
} |
||||
return []sdkmetric.Option{sdkmetric.WithReader(newCLIReader(exp))} |
||||
} |
@ -0,0 +1,25 @@ |
||||
package command |
||||
|
||||
// WithEnableGlobalMeterProvider configures the DockerCli to create a new
|
||||
// MeterProvider from the initialized DockerCli struct, and set it as
|
||||
// the global meter provider.
|
||||
//
|
||||
// WARNING: For internal use, don't depend on this.
|
||||
func WithEnableGlobalMeterProvider() CLIOption { |
||||
return func(cli *DockerCli) error { |
||||
cli.enableGlobalMeter = true |
||||
return nil |
||||
} |
||||
} |
||||
|
||||
// WithEnableGlobalTracerProvider configures the DockerCli to create a new
|
||||
// TracerProvider from the initialized DockerCli struct, and set it as
|
||||
// the global tracer provider.
|
||||
//
|
||||
// WARNING: For internal use, don't depend on this.
|
||||
func WithEnableGlobalTracerProvider() CLIOption { |
||||
return func(cli *DockerCli) error { |
||||
cli.enableGlobalTracer = true |
||||
return nil |
||||
} |
||||
} |
@ -0,0 +1,179 @@ |
||||
package command |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/docker/cli/cli/version" |
||||
"github.com/pkg/errors" |
||||
"github.com/spf13/cobra" |
||||
"go.opentelemetry.io/otel/attribute" |
||||
"go.opentelemetry.io/otel/metric" |
||||
) |
||||
|
||||
// BaseCommandAttributes returns an attribute.Set containing attributes to attach to metrics/traces
|
||||
func BaseCommandAttributes(cmd *cobra.Command, streams Streams) []attribute.KeyValue { |
||||
return append([]attribute.KeyValue{ |
||||
attribute.String("command.name", getCommandName(cmd)), |
||||
}, stdioAttributes(streams)...) |
||||
} |
||||
|
||||
// InstrumentCobraCommands wraps all cobra commands' RunE funcs to set a command duration metric using otel.
|
||||
//
|
||||
// Note: this should be the last func to wrap/modify the PersistentRunE/RunE funcs before command execution.
|
||||
//
|
||||
// can also be used for spans!
|
||||
func (cli *DockerCli) InstrumentCobraCommands(ctx context.Context, cmd *cobra.Command) { |
||||
// If PersistentPreRunE is nil, make it execute PersistentPreRun and return nil by default
|
||||
ogPersistentPreRunE := cmd.PersistentPreRunE |
||||
if ogPersistentPreRunE == nil { |
||||
ogPersistentPreRun := cmd.PersistentPreRun |
||||
//nolint:unparam // necessary because error will always be nil here
|
||||
ogPersistentPreRunE = func(cmd *cobra.Command, args []string) error { |
||||
ogPersistentPreRun(cmd, args) |
||||
return nil |
||||
} |
||||
cmd.PersistentPreRun = nil |
||||
} |
||||
|
||||
// wrap RunE in PersistentPreRunE so that this operation gets executed on all children commands
|
||||
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { |
||||
// If RunE is nil, make it execute Run and return nil by default
|
||||
ogRunE := cmd.RunE |
||||
if ogRunE == nil { |
||||
ogRun := cmd.Run |
||||
//nolint:unparam // necessary because error will always be nil here
|
||||
ogRunE = func(cmd *cobra.Command, args []string) error { |
||||
ogRun(cmd, args) |
||||
return nil |
||||
} |
||||
cmd.Run = nil |
||||
} |
||||
cmd.RunE = func(cmd *cobra.Command, args []string) error { |
||||
// start the timer as the first step of every cobra command
|
||||
stopInstrumentation := cli.StartInstrumentation(cmd) |
||||
cmdErr := ogRunE(cmd, args) |
||||
stopInstrumentation(cmdErr) |
||||
return cmdErr |
||||
} |
||||
|
||||
return ogPersistentPreRunE(cmd, args) |
||||
} |
||||
} |
||||
|
||||
// StartInstrumentation instruments CLI commands with the individual metrics and spans configured.
|
||||
// It's the main command OTel utility, and new command-related metrics should be added to it.
|
||||
// It should be called immediately before command execution, and returns a stopInstrumentation function
|
||||
// that must be called with the error resulting from the command execution.
|
||||
func (cli *DockerCli) StartInstrumentation(cmd *cobra.Command) (stopInstrumentation func(error)) { |
||||
baseAttrs := BaseCommandAttributes(cmd, cli) |
||||
return startCobraCommandTimer(cli.MeterProvider(), baseAttrs) |
||||
} |
||||
|
||||
func startCobraCommandTimer(mp metric.MeterProvider, attrs []attribute.KeyValue) func(err error) { |
||||
meter := getDefaultMeter(mp) |
||||
durationCounter, _ := meter.Float64Counter( |
||||
"command.time", |
||||
metric.WithDescription("Measures the duration of the cobra command"), |
||||
metric.WithUnit("ms"), |
||||
) |
||||
start := time.Now() |
||||
|
||||
return func(err error) { |
||||
// Use a new context for the export so that the command being cancelled
|
||||
// doesn't affect the metrics, and we get metrics for cancelled commands.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), exportTimeout) |
||||
defer cancel() |
||||
|
||||
duration := float64(time.Since(start)) / float64(time.Millisecond) |
||||
cmdStatusAttrs := attributesFromError(err) |
||||
durationCounter.Add(ctx, duration, |
||||
metric.WithAttributes(attrs...), |
||||
metric.WithAttributes(cmdStatusAttrs...), |
||||
) |
||||
if mp, ok := mp.(MeterProvider); ok { |
||||
mp.ForceFlush(ctx) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func stdioAttributes(streams Streams) []attribute.KeyValue { |
||||
return []attribute.KeyValue{ |
||||
attribute.Bool("command.stdin.isatty", streams.In().IsTerminal()), |
||||
attribute.Bool("command.stdout.isatty", streams.Out().IsTerminal()), |
||||
attribute.Bool("command.stderr.isatty", streams.Err().IsTerminal()), |
||||
} |
||||
} |
||||
|
||||
func attributesFromError(err error) []attribute.KeyValue { |
||||
attrs := []attribute.KeyValue{} |
||||
exitCode := 0 |
||||
if err != nil { |
||||
exitCode = 1 |
||||
if stderr, ok := err.(statusError); ok { |
||||
// StatusError should only be used for errors, and all errors should
|
||||
// have a non-zero exit status, so only set this here if this value isn't 0
|
||||
if stderr.StatusCode != 0 { |
||||
exitCode = stderr.StatusCode |
||||
} |
||||
} |
||||
attrs = append(attrs, attribute.String("command.error.type", otelErrorType(err))) |
||||
} |
||||
attrs = append(attrs, attribute.Int("command.status.code", exitCode)) |
||||
|
||||
return attrs |
||||
} |
||||
|
||||
// otelErrorType returns an attribute for the error type based on the error category.
|
||||
func otelErrorType(err error) string { |
||||
name := "generic" |
||||
if errors.Is(err, context.Canceled) { |
||||
name = "canceled" |
||||
} |
||||
return name |
||||
} |
||||
|
||||
// statusError reports an unsuccessful exit by a command.
|
||||
type statusError struct { |
||||
Status string |
||||
StatusCode int |
||||
} |
||||
|
||||
func (e statusError) Error() string { |
||||
return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) |
||||
} |
||||
|
||||
// getCommandName gets the cobra command name in the format
|
||||
// `... parentCommandName commandName` by traversing it's parent commands recursively.
|
||||
// until the root command is reached.
|
||||
//
|
||||
// Note: The root command's name is excluded. If cmd is the root cmd, return ""
|
||||
func getCommandName(cmd *cobra.Command) string { |
||||
fullCmdName := getFullCommandName(cmd) |
||||
i := strings.Index(fullCmdName, " ") |
||||
if i == -1 { |
||||
return "" |
||||
} |
||||
return fullCmdName[i+1:] |
||||
} |
||||
|
||||
// getFullCommandName gets the full cobra command name in the format
|
||||
// `... parentCommandName commandName` by traversing it's parent commands recursively
|
||||
// until the root command is reached.
|
||||
func getFullCommandName(cmd *cobra.Command) string { |
||||
if cmd.HasParent() { |
||||
return fmt.Sprintf("%s %s", getFullCommandName(cmd.Parent()), cmd.Name()) |
||||
} |
||||
return cmd.Name() |
||||
} |
||||
|
||||
// getDefaultMeter gets the default metric.Meter for the application
|
||||
// using the given metric.MeterProvider
|
||||
func getDefaultMeter(mp metric.MeterProvider) metric.Meter { |
||||
return mp.Meter( |
||||
"github.com/docker/cli", |
||||
metric.WithInstrumentationVersion(version.Version), |
||||
) |
||||
} |
File diff suppressed because it is too large
Load Diff
@ -1,18 +0,0 @@ |
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
// ExecConfig is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
type ExecConfig struct { |
||||
User string // User that will run the command
|
||||
Privileged bool // Is the container in privileged mode
|
||||
Tty bool // Attach standard streams to a tty.
|
||||
ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width]
|
||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||
AttachStderr bool // Attach the standard error
|
||||
AttachStdout bool // Attach the standard output
|
||||
Detach bool // Execute in detach mode
|
||||
DetachKeys string // Escape keys for detach
|
||||
Env []string // Environment variables
|
||||
WorkingDir string // Working directory
|
||||
Cmd []string // Execution commands and args
|
||||
} |
@ -0,0 +1,44 @@ |
||||
package container |
||||
|
||||
import ( |
||||
"io" |
||||
"os" |
||||
"time" |
||||
) |
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
type PruneReport struct { |
||||
ContainersDeleted []string |
||||
SpaceReclaimed uint64 |
||||
} |
||||
|
||||
// PathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
type PathStat struct { |
||||
Name string `json:"name"` |
||||
Size int64 `json:"size"` |
||||
Mode os.FileMode `json:"mode"` |
||||
Mtime time.Time `json:"mtime"` |
||||
LinkTarget string `json:"linkTarget"` |
||||
} |
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container
|
||||
type CopyToContainerOptions struct { |
||||
AllowOverwriteDirWithFile bool |
||||
CopyUIDGID bool |
||||
} |
||||
|
||||
// StatsResponseReader wraps an io.ReadCloser to read (a stream of) stats
|
||||
// for a container, as produced by the GET "/stats" endpoint.
|
||||
//
|
||||
// The OSType field is set to the server's platform to allow
|
||||
// platform-specific handling of the response.
|
||||
//
|
||||
// TODO(thaJeztah): remove this wrapper, and make OSType part of [StatsResponse].
|
||||
type StatsResponseReader struct { |
||||
Body io.ReadCloser `json:"body"` |
||||
OSType string `json:"ostype"` |
||||
} |
@ -0,0 +1,13 @@ |
||||
package container |
||||
|
||||
import "github.com/docker/docker/api/types/network" |
||||
|
||||
// CreateRequest is the request message sent to the server for container
|
||||
// create calls. It is a config wrapper that holds the container [Config]
|
||||
// (portable) and the corresponding [HostConfig] (non-portable) and
|
||||
// [network.NetworkingConfig].
|
||||
type CreateRequest struct { |
||||
*Config |
||||
HostConfig *HostConfig `json:"HostConfig,omitempty"` |
||||
NetworkingConfig *network.NetworkingConfig `json:"NetworkingConfig,omitempty"` |
||||
} |
@ -0,0 +1,43 @@ |
||||
package container |
||||
|
||||
// ExecOptions is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
type ExecOptions struct { |
||||
User string // User that will run the command
|
||||
Privileged bool // Is the container in privileged mode
|
||||
Tty bool // Attach standard streams to a tty.
|
||||
ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width]
|
||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||
AttachStderr bool // Attach the standard error
|
||||
AttachStdout bool // Attach the standard output
|
||||
Detach bool // Execute in detach mode
|
||||
DetachKeys string // Escape keys for detach
|
||||
Env []string // Environment variables
|
||||
WorkingDir string // Working directory
|
||||
Cmd []string // Execution commands and args
|
||||
} |
||||
|
||||
// ExecStartOptions is a temp struct used by execStart
|
||||
// Config fields is part of ExecConfig in runconfig package
|
||||
type ExecStartOptions struct { |
||||
// ExecStart will first check if it's detached
|
||||
Detach bool |
||||
// Check if there's a tty
|
||||
Tty bool |
||||
// Terminal size [height, width], unused if Tty == false
|
||||
ConsoleSize *[2]uint `json:",omitempty"` |
||||
} |
||||
|
||||
// ExecAttachOptions is a temp struct used by execAttach.
|
||||
//
|
||||
// TODO(thaJeztah): make this a separate type; ContainerExecAttach does not use the Detach option, and cannot run detached.
|
||||
type ExecAttachOptions = ExecStartOptions |
||||
|
||||
// ExecInspect holds information returned by exec inspect.
|
||||
type ExecInspect struct { |
||||
ExecID string `json:"ID"` |
||||
ContainerID string |
||||
Running bool |
||||
ExitCode int |
||||
Pid int |
||||
} |
@ -1,9 +1,47 @@ |
||||
package image |
||||
|
||||
import "time" |
||||
import ( |
||||
"io" |
||||
"time" |
||||
) |
||||
|
||||
// Metadata contains engine-local data about the image.
|
||||
type Metadata struct { |
||||
// LastTagTime is the date and time at which the image was last tagged.
|
||||
LastTagTime time.Time `json:",omitempty"` |
||||
} |
||||
|
||||
// PruneReport contains the response for Engine API:
|
||||
// POST "/images/prune"
|
||||
type PruneReport struct { |
||||
ImagesDeleted []DeleteResponse |
||||
SpaceReclaimed uint64 |
||||
} |
||||
|
||||
// LoadResponse returns information to the client about a load process.
|
||||
//
|
||||
// TODO(thaJeztah): remove this type, and just use an io.ReadCloser
|
||||
//
|
||||
// This type was added in https://github.com/moby/moby/pull/18878, related
|
||||
// to https://github.com/moby/moby/issues/19177;
|
||||
//
|
||||
// Make docker load to output json when the response content type is json
|
||||
// Swarm hijacks the response from docker load and returns JSON rather
|
||||
// than plain text like the Engine does. This makes the API library to return
|
||||
// information to figure that out.
|
||||
//
|
||||
// However the "load" endpoint unconditionally returns JSON;
|
||||
// https://github.com/moby/moby/blob/7b9d2ef6e5518a3d3f3cc418459f8df786cfbbd1/api/server/router/image/image_routes.go#L248-L255
|
||||
//
|
||||
// PR https://github.com/moby/moby/pull/21959 made the response-type depend
|
||||
// on whether "quiet" was set, but this logic got changed in a follow-up
|
||||
// https://github.com/moby/moby/pull/25557, which made the JSON response-type
|
||||
// unconditionally, but the output produced depend on whether"quiet" was set.
|
||||
//
|
||||
// We should deprecated the "quiet" option, as it's really a client
|
||||
// responsibility.
|
||||
type LoadResponse struct { |
||||
// Body must be closed to avoid a resource leak
|
||||
Body io.ReadCloser |
||||
JSON bool |
||||
} |
||||
|
@ -0,0 +1,19 @@ |
||||
package network |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// CreateResponse NetworkCreateResponse
|
||||
//
|
||||
// OK response to NetworkCreate operation
|
||||
// swagger:model CreateResponse
|
||||
type CreateResponse struct { |
||||
|
||||
// The ID of the created network.
|
||||
// Required: true
|
||||
ID string `json:"Id"` |
||||
|
||||
// Warnings encountered when creating the container
|
||||
// Required: true
|
||||
Warning string `json:"Warning"` |
||||
} |
@ -0,0 +1,47 @@ |
||||
package registry |
||||
|
||||
import ( |
||||
"context" |
||||
|
||||
"github.com/docker/docker/api/types/filters" |
||||
) |
||||
|
||||
// SearchOptions holds parameters to search images with.
|
||||
type SearchOptions struct { |
||||
RegistryAuth string |
||||
|
||||
// PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can
|
||||
// supply to retry operations after getting an authorization error.
|
||||
//
|
||||
// It must return the registry authentication header value in base64
|
||||
// format, or an error if the privilege request fails.
|
||||
PrivilegeFunc func(context.Context) (string, error) |
||||
Filters filters.Args |
||||
Limit int |
||||
} |
||||
|
||||
// SearchResult describes a search result returned from a registry
|
||||
type SearchResult struct { |
||||
// StarCount indicates the number of stars this repository has
|
||||
StarCount int `json:"star_count"` |
||||
// IsOfficial is true if the result is from an official repository.
|
||||
IsOfficial bool `json:"is_official"` |
||||
// Name is the name of the repository
|
||||
Name string `json:"name"` |
||||
// IsAutomated indicates whether the result is automated.
|
||||
//
|
||||
// Deprecated: the "is_automated" field is deprecated and will always be "false".
|
||||
IsAutomated bool `json:"is_automated"` |
||||
// Description is a textual description of the repository
|
||||
Description string `json:"description"` |
||||
} |
||||
|
||||
// SearchResults lists a collection search results returned from a registry
|
||||
type SearchResults struct { |
||||
// Query contains the query string that generated the search results
|
||||
Query string `json:"query"` |
||||
// NumResults indicates the number of results the query returned
|
||||
NumResults int `json:"num_results"` |
||||
// Results is a slice containing the actual results for the search
|
||||
Results []SearchResult `json:"results"` |
||||
} |
@ -1,35 +1,210 @@ |
||||
package types |
||||
|
||||
import ( |
||||
"github.com/docker/docker/api/types/container" |
||||
"github.com/docker/docker/api/types/events" |
||||
"github.com/docker/docker/api/types/image" |
||||
"github.com/docker/docker/api/types/network" |
||||
"github.com/docker/docker/api/types/registry" |
||||
"github.com/docker/docker/api/types/volume" |
||||
) |
||||
|
||||
// ImageImportOptions holds information to import images from the client host.
|
||||
// ImagesPruneReport contains the response for Engine API:
|
||||
// POST "/images/prune"
|
||||
//
|
||||
// Deprecated: use [image.ImportOptions].
|
||||
type ImageImportOptions = image.ImportOptions |
||||
// Deprecated: use [image.PruneReport].
|
||||
type ImagesPruneReport = image.PruneReport |
||||
|
||||
// ImageCreateOptions holds information to create images.
|
||||
// VolumesPruneReport contains the response for Engine API:
|
||||
// POST "/volumes/prune".
|
||||
//
|
||||
// Deprecated: use [image.CreateOptions].
|
||||
type ImageCreateOptions = image.CreateOptions |
||||
// Deprecated: use [volume.PruneReport].
|
||||
type VolumesPruneReport = volume.PruneReport |
||||
|
||||
// ImagePullOptions holds information to pull images.
|
||||
// NetworkCreateRequest is the request message sent to the server for network create call.
|
||||
//
|
||||
// Deprecated: use [image.PullOptions].
|
||||
type ImagePullOptions = image.PullOptions |
||||
// Deprecated: use [network.CreateRequest].
|
||||
type NetworkCreateRequest = network.CreateRequest |
||||
|
||||
// ImagePushOptions holds information to push images.
|
||||
// NetworkCreate is the expected body of the "create network" http request message
|
||||
//
|
||||
// Deprecated: use [image.PushOptions].
|
||||
type ImagePushOptions = image.PushOptions |
||||
// Deprecated: use [network.CreateOptions].
|
||||
type NetworkCreate = network.CreateOptions |
||||
|
||||
// ImageListOptions holds parameters to list images with.
|
||||
// NetworkListOptions holds parameters to filter the list of networks with.
|
||||
//
|
||||
// Deprecated: use [image.ListOptions].
|
||||
type ImageListOptions = image.ListOptions |
||||
// Deprecated: use [network.ListOptions].
|
||||
type NetworkListOptions = network.ListOptions |
||||
|
||||
// ImageRemoveOptions holds parameters to remove images.
|
||||
// NetworkCreateResponse is the response message sent by the server for network create call.
|
||||
//
|
||||
// Deprecated: use [image.RemoveOptions].
|
||||
type ImageRemoveOptions = image.RemoveOptions |
||||
// Deprecated: use [network.CreateResponse].
|
||||
type NetworkCreateResponse = network.CreateResponse |
||||
|
||||
// NetworkInspectOptions holds parameters to inspect network.
|
||||
//
|
||||
// Deprecated: use [network.InspectOptions].
|
||||
type NetworkInspectOptions = network.InspectOptions |
||||
|
||||
// NetworkConnect represents the data to be used to connect a container to the network
|
||||
//
|
||||
// Deprecated: use [network.ConnectOptions].
|
||||
type NetworkConnect = network.ConnectOptions |
||||
|
||||
// NetworkDisconnect represents the data to be used to disconnect a container from the network
|
||||
//
|
||||
// Deprecated: use [network.DisconnectOptions].
|
||||
type NetworkDisconnect = network.DisconnectOptions |
||||
|
||||
// EndpointResource contains network resources allocated and used for a container in a network.
|
||||
//
|
||||
// Deprecated: use [network.EndpointResource].
|
||||
type EndpointResource = network.EndpointResource |
||||
|
||||
// NetworkResource is the body of the "get network" http response message/
|
||||
//
|
||||
// Deprecated: use [network.Inspect] or [network.Summary] (for list operations).
|
||||
type NetworkResource = network.Inspect |
||||
|
||||
// NetworksPruneReport contains the response for Engine API:
|
||||
// POST "/networks/prune"
|
||||
//
|
||||
// Deprecated: use [network.PruneReport].
|
||||
type NetworksPruneReport = network.PruneReport |
||||
|
||||
// ExecConfig is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
//
|
||||
// Deprecated: use [container.ExecOptions].
|
||||
type ExecConfig = container.ExecOptions |
||||
|
||||
// ExecStartCheck is a temp struct used by execStart
|
||||
// Config fields is part of ExecConfig in runconfig package
|
||||
//
|
||||
// Deprecated: use [container.ExecStartOptions] or [container.ExecAttachOptions].
|
||||
type ExecStartCheck = container.ExecStartOptions |
||||
|
||||
// ContainerExecInspect holds information returned by exec inspect.
|
||||
//
|
||||
// Deprecated: use [container.ExecInspect].
|
||||
type ContainerExecInspect = container.ExecInspect |
||||
|
||||
// ContainersPruneReport contains the response for Engine API:
|
||||
// POST "/containers/prune"
|
||||
//
|
||||
// Deprecated: use [container.PruneReport].
|
||||
type ContainersPruneReport = container.PruneReport |
||||
|
||||
// ContainerPathStat is used to encode the header from
|
||||
// GET "/containers/{name:.*}/archive"
|
||||
// "Name" is the file or directory name.
|
||||
//
|
||||
// Deprecated: use [container.PathStat].
|
||||
type ContainerPathStat = container.PathStat |
||||
|
||||
// CopyToContainerOptions holds information
|
||||
// about files to copy into a container.
|
||||
//
|
||||
// Deprecated: use [container.CopyToContainerOptions],
|
||||
type CopyToContainerOptions = container.CopyToContainerOptions |
||||
|
||||
// ContainerStats contains response of Engine API:
|
||||
// GET "/stats"
|
||||
//
|
||||
// Deprecated: use [container.StatsResponseReader].
|
||||
type ContainerStats = container.StatsResponseReader |
||||
|
||||
// ThrottlingData stores CPU throttling stats of one running container.
|
||||
// Not used on Windows.
|
||||
//
|
||||
// Deprecated: use [container.ThrottlingData].
|
||||
type ThrottlingData = container.ThrottlingData |
||||
|
||||
// CPUUsage stores All CPU stats aggregated since container inception.
|
||||
//
|
||||
// Deprecated: use [container.CPUUsage].
|
||||
type CPUUsage = container.CPUUsage |
||||
|
||||
// CPUStats aggregates and wraps all CPU related info of container
|
||||
//
|
||||
// Deprecated: use [container.CPUStats].
|
||||
type CPUStats = container.CPUStats |
||||
|
||||
// MemoryStats aggregates all memory stats since container inception on Linux.
// Windows returns stats for commit and private working set only.
//
// Deprecated: use [container.MemoryStats].
type MemoryStats = container.MemoryStats
||||
|
||||
// BlkioStatEntry is one small entity to store a piece of Blkio stats.
// Not used on Windows.
//
// Deprecated: use [container.BlkioStatEntry].
type BlkioStatEntry = container.BlkioStatEntry
||||
|
||||
// BlkioStats stores All IO service stats for data read and write.
// This is a Linux specific structure as the differences between expressing
// block I/O on Windows and Linux are sufficiently significant to make
// little sense attempting to morph into a combined structure.
//
// Deprecated: use [container.BlkioStats].
type BlkioStats = container.BlkioStats
||||
|
||||
// StorageStats is the disk I/O stats for read/write on Windows.
//
// Deprecated: use [container.StorageStats].
type StorageStats = container.StorageStats
||||
|
||||
// NetworkStats aggregates the network stats of one container.
//
// Deprecated: use [container.NetworkStats].
type NetworkStats = container.NetworkStats
||||
|
||||
// PidsStats contains the stats of a container's pids.
//
// Deprecated: use [container.PidsStats].
type PidsStats = container.PidsStats
||||
|
||||
// Stats is Ultimate struct aggregating all types of stats of one container.
//
// Deprecated: use [container.Stats].
type Stats = container.Stats
||||
|
||||
// StatsJSON is newly used Networks.
// NOTE(review): upstream comment is unclear; the alias target suggests this is
// the full stats response (presumably Stats plus per-interface network stats) —
// confirm against [container.StatsResponse].
//
// Deprecated: use [container.StatsResponse].
type StatsJSON = container.StatsResponse
||||
|
||||
// EventsOptions holds parameters to filter events with.
//
// Deprecated: use [events.ListOptions].
type EventsOptions = events.ListOptions
||||
|
||||
// ImageSearchOptions holds parameters to search images with.
//
// Deprecated: use [registry.SearchOptions].
type ImageSearchOptions = registry.SearchOptions
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
//
|
||||
// Deprecated: use [image.ImportSource].
|
||||
type ImageImportSource image.ImportSource |
||||
|
||||
// ImageLoadResponse returns information to the client about a load process.
//
// Deprecated: use [image.LoadResponse].
type ImageLoadResponse = image.LoadResponse
||||
|
||||
// ContainerNode stores information about the node that a container
// is running on. It's only used by the Docker Swarm standalone API.
//
// Deprecated: ContainerNode was used for the classic Docker Swarm standalone API. It will be removed in the next release.
type ContainerNode struct {
	ID        string            // node identifier
	IPAddress string            `json:"IP"` // node IP address; serialized as "IP" for API compatibility
	Addr      string            // node address (presumably host:port — confirm against the classic Swarm API)
	Name      string            // node name
	Cpus      int               // number of CPUs on the node
	Memory    int64             // total memory on the node (presumably bytes — confirm against the classic Swarm API)
	Labels    map[string]string // node labels
}
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue