[Cleanup] Types, ready-log-messages & closing connections (#818)

- new special internal role `initServer`, used only to determine the correct ready-log-message
- ready-log-messages are now looked up by role and the new `Intent` type (cluster-create/cluster-start/node-create/node-start), since especially the init server emits different log messages indicating that we can proceed with the next step (see the sketch after the file list below)
- moving types around:
	- K3s env vars now under .../types/k3s/env.go
	- defaults now under .../types/defaults.go
	- ...
- improved waiting for log messages
	- no longer checking the whole log again and again in a loop
	- follow the log with a single reader (and retry in case we see a fatal error, meaning that the K3s container will restart -> backoff after 10 tries)
	- BREAKING: new `*runtimeTypes.NodeLogsOpts` parameter in `GetNodeLogs`
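
For downstream users of the runtimes interface, here is a minimal sketch of the call-site change (the wrapper function and package name are illustrative; `runtimes.Runtime.GetNodeLogs`, `runtimeTypes.NodeLogsOpts` and the import paths are taken from the diff below):

```go
package example // illustrative, not part of k3d

import (
	"context"
	"fmt"
	"time"

	"github.com/rancher/k3d/v5/pkg/runtimes"
	runtimeTypes "github.com/rancher/k3d/v5/pkg/runtimes/types"
	k3d "github.com/rancher/k3d/v5/pkg/types"
)

// followNodeLogs is a hypothetical caller adapting to the new signature.
func followNodeLogs(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, since time.Time) error {
	// before: out, err := runtime.GetNodeLogs(ctx, node, since)
	// after:  the extra *runtimeTypes.NodeLogsOpts selects between a snapshot and a followed stream
	out, err := runtime.GetNodeLogs(ctx, node, since, &runtimeTypes.NodeLogsOpts{Follow: true})
	if out != nil {
		defer out.Close()
	}
	if err != nil {
		return fmt.Errorf("failed to get logs from node '%s': %w", node.Name, err)
	}
	// ... consume the stream line by line, as NodeWaitForLogMessage now does with a bufio.Scanner
	return nil
}
```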
pull/827/head v5.0.2-rc.1
Thorsten Klein 3 years ago committed by GitHub
parent 407ced6405
commit f8f17caf78
20 changed files:

1. cmd/cluster/clusterStart.go (5 changed lines)
2. pkg/client/cluster.go (19 changed lines)
3. pkg/client/loadbalancer.go (2 changed lines)
4. pkg/client/node.go (146 changed lines)
5. pkg/runtimes/docker/node.go (12 changed lines)
6. pkg/runtimes/docker/translate.go (2 changed lines)
7. pkg/runtimes/docker/translate_test.go (2 changed lines)
8. pkg/runtimes/docker/util.go (1 changed line)
9. pkg/runtimes/runtime.go (2 changed lines)
10. pkg/runtimes/types/types.go (4 changed lines)
11. pkg/types/defaults.go (96 changed lines)
12. pkg/types/env.go (42 changed lines)
13. pkg/types/fixes/fixes.go (6 changed lines)
14. pkg/types/images.go (28 changed lines)
15. pkg/types/intent.go (32 changed lines)
16. pkg/types/k3s/env.go (29 changed lines)
17. pkg/types/k3slogs.go (69 changed lines)
18. pkg/types/registry.go (59 changed lines)
19. pkg/types/types.go (131 changed lines)
20. version/version.go (17 changed lines)
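
As mentioned in the commit message above, ready-log-messages are now resolved from the node's role plus the caller's intent. A minimal sketch of how the call sites below use this (the helper name is illustrative; `GetReadyLogMessage`, `NodeStartOpts` and the `Intent` constants come from the diff):

```go
package example // illustrative, not part of k3d

import (
	k3d "github.com/rancher/k3d/v5/pkg/types"
)

// resolveReadyLogMessage mirrors the logic NodeStart uses below:
// an explicitly set message wins, otherwise it is looked up by role + intent.
func resolveReadyLogMessage(node *k3d.Node, opts *k3d.NodeStartOpts) string {
	if opts.ReadyLogMessage != "" {
		return opts.ReadyLogMessage
	}
	// Init server nodes resolve to the internal "initServer" role, which has distinct
	// messages for cluster-create vs. cluster-start; other roles fall back to IntentAny.
	return k3d.GetReadyLogMessage(node, opts.Intent)
}
```

Callers that have no specific intent can pass `k3d.IntentAny` (the empty string), which yields the role's generic message.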

@ -37,7 +37,9 @@ import (
// NewCmdClusterStart returns a new cobra command
func NewCmdClusterStart() *cobra.Command {
startClusterOpts := types.ClusterStartOpts{}
startClusterOpts := types.ClusterStartOpts{
Intent: k3d.IntentClusterStart,
}
// create new command
cmd := &cobra.Command{
@ -59,6 +61,7 @@ func NewCmdClusterStart() *cobra.Command {
if err := client.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
l.Log().Fatalln(err)
}
l.Log().Infof("Started cluster '%s'", c.Name)
}
}
},

@ -89,6 +89,7 @@ func ClusterRun(ctx context.Context, runtime k3drt.Runtime, clusterConfig *confi
Timeout: clusterConfig.ClusterCreateOpts.Timeout, // TODO: here we should consider the time used so far
NodeHooks: clusterConfig.ClusterCreateOpts.NodeHooks,
EnvironmentInfo: envInfo,
Intent: k3d.IntentClusterCreate,
}); err != nil {
return fmt.Errorf("Failed Cluster Start: %+v", err)
}
@ -379,7 +380,7 @@ ClusterCreatOpts:
// connection url is always the name of the first server node (index 0) // TODO: change this to the server loadbalancer
connectionURL := fmt.Sprintf("https://%s:%s", GenerateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort)
clusterCreateOpts.GlobalLabels[k3d.LabelClusterURL] = connectionURL
clusterCreateOpts.GlobalEnv = append(clusterCreateOpts.GlobalEnv, fmt.Sprintf("%s=%s", k3d.K3sEnvClusterToken, cluster.Token))
clusterCreateOpts.GlobalEnv = append(clusterCreateOpts.GlobalEnv, fmt.Sprintf("%s=%s", k3s.EnvClusterToken, cluster.Token))
nodeSetup := func(node *k3d.Node) error {
// cluster specific settings
@ -413,12 +414,12 @@ ClusterCreatOpts:
// the cluster has an init server node, but its not this one, so connect it to the init node
if cluster.InitNode != nil && !node.ServerOpts.IsInit {
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3d.K3sEnvClusterConnectURL, connectionURL))
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, connectionURL))
node.RuntimeLabels[k3d.LabelServerIsInit] = "false" // set label, that this server node is not the init server
}
} else if node.Role == k3d.AgentRole {
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3d.K3sEnvClusterConnectURL, connectionURL))
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, connectionURL))
}
node.Networks = []string{cluster.Network.Name}
@ -822,6 +823,10 @@ func GenerateNodeName(cluster string, role k3d.Role, suffix int) string {
func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterStartOpts types.ClusterStartOpts) error {
l.Log().Infof("Starting cluster '%s'", cluster.Name)
if clusterStartOpts.Intent == "" {
clusterStartOpts.Intent = k3d.IntentClusterStart
}
if clusterStartOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, clusterStartOpts.Timeout)
@ -860,7 +865,7 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
if err := NodeStart(ctx, runtime, initNode, &k3d.NodeStartOpts{
Wait: true, // always wait for the init node
NodeHooks: clusterStartOpts.NodeHooks,
ReadyLogMessage: "Running kube-apiserver", // initNode means, that we're using etcd -> this will need quorum, so "k3s is up and running" won't happen right now
ReadyLogMessage: types.GetReadyLogMessage(initNode, clusterStartOpts.Intent), // initNode means, that we're using etcd -> this will need quorum, so "k3s is up and running" won't happen right now
EnvironmentInfo: clusterStartOpts.EnvironmentInfo,
}); err != nil {
return fmt.Errorf("Failed to start initializing server node: %+v", err)
@ -1042,12 +1047,12 @@ func SortClusters(clusters []*k3d.Cluster) []*k3d.Cluster {
// corednsAddHost adds a host entry to the CoreDNS configmap if it doesn't exist (a host entry is a single line of the form "IP HOST")
func corednsAddHost(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, ip string, name string) error {
retries := 3
if v, ok := os.LookupEnv("K3D_DEBUG_COREDNS_RETRIES"); ok && v != "" {
l.Log().Debugf("Running with K3D_DEBUG_COREDNS_RETRIES=%s", v)
if v, ok := os.LookupEnv(k3d.K3dEnvDebugCorednsRetries); ok && v != "" {
l.Log().Debugf("Running with %s=%s", k3d.K3dEnvDebugCorednsRetries, v)
if r, err := strconv.Atoi(v); err == nil {
retries = r
} else {
return fmt.Errorf("Invalid value set for env var K3D_DEBUG_COREDNS_RETRIES (%s): %w", v, err)
return fmt.Errorf("Invalid value set for env var %s (%s): %w", k3d.K3dEnvDebugCorednsRetries, v, err)
}
}

@ -86,7 +86,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
successCtx, successCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))
defer successCtxCancel()
err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer.Node, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime)
err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer.Node, k3d.GetReadyLogMessage(cluster.ServerLoadBalancer.Node, k3d.IntentAny), startTime)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
failureCtx, failureCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))

@ -23,6 +23,7 @@ THE SOFTWARE.
package client
import (
"bufio"
"bytes"
"context"
"errors"
@ -30,11 +31,11 @@ import (
"io/ioutil"
"os"
"reflect"
"strconv"
"strings"
"time"
copystruct "github.com/mitchellh/copystructure"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
"github.com/docker/go-connections/nat"
@ -44,9 +45,12 @@ import (
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/pkg/runtimes"
"github.com/rancher/k3d/v5/pkg/runtimes/docker"
runtimeTypes "github.com/rancher/k3d/v5/pkg/runtimes/types"
runtimeErrors "github.com/rancher/k3d/v5/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v5/pkg/types"
"github.com/rancher/k3d/v5/pkg/types/fixes"
"github.com/rancher/k3d/v5/pkg/types/k3s"
"github.com/rancher/k3d/v5/pkg/util"
"golang.org/x/sync/errgroup"
)
@ -172,23 +176,23 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
k3sURLEnvFound := false
k3sTokenEnvFoundIndex := -1
for index, envVar := range node.Env {
if strings.HasPrefix(envVar, k3d.K3sEnvClusterConnectURL) {
if strings.HasPrefix(envVar, k3s.EnvClusterConnectURL) {
k3sURLEnvFound = true
}
if strings.HasPrefix(envVar, k3d.K3sEnvClusterToken) {
if strings.HasPrefix(envVar, k3s.EnvClusterToken) {
k3sTokenEnvFoundIndex = index
}
}
if !k3sURLEnvFound {
if url, ok := node.RuntimeLabels[k3d.LabelClusterURL]; ok {
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3d.K3sEnvClusterConnectURL, url))
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, url))
} else {
l.Log().Warnln("Failed to find K3S_URL value!")
}
}
if k3sTokenEnvFoundIndex != -1 && createNodeOpts.ClusterToken != "" {
l.Log().Debugln("Overriding copied cluster token with value from nodeCreateOpts...")
node.Env[k3sTokenEnvFoundIndex] = fmt.Sprintf("%s=%s", k3d.K3sEnvClusterToken, createNodeOpts.ClusterToken)
node.Env[k3sTokenEnvFoundIndex] = fmt.Sprintf("%s=%s", k3s.EnvClusterToken, createNodeOpts.ClusterToken)
node.RuntimeLabels[k3d.LabelClusterToken] = createNodeOpts.ClusterToken
}
@ -246,8 +250,8 @@ func NodeAddToClusterRemote(ctx context.Context, runtime runtimes.Runtime, node
node.Env = []string{}
}
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3d.K3sEnvClusterConnectURL, clusterRef))
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3d.K3sEnvClusterToken, createNodeOpts.ClusterToken))
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterConnectURL, clusterRef))
node.Env = append(node.Env, fmt.Sprintf("%s=%s", k3s.EnvClusterToken, createNodeOpts.ClusterToken))
if err := NodeRun(ctx, runtime, node, createNodeOpts); err != nil {
return fmt.Errorf("failed to run node '%s': %w", node.Name, err)
@ -316,7 +320,7 @@ func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d
currentNode := node
nodeWaitGroup.Go(func() error {
l.Log().Debugf("Starting to wait for node '%s'", currentNode.Name)
readyLogMessage := k3d.ReadyLogMessageByRole[currentNode.Role]
readyLogMessage := k3d.GetReadyLogMessage(currentNode, k3d.IntentNodeCreate)
if readyLogMessage != "" {
return NodeWaitForLogMessage(ctx, runtime, currentNode, readyLogMessage, time.Time{})
}
@ -327,9 +331,7 @@ func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d
}
if err := nodeWaitGroup.Wait(); err != nil {
l.Log().Errorln("Failed to bring up all nodes in time. Check the logs:")
l.Log().Errorf(">>> %+v", err)
return fmt.Errorf("Failed to create nodes")
return fmt.Errorf("failed to create nodes: %w", err)
}
return nil
@ -346,6 +348,7 @@ func NodeRun(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, node
Timeout: nodeCreateOpts.Timeout,
NodeHooks: nodeCreateOpts.NodeHooks,
EnvironmentInfo: nodeCreateOpts.EnvironmentInfo,
Intent: k3d.IntentNodeCreate,
}); err != nil {
return fmt.Errorf("failed to start node '%s': %w", node.Name, err)
}
@ -397,7 +400,7 @@ func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, no
if nodeStartOpts.Wait {
if nodeStartOpts.ReadyLogMessage == "" {
nodeStartOpts.ReadyLogMessage = k3d.ReadyLogMessageByRole[node.Role]
nodeStartOpts.ReadyLogMessage = k3d.GetReadyLogMessage(node, nodeStartOpts.Intent)
}
if nodeStartOpts.ReadyLogMessage != "" {
l.Log().Debugf("Waiting for node %s to get ready (Log: '%s')", node.Name, nodeStartOpts.ReadyLogMessage)
@ -669,61 +672,100 @@ func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3
// NodeWaitForLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
l.Log().Tracef("NodeWaitForLogMessage: Node '%s' waiting for log message '%s' since '%+v'", node.Name, message, since)
for {
select {
case <-ctx.Done():
if ctx.Err() == context.DeadlineExceeded {
d, ok := ctx.Deadline()
if ok {
l.Log().Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now())
}
return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s: %w", message, node.Name, ctx.Err())
// specify max number of retries if container is in crashloop (as defined by last seen message being a fatal log)
backOffLimit := k3d.DefaultNodeWaitForLogMessageCrashLoopBackOffLimit
if l, ok := os.LookupEnv(k3d.K3dEnvDebugNodeWaitBackOffLimit); ok {
limit, err := strconv.Atoi(l)
if err == nil {
backOffLimit = limit
}
}
// start a goroutine to print a warning continuously if a node is restarting for quite some time already
donechan := make(chan struct{})
defer close(donechan)
go func(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, since time.Time, donechan chan struct{}) {
for {
select {
case <-ctx.Done():
return
case <-donechan:
return
default:
}
return ctx.Err()
default:
// check if the container is restarting
running, status, _ := runtime.GetNodeStatus(ctx, node)
if running && status == k3d.NodeStatusRestarting && time.Now().Sub(since) > k3d.NodeWaitForLogMessageRestartWarnTime {
l.Log().Warnf("Node '%s' is restarting for more than %s now. Possibly it will recover soon (e.g. when it's waiting to join). Consider using a creation timeout to avoid waiting forever in a Restart Loop.", node.Name, k3d.NodeWaitForLogMessageRestartWarnTime)
}
time.Sleep(500 * time.Millisecond)
}
// read the logs
out, err := runtime.GetNodeLogs(ctx, node, since)
}(ctx, runtime, node, since, donechan)
// Start loop to check log stream for specified log message.
// We're looping here, as sometimes the containers run into a crash loop, but *may* recover from that
// e.g. when a new server is joining an existing cluster and has to wait for another member to finish learning.
// The logstream returned by docker ends everytime the container restarts, so we have to start from the beginning.
for i := 0; i < backOffLimit; i++ {
// get the log stream (reader is following the logstream)
out, err := runtime.GetNodeLogs(ctx, node, since, &runtimeTypes.NodeLogsOpts{Follow: true})
if out != nil {
defer out.Close()
}
if err != nil {
if out != nil {
out.Close()
}
return fmt.Errorf("Failed waiting for log message '%s' from node '%s': %w", message, node.Name, err)
}
defer out.Close()
buf := new(bytes.Buffer)
nRead, _ := buf.ReadFrom(out)
out.Close()
output := buf.String()
// We're scanning the logstream continuously line-by-line
scanner := bufio.NewScanner(out)
var previousline string
if nRead > 0 && strings.Contains(os.Getenv("K3D_LOG_NODE_WAIT_LOGS"), string(node.Role)) {
l.Log().Tracef("=== Read logs since %s ===\n%s\n", since, output)
}
// check if we can find the specified line in the log
if nRead > 0 && strings.Contains(output, message) {
if l.Log().GetLevel() >= logrus.TraceLevel {
temp := strings.Split(output, "\n")
for _, t := range temp {
if strings.Contains(t, message) {
l.Log().Tracef("Found target log line: `%s`", t)
for scanner.Scan() {
select {
case <-ctx.Done():
if ctx.Err() == context.DeadlineExceeded {
d, ok := ctx.Deadline()
if ok {
l.Log().Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now())
}
return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s: %w", message, node.Name, ctx.Err())
}
return ctx.Err()
default:
}
break
}
// check if the container is restarting
running, status, _ := runtime.GetNodeStatus(ctx, node)
if running && status == k3d.NodeStatusRestarting && time.Now().Sub(since) > k3d.NodeWaitForLogMessageRestartWarnTime {
l.Log().Warnf("Node '%s' is restarting for more than a minute now. Possibly it will recover soon (e.g. when it's waiting to join). Consider using a creation timeout to avoid waiting forever in a Restart Loop.", node.Name)
if strings.Contains(os.Getenv(k3d.K3dEnvLogNodeWaitLogs), string(node.Role)) {
l.Log().Tracef(">>> Parsing log line: `%s`", scanner.Text())
}
// check if we can find the specified line in the log
if strings.Contains(scanner.Text(), message) {
l.Log().Tracef("Found target message `%s` in log line `%s`", message, scanner.Text())
l.Log().Debugf("Finished waiting for log message '%s' from node '%s'", message, node.Name)
return nil
}
previousline = scanner.Text()
}
time.Sleep(500 * time.Millisecond) // wait for half a second to avoid overloading docker (error `socket: too many open files`)
out.Close() // no more input on scanner, but target log not yet found -> close current logreader (precautionary)
// we got here, because the logstream ended (no more input on scanner), so we check if maybe the container crashed
if strings.Contains(previousline, "level=fatal") {
// case 1: last log line we saw contained a fatal error, so probably it crashed and we want to retry on restart
l.Log().Warnf("warning: encountered fatal log from node %s (retrying %d/%d): %s", node.Name, i, backOffLimit, previousline)
out.Close()
time.Sleep(500 * time.Millisecond)
continue
} else {
// case 2: last log line we saw did not contain a fatal error, so we break the loop here and return a generic error
break
}
}
l.Log().Debugf("Finished waiting for log message '%s' from node '%s'", message, node.Name)
return nil
return fmt.Errorf("error waiting for log line `%s` from node '%s': stopped returning log lines", message, node.Name)
}
// NodeFilterByRoles filters a list of nodes by their roles

@ -35,6 +35,8 @@ import (
"github.com/docker/docker/api/types/filters"
l "github.com/rancher/k3d/v5/pkg/logger"
runtimeErr "github.com/rancher/k3d/v5/pkg/runtimes/errors"
runtimeTypes "github.com/rancher/k3d/v5/pkg/runtimes/types"
k3d "github.com/rancher/k3d/v5/pkg/types"
)
@ -271,7 +273,7 @@ func (d Docker) NodeIsRunning(ctx context.Context, node *k3d.Node) (bool, error)
}
// GetNodeLogs returns the logs from a given node
func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time) (io.ReadCloser, error) {
func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time, opts *runtimeTypes.NodeLogsOpts) (io.ReadCloser, error) {
// get the container for the given node
container, err := getNodeContainer(ctx, node)
if err != nil {
@ -298,7 +300,7 @@ func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time
if !since.IsZero() {
sinceStr = since.Format("2006-01-02T15:04:05.999999999Z")
}
logreader, err := docker.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Since: sinceStr})
logreader, err := docker.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Since: sinceStr, Follow: opts.Follow})
if err != nil {
return nil, fmt.Errorf("docker failed to get logs from node '%s' (container '%s'): %w", node.Name, container.ID, err)
}
@ -309,6 +311,9 @@ func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time
// ExecInNodeGetLogs executes a command inside a node and returns the logs to the caller, e.g. to parse them
func (d Docker) ExecInNodeGetLogs(ctx context.Context, node *k3d.Node, cmd []string) (*bufio.Reader, error) {
resp, err := executeInNode(ctx, node, cmd)
if resp != nil {
defer resp.Close()
}
if err != nil {
if resp != nil && resp.Reader != nil { // sometimes the exec process returns with a non-zero exit code, but we still have the logs we
return resp.Reader, err
@ -321,6 +326,9 @@ func (d Docker) ExecInNodeGetLogs(ctx context.Context, node *k3d.Node, cmd []str
// ExecInNode execs a command inside a node
func (d Docker) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) error {
execConnection, err := executeInNode(ctx, node, cmd)
if execConnection != nil {
defer execConnection.Close()
}
if err != nil {
if execConnection != nil && execConnection.Reader != nil {
logs, err := ioutil.ReadAll(execConnection.Reader)

@ -47,7 +47,7 @@ import (
// TranslateNodeToContainer translates a k3d node specification to a docker container representation
func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) {
init := true
if disableInit, err := strconv.ParseBool(os.Getenv("K3D_DEBUG_DISABLE_DOCKER_INIT")); err == nil && disableInit {
if disableInit, err := strconv.ParseBool(os.Getenv(k3d.K3dEnvDebugDisableDockerInit)); err == nil && disableInit {
l.Log().Traceln("docker-init disabled for all containers")
init = false
}

@ -60,7 +60,7 @@ func TestTranslateNodeToContainer(t *testing.T) {
}
init := true
if disableInit, err := strconv.ParseBool(os.Getenv("K3D_DEBUG_DISABLE_DOCKER_INIT")); err == nil && disableInit {
if disableInit, err := strconv.ParseBool(os.Getenv(k3d.K3dEnvDebugDisableDockerInit)); err == nil && disableInit {
init = false
}

@ -149,6 +149,7 @@ func (d Docker) ReadFromNode(ctx context.Context, path string, node *k3d.Node) (
if err != nil {
return nil, fmt.Errorf("failed to get docker client: %w", err)
}
defer docker.Close()
reader, _, err := docker.CopyFromContainer(ctx, nodeContainer.ID, path)
if err != nil {

@ -68,7 +68,7 @@ type Runtime interface {
GetRuntimePath() string // returns e.g. '/var/run/docker.sock' for a default docker setup
ExecInNode(context.Context, *k3d.Node, []string) error
ExecInNodeGetLogs(context.Context, *k3d.Node, []string) (*bufio.Reader, error)
GetNodeLogs(context.Context, *k3d.Node, time.Time) (io.ReadCloser, error)
GetNodeLogs(context.Context, *k3d.Node, time.Time, *runtimeTypes.NodeLogsOpts) (io.ReadCloser, error)
GetImages(context.Context) ([]string, error)
CopyToNode(context.Context, string, string, *k3d.Node) error // @param context, source, destination, node
WriteToNode(context.Context, []byte, string, os.FileMode, *k3d.Node) error // @param context, content, destination, filemode, node

@ -32,3 +32,7 @@ type RuntimeInfo struct {
CgroupDriver string `yaml:",omitempty" json:",omitempty"`
Filesystem string `yaml:",omitempty" json:",omitempty"`
}
type NodeLogsOpts struct {
Follow bool
}

@ -0,0 +1,96 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package types
import (
"fmt"
"github.com/rancher/k3d/v5/pkg/types/k3s"
"github.com/rancher/k3d/v5/version"
)
// DefaultClusterName specifies the default name used for newly created clusters
const DefaultClusterName = "k3s-default"
// DefaultClusterNameMaxLength specifies the maximal length of a passed in cluster name
// This restriction allows us to construct an name consisting of
// <DefaultObjectNamePrefix[3]>-<ClusterName>-<TypeSuffix[5-10]>-<Counter[1-3]>
// ... and still stay within the 64 character limit (e.g. of docker)
const DefaultClusterNameMaxLength = 32
// DefaultObjectNamePrefix defines the name prefix for every object created by k3d
const DefaultObjectNamePrefix = "k3d"
// DefaultRuntimeLabels specifies a set of labels that will be attached to k3d runtime objects by default
var DefaultRuntimeLabels = map[string]string{
"app": "k3d",
}
// DefaultRuntimeLabelsVar specifies a set of labels that will be attached to k3d runtime objects by default but are not static (e.g. across k3d versions)
var DefaultRuntimeLabelsVar = map[string]string{
"k3d.version": version.GetVersion(),
}
// DefaultRoleCmds maps the node roles to their respective default commands
var DefaultRoleCmds = map[Role][]string{
ServerRole: {"server"},
AgentRole: {"agent"},
}
// DefaultTmpfsMounts specifies tmpfs mounts that are required for all k3d nodes
var DefaultTmpfsMounts = []string{
"/run",
"/var/run",
}
// DefaultNodeEnv defines some default environment variables that should be set on every node
var DefaultNodeEnv = []string{
fmt.Sprintf("%s=/output/kubeconfig.yaml", k3s.EnvKubeconfigOutput),
}
// DefaultK3dInternalHostRecord defines the default /etc/hosts entry for the k3d host
const DefaultK3dInternalHostRecord = "host.k3d.internal"
// DefaultImageVolumeMountPath defines the mount path inside k3d nodes where we will mount the shared image volume by default
const DefaultImageVolumeMountPath = "/k3d/images"
// DefaultConfigDirName defines the name of the config directory (where we'll e.g. put the kubeconfigs)
const DefaultConfigDirName = ".k3d" // should end up in $HOME/
// DefaultKubeconfigPrefix defines the default prefix for kubeconfig files
const DefaultKubeconfigPrefix = DefaultObjectNamePrefix + "-kubeconfig"
// DefaultAPIPort defines the default Kubernetes API Port
const DefaultAPIPort = "6443"
// DefaultAPIHost defines the default host (IP) for the Kubernetes API
const DefaultAPIHost = "0.0.0.0"
// GetDefaultObjectName prefixes the passed name with the default prefix
func GetDefaultObjectName(name string) string {
return fmt.Sprintf("%s-%s", DefaultObjectNamePrefix, name)
}
// DefaultNodeWaitForLogMessageCrashLoopBackOffLimit defines the maximum number of retries to find the target log message, if the
// container is in a crash loop.
// This makes sense e.g. when a new server is waiting to join an existing cluster and has to wait for other learners to finish.
const DefaultNodeWaitForLogMessageCrashLoopBackOffLimit = 10

@ -0,0 +1,42 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package types
// k3d config environment variables for options that don't have a place in the config file or CLI
const (
// Log config
K3dEnvLogNodeWaitLogs = "K3D_LOG_NODE_WAIT_LOGS"
// Images
K3dEnvImageLoadbalancer = "K3D_IMAGE_LOADBALANCER"
K3dEnvImageTools = "K3D_IMAGE_TOOLS"
K3dEnvImageHelperTag = "K3D_HELPER_IMAGE_TAG"
// Debug options
K3dEnvDebugCorednsRetries = "K3D_DEBUG_COREDNS_RETRIES"
K3dEnvDebugDisableDockerInit = "K3D_DEBUG_DISABLE_DOCKER_INIT"
K3dEnvDebugNodeWaitBackOffLimit = "K3D_DEBUG_NODE_WAIT_BACKOFF_LIMIT"
// Fixes
K3dEnvFixCgroupV2 = "K3D_FIX_CGROUPV2"
K3dEnvFixDNS = "K3D_FIX_DNS"
)

@ -25,6 +25,8 @@ import (
_ "embed"
"os"
"strconv"
k3d "github.com/rancher/k3d/v5/pkg/types"
)
/* NOTE
@ -40,8 +42,8 @@ import (
type K3DFixEnv string
const (
EnvFixCgroupV2 K3DFixEnv = "K3D_FIX_CGROUPV2" // EnvFixCgroupV2 is the environment variable that k3d will check for to enable/disable the cgroupv2 workaround
EnvFixDNS K3DFixEnv = "K3D_FIX_DNS" // EnvFixDNS is the environment variable that check for to enable/disable the application of network magic related to DNS
EnvFixCgroupV2 K3DFixEnv = k3d.K3dEnvFixCgroupV2 // EnvFixCgroupV2 is the environment variable that k3d will check for to enable/disable the cgroupv2 workaround
EnvFixDNS K3DFixEnv = k3d.K3dEnvFixDNS // EnvFixDNS is the environment variable that check for to enable/disable the application of network magic related to DNS
)
var FixEnvs []K3DFixEnv = []K3DFixEnv{

@ -24,6 +24,7 @@ package types
import (
"fmt"
"os"
"strings"
l "github.com/rancher/k3d/v5/pkg/logger"
"github.com/rancher/k3d/v5/version"
@ -45,19 +46,34 @@ const DefaultRegistryImageRepo = "docker.io/library/registry"
const DefaultRegistryImageTag = "2"
func GetLoadbalancerImage() string {
if img := os.Getenv("K3D_IMAGE_LOADBALANCER"); img != "" {
l.Log().Infof("Loadbalancer image set from env var $K3D_IMAGE_LOADBALANCER: %s", img)
if img := os.Getenv(K3dEnvImageLoadbalancer); img != "" {
l.Log().Infof("Loadbalancer image set from env var $%s: %s", K3dEnvImageLoadbalancer, img)
return img
}
return fmt.Sprintf("%s:%s", DefaultLBImageRepo, version.GetHelperImageVersion())
return fmt.Sprintf("%s:%s", DefaultLBImageRepo, GetHelperImageVersion())
}
func GetToolsImage() string {
if img := os.Getenv("K3D_IMAGE_TOOLS"); img != "" {
l.Log().Infof("Tools image set from env var $K3D_IMAGE_TOOLS: %s", img)
if img := os.Getenv(K3dEnvImageTools); img != "" {
l.Log().Infof("Tools image set from env var $%s: %s", K3dEnvImageTools, img)
return img
}
return fmt.Sprintf("%s:%s", DefaultToolsImageRepo, version.GetHelperImageVersion())
return fmt.Sprintf("%s:%s", DefaultToolsImageRepo, GetHelperImageVersion())
}
// GetHelperImageVersion returns the CLI version or 'latest'
func GetHelperImageVersion() string {
if tag := os.Getenv(K3dEnvImageHelperTag); tag != "" {
l.Log().Infoln("Helper image tag set from env var")
return tag
}
if len(version.HelperVersionOverride) > 0 {
return version.HelperVersionOverride
}
if len(version.Version) == 0 {
return "latest"
}
return strings.TrimPrefix(version.Version, "v")
}

@ -0,0 +1,32 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package types
type Intent string
const (
IntentClusterCreate Intent = "cluster-create"
IntentClusterStart Intent = "cluster-start"
IntentNodeCreate Intent = "node-create"
IntentNodeStart Intent = "node-start"
IntentAny Intent = ""
)

@ -0,0 +1,29 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package k3s
// k3s environment variables
const (
EnvClusterToken string = "K3S_TOKEN"
EnvClusterConnectURL string = "K3S_URL"
EnvKubeconfigOutput string = "K3S_KUBECONFIG_OUTPUT"
)

@ -0,0 +1,69 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package types
import (
"time"
l "github.com/rancher/k3d/v5/pkg/logger"
)
// NodeWaitForLogMessageRestartWarnTime is the time after which to warn about a restarting container
const NodeWaitForLogMessageRestartWarnTime = 2 * time.Minute
var ReadyLogMessagesByRoleAndIntent = map[Role]map[Intent]string{
Role(InternalRoleInitServer): {
IntentClusterCreate: "Containerd is now running",
IntentClusterStart: "Running kube-apiserver",
IntentAny: "Running kube-apiserver",
},
ServerRole: {
IntentAny: "k3s is up and running",
},
AgentRole: {
IntentAny: "Successfully registered node",
},
LoadBalancerRole: {
IntentAny: "start worker processes",
},
RegistryRole: {
IntentAny: "listening on",
},
}
func GetReadyLogMessage(node *Node, intent Intent) string {
role := node.Role
if node.Role == ServerRole && node.ServerOpts.IsInit {
role = Role(InternalRoleInitServer)
}
if _, ok := ReadyLogMessagesByRoleAndIntent[role]; ok {
if msg, ok := ReadyLogMessagesByRoleAndIntent[role][intent]; ok {
return msg
} else {
if msg, ok := ReadyLogMessagesByRoleAndIntent[role][IntentAny]; ok {
return msg
}
}
}
l.Log().Warnf("error looking up ready log message for role %s and intent %s: not defined", role, intent)
return ""
}

@ -0,0 +1,59 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package types
// Registry Defaults
const (
DefaultRegistryPort = "5000"
DefaultRegistryName = DefaultObjectNamePrefix + "-registry"
DefaultRegistriesFilePath = "/etc/rancher/k3s/registries.yaml"
DefaultRegistryMountPath = "/var/lib/registry"
DefaultDockerHubAddress = "registry-1.docker.io"
// Default temporary path for the LocalRegistryHosting configmap, from where it will be applied via kubectl
DefaultLocalRegistryHostingConfigmapTempPath = "/tmp/localRegistryHostingCM.yaml"
)
// Registry describes a k3d-managed registry
type Registry struct {
ClusterRef string // filled automatically -> if created with a cluster
Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"` // default: http
Host string `yaml:"host" json:"host"`
Image string `yaml:"image,omitempty" json:"image,omitempty"`
ExposureOpts ExposureOpts `yaml:"expose" json:"expose"`
Options struct {
ConfigFile string `yaml:"configFile,omitempty" json:"configFile,omitempty"`
Proxy struct {
RemoteURL string `yaml:"remoteURL" json:"remoteURL"`
Username string `yaml:"username,omitempty" json:"username,omitempty"`
Password string `yaml:"password,omitempty" json:"password,omitempty"`
} `yaml:"proxy,omitempty" json:"proxy,omitempty"`
} `yaml:"options,omitempty" json:"options,omitempty"`
}
// RegistryExternal describes a minimal spec for an "external" registry
// "external" meaning, that it's unrelated to the current cluster
// e.g. used for the --registry-use flag registry reference
type RegistryExternal struct {
Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"` // default: http
Host string `yaml:"host" json:"host"`
Port string `yaml:"port" json:"port"`
}

@ -23,40 +23,15 @@ package types
import (
"context"
"fmt"
"net"
"time"
"github.com/docker/go-connections/nat"
runtimeTypes "github.com/rancher/k3d/v5/pkg/runtimes/types"
"github.com/rancher/k3d/v5/pkg/types/k3s"
"github.com/rancher/k3d/v5/version"
"inet.af/netaddr"
)
// DefaultClusterName specifies the default name used for newly created clusters
const DefaultClusterName = "k3s-default"
// DefaultClusterNameMaxLength specifies the maximal length of a passed in cluster name
// This restriction allows us to construct an name consisting of
// <DefaultObjectNamePrefix[3]>-<ClusterName>-<TypeSuffix[5-10]>-<Counter[1-3]>
// ... and still stay within the 64 character limit (e.g. of docker)
const DefaultClusterNameMaxLength = 32
// DefaultObjectNamePrefix defines the name prefix for every object created by k3d
const DefaultObjectNamePrefix = "k3d"
// ReadyLogMessageByRole defines the log messages we wait for until a server node is considered ready
var ReadyLogMessageByRole = map[Role]string{
ServerRole: "k3s is up and running",
AgentRole: "Successfully registered node",
LoadBalancerRole: "start worker processes",
RegistryRole: "listening on",
}
// NodeWaitForLogMessageRestartWarnTime is the time after which to warn about a restarting container
const NodeWaitForLogMessageRestartWarnTime = 2 * time.Minute
// NodeStatusRestarting defines the status string that signals the node container is restarting
const NodeStatusRestarting = "restarting"
@ -72,6 +47,12 @@ const (
RegistryRole Role = "registry"
)
type InternalRole Role
const (
InternalRoleInitServer InternalRole = "initServer"
)
// NodeRoles defines the roles available for nodes
var NodeRoles = map[string]Role{
string(ServerRole): ServerRole,
@ -92,16 +73,6 @@ var ClusterExternalNodeRoles = []Role{
RegistryRole,
}
// DefaultRuntimeLabels specifies a set of labels that will be attached to k3d runtime objects by default
var DefaultRuntimeLabels = map[string]string{
"app": "k3d",
}
// DefaultRuntimeLabelsVar specifies a set of labels that will be attached to k3d runtime objects by default but are not static (e.g. across k3d versions)
var DefaultRuntimeLabelsVar = map[string]string{
"k3d.version": version.GetVersion(),
}
// List of k3d technical label name
const (
LabelClusterName string = "k3d.cluster"
@ -125,48 +96,6 @@ const (
LabelNodeStaticIP string = "k3d.node.staticIP"
)
// DefaultRoleCmds maps the node roles to their respective default commands
var DefaultRoleCmds = map[Role][]string{
ServerRole: {"server"},
AgentRole: {"agent"},
}
// DefaultTmpfsMounts specifies tmpfs mounts that are required for all k3d nodes
var DefaultTmpfsMounts = []string{
"/run",
"/var/run",
}
// DefaultNodeEnv defines some default environment variables that should be set on every node
var DefaultNodeEnv = []string{
fmt.Sprintf("%s=/output/kubeconfig.yaml", K3sEnvKubeconfigOutput),
}
// k3s environment variables
const (
K3sEnvClusterToken string = "K3S_TOKEN"
K3sEnvClusterConnectURL string = "K3S_URL"
K3sEnvKubeconfigOutput string = "K3S_KUBECONFIG_OUTPUT"
)
// DefaultK3dInternalHostRecord defines the default /etc/hosts entry for the k3d host
const DefaultK3dInternalHostRecord = "host.k3d.internal"
// DefaultImageVolumeMountPath defines the mount path inside k3d nodes where we will mount the shared image volume by default
const DefaultImageVolumeMountPath = "/k3d/images"
// DefaultConfigDirName defines the name of the config directory (where we'll e.g. put the kubeconfigs)
const DefaultConfigDirName = ".k3d" // should end up in $HOME/
// DefaultKubeconfigPrefix defines the default prefix for kubeconfig files
const DefaultKubeconfigPrefix = DefaultObjectNamePrefix + "-kubeconfig"
// DefaultAPIPort defines the default Kubernetes API Port
const DefaultAPIPort = "6443"
// DefaultAPIHost defines the default host (IP) for the Kubernetes API
const DefaultAPIHost = "0.0.0.0"
// DoNotCopyServerFlags defines a list of commands/args that shouldn't be copied from an existing node when adding a similar node to a cluster
var DoNotCopyServerFlags = []string{
"--cluster-init",
@ -212,6 +141,7 @@ type ClusterStartOpts struct {
Timeout time.Duration
NodeHooks []NodeHook `yaml:"nodeHooks,omitempty" json:"nodeHooks,omitempty"`
EnvironmentInfo *EnvironmentInfo
Intent Intent
}
// ClusterDeleteOpts describe a set of options one can set when deleting a cluster
@ -235,6 +165,7 @@ type NodeStartOpts struct {
NodeHooks []NodeHook `yaml:"nodeHooks,omitempty" json:"nodeHooks,omitempty"`
ReadyLogMessage string
EnvironmentInfo *EnvironmentInfo
Intent Intent
}
// NodeDeleteOpts describes a set of options one can set when deleting a node
@ -370,11 +301,6 @@ type ExternalDatastore struct {
// AgentOpts describes some additional agent role specific opts
type AgentOpts struct{}
// GetDefaultObjectName prefixes the passed name with the default prefix
func GetDefaultObjectName(name string) string {
return fmt.Sprintf("%s-%s", DefaultObjectNamePrefix, name)
}
// NodeState describes the current state of a node
type NodeState struct {
Running bool
@ -382,47 +308,6 @@ type NodeState struct {
Started string
}
/*
* Registry
*/
// Registry Defaults
const (
DefaultRegistryPort = "5000"
DefaultRegistryName = DefaultObjectNamePrefix + "-registry"
DefaultRegistriesFilePath = "/etc/rancher/k3s/registries.yaml"
DefaultRegistryMountPath = "/var/lib/registry"
DefaultDockerHubAddress = "registry-1.docker.io"
// Default temporary path for the LocalRegistryHosting configmap, from where it will be applied via kubectl
DefaultLocalRegistryHostingConfigmapTempPath = "/tmp/localRegistryHostingCM.yaml"
)
// Registry describes a k3d-managed registry
type Registry struct {
ClusterRef string // filled automatically -> if created with a cluster
Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"` // default: http
Host string `yaml:"host" json:"host"`
Image string `yaml:"image,omitempty" json:"image,omitempty"`
ExposureOpts ExposureOpts `yaml:"expose" json:"expose"`
Options struct {
ConfigFile string `yaml:"configFile,omitempty" json:"configFile,omitempty"`
Proxy struct {
RemoteURL string `yaml:"remoteURL" json:"remoteURL"`
Username string `yaml:"username,omitempty" json:"username,omitempty"`
Password string `yaml:"password,omitempty" json:"password,omitempty"`
} `yaml:"proxy,omitempty" json:"proxy,omitempty"`
} `yaml:"options,omitempty" json:"options,omitempty"`
}
// RegistryExternal describes a minimal spec for an "external" registry
// "external" meaning, that it's unrelated to the current cluster
// e.g. used for the --registry-use flag registry reference
type RegistryExternal struct {
Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"` // default: http
Host string `yaml:"host" json:"host"`
Port string `yaml:"port" json:"port"`
}
type EnvironmentInfo struct {
HostGateway net.IP
RuntimeInfo runtimeTypes.RuntimeInfo

@ -23,8 +23,6 @@ package version
import (
"fmt"
"os"
"strings"
"github.com/heroku/docker-registry-client/registry"
l "github.com/rancher/k3d/v5/pkg/logger"
@ -47,21 +45,6 @@ func GetVersion() string {
return Version
}
// GetHelperImageVersion returns the CLI version or 'latest'
func GetHelperImageVersion() string {
if tag := os.Getenv("K3D_HELPER_IMAGE_TAG"); tag != "" {
l.Log().Infoln("Helper image tag set from env var")
return tag
}
if len(HelperVersionOverride) > 0 {
return HelperVersionOverride
}
if len(Version) == 0 {
return "latest"
}
return strings.TrimPrefix(Version, "v")
}
// GetK3sVersion returns the version string for K3s
func GetK3sVersion(latest bool) string {
if latest {
