mirror of https://github.com/k3d-io/k3d
You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
839 lines
28 KiB
839 lines
28 KiB
/*
|
|
Copyright © 2020-2021 The k3d Author(s)
|
|
|
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
of this software and associated documentation files (the "Software"), to deal
|
|
in the Software without restriction, including without limitation the rights
|
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
copies of the Software, and to permit persons to whom the Software is
|
|
furnished to do so, subject to the following conditions:
|
|
|
|
The above copyright notice and this permission notice shall be included in
|
|
all copies or substantial portions of the Software.
|
|
|
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
THE SOFTWARE.
|
|
*/
|
|
|
|
package client
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"errors"
|
|
"fmt"
|
|
"io/ioutil"
|
|
"os"
|
|
"reflect"
|
|
"strings"
|
|
"time"
|
|
|
|
copystruct "github.com/mitchellh/copystructure"
|
|
"github.com/sirupsen/logrus"
|
|
"gopkg.in/yaml.v2"
|
|
|
|
"github.com/docker/go-connections/nat"
|
|
dockerunits "github.com/docker/go-units"
|
|
"github.com/imdario/mergo"
|
|
"github.com/rancher/k3d/v4/pkg/actions"
|
|
l "github.com/rancher/k3d/v4/pkg/logger"
|
|
"github.com/rancher/k3d/v4/pkg/runtimes"
|
|
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
|
|
runtimeErrors "github.com/rancher/k3d/v4/pkg/runtimes/errors"
|
|
k3d "github.com/rancher/k3d/v4/pkg/types"
|
|
"github.com/rancher/k3d/v4/pkg/types/fixes"
|
|
"github.com/rancher/k3d/v4/pkg/util"
|
|
"golang.org/x/sync/errgroup"
|
|
)
|
|
|
|
// NodeAddToCluster adds a node to an existing cluster
|
|
func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.NodeCreateOpts) error {
|
|
targetClusterName := cluster.Name
|
|
cluster, err := ClusterGet(ctx, runtime, cluster)
|
|
if err != nil {
|
|
return fmt.Errorf("Failed to find specified cluster '%s': %w", targetClusterName, err)
|
|
}
|
|
|
|
// network
|
|
node.Networks = []string{cluster.Network.Name}
|
|
|
|
// skeleton
|
|
if node.RuntimeLabels == nil {
|
|
node.RuntimeLabels = map[string]string{
|
|
k3d.LabelRole: string(node.Role),
|
|
}
|
|
}
|
|
node.Env = []string{}
|
|
|
|
// copy labels and env vars from a similar node in the selected cluster
|
|
var srcNode *k3d.Node
|
|
for _, existingNode := range cluster.Nodes {
|
|
if existingNode.Role == node.Role {
|
|
srcNode = existingNode
|
|
break
|
|
}
|
|
}
|
|
// if we didn't find a node with the same role in the cluster, just choose any other node
|
|
if srcNode == nil {
|
|
l.Log().Debugf("Didn't find node with role '%s' in cluster '%s'. Choosing any other node (and using defaults)...", node.Role, cluster.Name)
|
|
node.Cmd = k3d.DefaultRoleCmds[node.Role]
|
|
for _, existingNode := range cluster.Nodes {
|
|
if existingNode.Role != k3d.LoadBalancerRole { // any role except for the LoadBalancer role
|
|
srcNode = existingNode
|
|
break
|
|
}
|
|
}
|
|
}
|
|
|
|
// get node details
|
|
srcNode, err = NodeGet(ctx, runtime, srcNode)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
/*
|
|
* Sanitize Source Node
|
|
* -> remove fields that are not safe to copy as they break something down the stream
|
|
*/
|
|
|
|
// TODO: I guess proper deduplication can be handled in a cleaner/better way or at the infofaker level at some point
|
|
for _, forbiddenMount := range util.DoNotCopyVolumeSuffices {
|
|
for i, mount := range node.Volumes {
|
|
if strings.Contains(mount, forbiddenMount) {
|
|
l.Log().Tracef("Dropping copied volume mount %s to avoid issues...", mount)
|
|
node.Volumes = util.RemoveElementFromStringSlice(node.Volumes, i)
|
|
}
|
|
}
|
|
}
|
|
|
|
// drop port mappings as we cannot use the same port mapping for a two nodes (port collisions)
|
|
srcNode.Ports = nat.PortMap{}
|
|
|
|
// we cannot have two servers as init servers
|
|
if node.Role == k3d.ServerRole {
|
|
for _, forbiddenCmd := range k3d.DoNotCopyServerFlags {
|
|
for i, cmd := range srcNode.Cmd {
|
|
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
|
|
if cmd == forbiddenCmd {
|
|
l.Log().Tracef("Dropping '%s' from source node's cmd", forbiddenCmd)
|
|
srcNode.Cmd = append(srcNode.Cmd[:i], srcNode.Cmd[i+1:]...)
|
|
}
|
|
}
|
|
for i, arg := range node.Args {
|
|
// cut out the '--cluster-init' flag as this should only be done by the initializing server node
|
|
if arg == forbiddenCmd {
|
|
l.Log().Tracef("Dropping '%s' from source node's args", forbiddenCmd)
|
|
srcNode.Args = append(srcNode.Args[:i], srcNode.Args[i+1:]...)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
l.Log().Debugf("Adding node %s to cluster %s based on existing (sanitized) node %s", node.Name, cluster.Name, srcNode.Name)
|
|
l.Log().Tracef("Sanitized Source Node: %+v\nNew Node: %+v", srcNode, node)
|
|
|
|
// fetch registry config
|
|
registryConfigBytes := []byte{}
|
|
registryConfigReader, err := runtime.ReadFromNode(ctx, k3d.DefaultRegistriesFilePath, srcNode)
|
|
if err != nil {
|
|
if !errors.Is(err, runtimeErrors.ErrRuntimeFileNotFound) {
|
|
l.Log().Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
|
|
}
|
|
} else {
|
|
defer registryConfigReader.Close()
|
|
|
|
var err error
|
|
registryConfigBytes, err = ioutil.ReadAll(registryConfigReader)
|
|
if err != nil {
|
|
l.Log().Warnf("Failed to read registry config from node %s: %+v", node.Name, err)
|
|
}
|
|
registryConfigReader.Close()
|
|
registryConfigBytes = bytes.Trim(registryConfigBytes[512:], "\x00") // trim control characters, etc.
|
|
}
|
|
|
|
// merge node config of new node into existing node config
|
|
if err := mergo.MergeWithOverwrite(srcNode, *node); err != nil {
|
|
return fmt.Errorf("failed to merge new node config into existing node config: %w", err)
|
|
}
|
|
|
|
node = srcNode
|
|
|
|
l.Log().Debugf("Resulting node %+v", node)
|
|
|
|
k3sURLFound := false
|
|
for _, envVar := range node.Env {
|
|
if strings.HasPrefix(envVar, "K3S_URL") {
|
|
k3sURLFound = true
|
|
break
|
|
}
|
|
}
|
|
if !k3sURLFound {
|
|
if url, ok := node.RuntimeLabels[k3d.LabelClusterURL]; ok {
|
|
node.Env = append(node.Env, fmt.Sprintf("K3S_URL=%s", url))
|
|
} else {
|
|
l.Log().Warnln("Failed to find K3S_URL value!")
|
|
}
|
|
}
|
|
|
|
// add node actions
|
|
if len(registryConfigBytes) != 0 {
|
|
if createNodeOpts.NodeHooks == nil {
|
|
createNodeOpts.NodeHooks = []k3d.NodeHook{}
|
|
}
|
|
createNodeOpts.NodeHooks = append(createNodeOpts.NodeHooks, k3d.NodeHook{
|
|
Stage: k3d.LifecycleStagePreStart,
|
|
Action: actions.WriteFileAction{
|
|
Runtime: runtime,
|
|
Content: registryConfigBytes,
|
|
Dest: k3d.DefaultRegistriesFilePath,
|
|
Mode: 0644,
|
|
},
|
|
})
|
|
}
|
|
|
|
// clear status fields
|
|
node.State.Running = false
|
|
node.State.Status = ""
|
|
|
|
if err := NodeRun(ctx, runtime, node, createNodeOpts); err != nil {
|
|
return fmt.Errorf("failed to run node '%s': %w", node.Name, err)
|
|
}
|
|
|
|
// if it's a server node, then update the loadbalancer configuration
|
|
if node.Role == k3d.ServerRole {
|
|
l.Log().Infoln("Updating loadbalancer config to include new server node(s)")
|
|
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
|
|
if !errors.Is(err, ErrLBConfigHostNotFound) {
|
|
return fmt.Errorf("error updating loadbalancer: %w", err)
|
|
}
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// NodeAddToClusterMulti adds multiple nodes to a chosen cluster
|
|
func NodeAddToClusterMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, cluster *k3d.Cluster, createNodeOpts k3d.NodeCreateOpts) error {
|
|
if createNodeOpts.Timeout > 0*time.Second {
|
|
var cancel context.CancelFunc
|
|
ctx, cancel = context.WithTimeout(ctx, createNodeOpts.Timeout)
|
|
defer cancel()
|
|
}
|
|
|
|
nodeWaitGroup, ctx := errgroup.WithContext(ctx)
|
|
for _, node := range nodes {
|
|
currentNode := node
|
|
nodeWaitGroup.Go(func() error {
|
|
return NodeAddToCluster(ctx, runtime, currentNode, cluster, createNodeOpts)
|
|
})
|
|
}
|
|
if err := nodeWaitGroup.Wait(); err != nil {
|
|
return fmt.Errorf("Failed to add one or more nodes: %w", err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// NodeCreateMulti creates a list of nodes
|
|
func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d.Node, createNodeOpts k3d.NodeCreateOpts) error { // TODO: pass `--atomic` flag, so we stop and return an error if any node creation fails?
|
|
if createNodeOpts.Timeout > 0*time.Second {
|
|
var cancel context.CancelFunc
|
|
ctx, cancel = context.WithTimeout(ctx, createNodeOpts.Timeout)
|
|
defer cancel()
|
|
}
|
|
|
|
nodeWaitGroup, ctx := errgroup.WithContext(ctx)
|
|
for _, node := range nodes {
|
|
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
|
|
l.Log().Error(err)
|
|
}
|
|
if createNodeOpts.Wait {
|
|
currentNode := node
|
|
nodeWaitGroup.Go(func() error {
|
|
l.Log().Debugf("Starting to wait for node '%s'", currentNode.Name)
|
|
readyLogMessage := k3d.ReadyLogMessageByRole[currentNode.Role]
|
|
if readyLogMessage != "" {
|
|
return NodeWaitForLogMessage(ctx, runtime, currentNode, readyLogMessage, time.Time{})
|
|
}
|
|
l.Log().Warnf("NodeCreateMulti: Set to wait for node %s to get ready, but there's no target log message defined", currentNode.Name)
|
|
return nil
|
|
})
|
|
}
|
|
}
|
|
|
|
if err := nodeWaitGroup.Wait(); err != nil {
|
|
l.Log().Errorln("Failed to bring up all nodes in time. Check the logs:")
|
|
l.Log().Errorf(">>> %+v", err)
|
|
return fmt.Errorf("Failed to create nodes")
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// NodeRun creates and starts a node
|
|
func NodeRun(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeCreateOpts k3d.NodeCreateOpts) error {
|
|
if err := NodeCreate(ctx, runtime, node, nodeCreateOpts); err != nil {
|
|
return fmt.Errorf("failed to create node '%s': %w", node.Name, err)
|
|
}
|
|
|
|
if err := NodeStart(ctx, runtime, node, &k3d.NodeStartOpts{
|
|
Wait: nodeCreateOpts.Wait,
|
|
Timeout: nodeCreateOpts.Timeout,
|
|
NodeHooks: nodeCreateOpts.NodeHooks,
|
|
EnvironmentInfo: nodeCreateOpts.EnvironmentInfo,
|
|
}); err != nil {
|
|
return fmt.Errorf("failed to start node '%s': %w", node.Name, err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// NodeStart starts an existing node
|
|
func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts *k3d.NodeStartOpts) error {
|
|
|
|
// return early, if the node is already running
|
|
if node.State.Running {
|
|
l.Log().Infof("Node %s is already running", node.Name)
|
|
return nil
|
|
}
|
|
|
|
if err := enableFixes(ctx, runtime, node, nodeStartOpts); err != nil {
|
|
return fmt.Errorf("failed to enable k3d fixes: %w", err)
|
|
}
|
|
|
|
startTime := time.Now()
|
|
l.Log().Debugf("Node %s Start Time: %+v", node.Name, startTime)
|
|
|
|
// execute lifecycle hook actions
|
|
for _, hook := range nodeStartOpts.NodeHooks {
|
|
if hook.Stage == k3d.LifecycleStagePreStart {
|
|
l.Log().Tracef("Node %s: Executing preStartAction '%s'", node.Name, reflect.TypeOf(hook))
|
|
if err := hook.Action.Run(ctx, node); err != nil {
|
|
l.Log().Errorf("Node %s: Failed executing preStartAction '%+v': %+v", node.Name, hook, err)
|
|
}
|
|
}
|
|
}
|
|
|
|
// start the node
|
|
l.Log().Tracef("Starting node '%s'", node.Name)
|
|
|
|
if err := runtime.StartNode(ctx, node); err != nil {
|
|
return fmt.Errorf("runtime failed to start node '%s': %w", node.Name, err)
|
|
}
|
|
|
|
if node.State.Started != "" {
|
|
ts, err := time.Parse("2006-01-02T15:04:05.999999999Z", node.State.Started)
|
|
if err != nil {
|
|
l.Log().Debugf("Failed to parse '%s.State.Started' timestamp '%s', falling back to calulated time", node.Name, node.State.Started)
|
|
}
|
|
startTime = ts.Truncate(time.Second)
|
|
l.Log().Debugf("Truncated %s to %s", ts, startTime)
|
|
}
|
|
|
|
if nodeStartOpts.Wait {
|
|
if nodeStartOpts.ReadyLogMessage == "" {
|
|
nodeStartOpts.ReadyLogMessage = k3d.ReadyLogMessageByRole[node.Role]
|
|
}
|
|
if nodeStartOpts.ReadyLogMessage != "" {
|
|
l.Log().Debugf("Waiting for node %s to get ready (Log: '%s')", node.Name, nodeStartOpts.ReadyLogMessage)
|
|
if err := NodeWaitForLogMessage(ctx, runtime, node, nodeStartOpts.ReadyLogMessage, startTime); err != nil {
|
|
return fmt.Errorf("Node %s failed to get ready: %+v", node.Name, err)
|
|
}
|
|
} else {
|
|
l.Log().Warnf("NodeStart: Set to wait for node %s to be ready, but there's no target log message defined", node.Name)
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func enableFixes(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts *k3d.NodeStartOpts) error {
|
|
|
|
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
|
|
|
|
// FIXME: FixCgroupV2 - to be removed when fixed upstream
|
|
// auto-enable, if needed
|
|
EnableCgroupV2FixIfNeeded(runtime)
|
|
|
|
// early exit if we don't need any fix
|
|
if !fixes.FixEnabledAny() {
|
|
l.Log().Debugln("No fix enabled.")
|
|
return nil
|
|
}
|
|
|
|
// ensure nodehook list
|
|
if nodeStartOpts.NodeHooks == nil {
|
|
nodeStartOpts.NodeHooks = []k3d.NodeHook{}
|
|
}
|
|
|
|
// write umbrella entrypoint
|
|
nodeStartOpts.NodeHooks = append(nodeStartOpts.NodeHooks, k3d.NodeHook{
|
|
Stage: k3d.LifecycleStagePreStart,
|
|
Action: actions.WriteFileAction{
|
|
Runtime: runtime,
|
|
Content: fixes.K3DEntrypoint,
|
|
Dest: "/bin/k3d-entrypoint.sh",
|
|
Mode: 0744,
|
|
},
|
|
})
|
|
|
|
// DNS Fix
|
|
if fixes.FixEnabled(fixes.EnvFixDNS) {
|
|
l.Log().Debugf("ENABLING DNS MAGIC!!!")
|
|
|
|
if nodeStartOpts.EnvironmentInfo == nil || nodeStartOpts.EnvironmentInfo.HostGateway == nil {
|
|
return fmt.Errorf("Cannot enable DNS fix, as Host Gateway IP is missing!")
|
|
}
|
|
|
|
data := []byte(strings.ReplaceAll(string(fixes.DNSMagicEntrypoint), "GATEWAY_IP", nodeStartOpts.EnvironmentInfo.HostGateway.String()))
|
|
|
|
nodeStartOpts.NodeHooks = append(nodeStartOpts.NodeHooks, k3d.NodeHook{
|
|
Stage: k3d.LifecycleStagePreStart,
|
|
Action: actions.WriteFileAction{
|
|
Runtime: runtime,
|
|
Content: data,
|
|
Dest: "/bin/k3d-entrypoint-dns.sh",
|
|
Mode: 0744,
|
|
},
|
|
})
|
|
}
|
|
|
|
// CGroupsV2Fix
|
|
if fixes.FixEnabled(fixes.EnvFixCgroupV2) {
|
|
l.Log().Debugf("ENABLING CGROUPSV2 MAGIC!!!")
|
|
|
|
if nodeStartOpts.NodeHooks == nil {
|
|
nodeStartOpts.NodeHooks = []k3d.NodeHook{}
|
|
}
|
|
|
|
nodeStartOpts.NodeHooks = append(nodeStartOpts.NodeHooks, k3d.NodeHook{
|
|
Stage: k3d.LifecycleStagePreStart,
|
|
Action: actions.WriteFileAction{
|
|
Runtime: runtime,
|
|
Content: fixes.CgroupV2Entrypoint,
|
|
Dest: "/bin/k3d-entrypoint-cgroupv2.sh",
|
|
Mode: 0744,
|
|
},
|
|
})
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// NodeCreate creates a new containerized k3s node.
// It fills in role-independent configuration (runtime labels, k3s node labels,
// default env vars), applies role-specific patches, optionally fakes memory
// limits via bind-mounted /proc/meminfo (Docker only), and finally asks the
// runtime to create the container. It does NOT start the node (see NodeStart).
func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, createNodeOpts k3d.NodeCreateOpts) error {
	// FIXME: FixCgroupV2 - to be removed when fixed upstream
	EnableCgroupV2FixIfNeeded(runtime)
	l.Log().Tracef("Creating node from spec\n%+v", node)

	/*
	 * CONFIGURATION
	 */

	/* global node configuration (applies for any node role) */

	// ### Labels ###
	node.FillRuntimeLabels()

	// forward user-requested Kubernetes node labels to k3s via --node-label
	for k, v := range node.K3sNodeLabels {
		node.Args = append(node.Args, "--node-label", fmt.Sprintf("%s=%s", k, v))
	}

	// ### Environment ###
	node.Env = append(node.Env, k3d.DefaultNodeEnv...) // append default node env vars

	// specify options depending on node role
	if node.Role == k3d.AgentRole { // TODO: check here AND in CLI or only here?
		if err := patchAgentSpec(node); err != nil {
			return fmt.Errorf("failed to patch agent spec on node %s: %w", node.Name, err)
		}
	} else if node.Role == k3d.ServerRole {
		if err := patchServerSpec(node, runtime); err != nil {
			return fmt.Errorf("failed to patch server spec on node %s: %w", node.Name, err)
		}
	}

	// memory limits: emulated by bind-mounting a fake meminfo (and edac folder)
	// into the container; only supported on the Docker runtime
	if node.Memory != "" {
		if runtime != runtimes.Docker {
			l.Log().Warn("ignoring specified memory limits as runtime is not Docker")
		} else {
			memory, err := dockerunits.RAMInBytes(node.Memory)
			if err != nil {
				return fmt.Errorf("Invalid memory limit format: %+v", err)
			}
			// mount fake meminfo as readonly
			fakemempath, err := util.MakeFakeMeminfo(memory, node.Name)
			if err != nil {
				return fmt.Errorf("Failed to create fake meminfo: %+v", err)
			}
			node.Volumes = append(node.Volumes, fmt.Sprintf("%s:%s:ro", fakemempath, util.MemInfoPath))
			// mount empty edac folder, but only if it exists
			// (presumably so in-container tools don't read the host's edac data — TODO confirm)
			exists, err := docker.CheckIfDirectoryExists(ctx, node.Image, util.EdacFolderPath)
			if err != nil {
				return fmt.Errorf("Failed to check for the existence of edac folder: %+v", err)
			}
			if exists {
				l.Log().Debugln("Found edac folder")
				fakeedacpath, err := util.MakeFakeEdac(node.Name)
				if err != nil {
					return fmt.Errorf("Failed to create fake edac: %+v", err)
				}
				node.Volumes = append(node.Volumes, fmt.Sprintf("%s:%s:ro", fakeedacpath, util.EdacFolderPath))
			}
		}
	}

	/*
	 * CREATION
	 */
	if err := runtime.CreateNode(ctx, node); err != nil {
		return fmt.Errorf("runtime failed to create node '%s': %w", node.Name, err)
	}

	return nil
}
|
|
|
|
// NodeDelete deletes an existing node
|
|
func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, opts k3d.NodeDeleteOpts) error {
|
|
// delete node
|
|
if err := runtime.DeleteNode(ctx, node); err != nil {
|
|
l.Log().Error(err)
|
|
}
|
|
|
|
// delete fake folder created for limits
|
|
if node.Memory != "" {
|
|
l.Log().Debug("Cleaning fake files folder from k3d config dir for this node...")
|
|
filepath, err := util.GetNodeFakerDirOrCreate(node.Name)
|
|
err = os.RemoveAll(filepath)
|
|
if err != nil {
|
|
// this err prob should not be fatal, just log it
|
|
l.Log().Errorf("Could not remove fake files folder for node %s: %+v", node.Name, err)
|
|
}
|
|
}
|
|
|
|
// update the server loadbalancer
|
|
if !opts.SkipLBUpdate && (node.Role == k3d.ServerRole || node.Role == k3d.AgentRole) {
|
|
cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.RuntimeLabels[k3d.LabelClusterName]})
|
|
if err != nil {
|
|
return fmt.Errorf("failed fo find cluster for node '%s': %w", node.Name, err)
|
|
}
|
|
|
|
// if it's a server node, then update the loadbalancer configuration
|
|
if node.Role == k3d.ServerRole {
|
|
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
|
|
if !errors.Is(err, ErrLBConfigHostNotFound) {
|
|
return fmt.Errorf("failed to update cluster loadbalancer: %w", err)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// patchAgentSpec adds agent node specific settings to a node
|
|
func patchAgentSpec(node *k3d.Node) error {
|
|
if node.Cmd == nil {
|
|
node.Cmd = []string{"agent"}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// patchServerSpec adds server node specific settings to a node
|
|
func patchServerSpec(node *k3d.Node, runtime runtimes.Runtime) error {
|
|
// command / arguments
|
|
if node.Cmd == nil {
|
|
node.Cmd = []string{"server"}
|
|
}
|
|
|
|
// Add labels and TLS SAN for the exposed API
|
|
// FIXME: For now, the labels concerning the API on the server nodes are only being used for configuring the kubeconfig
|
|
node.RuntimeLabels[k3d.LabelServerAPIHostIP] = node.ServerOpts.KubeAPI.Binding.HostIP // TODO: maybe get docker machine IP here
|
|
node.RuntimeLabels[k3d.LabelServerAPIHost] = node.ServerOpts.KubeAPI.Host
|
|
node.RuntimeLabels[k3d.LabelServerAPIPort] = node.ServerOpts.KubeAPI.Binding.HostPort
|
|
|
|
// If the runtime is docker, attempt to use the docker host
|
|
if runtime == runtimes.Docker {
|
|
dockerHost := runtime.GetHost()
|
|
if dockerHost != "" {
|
|
dockerHost = strings.Split(dockerHost, ":")[0] // remove the port
|
|
l.Log().Tracef("Using docker host %s", dockerHost)
|
|
node.RuntimeLabels[k3d.LabelServerAPIHostIP] = dockerHost
|
|
node.RuntimeLabels[k3d.LabelServerAPIHost] = dockerHost
|
|
}
|
|
}
|
|
|
|
node.Args = append(node.Args, "--tls-san", node.RuntimeLabels[k3d.LabelServerAPIHost]) // add TLS SAN for non default host name
|
|
|
|
return nil
|
|
}
|
|
|
|
// NodeList returns a list of all existing nodes managed by k3d
// (i.e. all runtime containers carrying the default k3d runtime labels).
// NOTE(review): the previous comment wrongly said "clusters".
func NodeList(ctx context.Context, runtime runtimes.Runtime) ([]*k3d.Node, error) {
	nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultRuntimeLabels)
	if err != nil {
		return nil, fmt.Errorf("failed to list nodes: %w", err)
	}

	return nodes, nil
}
|
|
|
|
// NodeGet returns a node matching the specified node fields
|
|
func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3d.Node, error) {
|
|
// get node
|
|
node, err := runtime.GetNode(ctx, node)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to get node '%s': %w", node.Name, err)
|
|
}
|
|
|
|
return node, nil
|
|
}
|
|
|
|
// NodeWaitForLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
|
|
func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
|
|
l.Log().Tracef("NodeWaitForLogMessage: Node '%s' waiting for log message '%s' since '%+v'", node.Name, message, since)
|
|
for {
|
|
select {
|
|
case <-ctx.Done():
|
|
if ctx.Err() == context.DeadlineExceeded {
|
|
d, ok := ctx.Deadline()
|
|
if ok {
|
|
l.Log().Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now())
|
|
}
|
|
return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s: %w", message, node.Name, ctx.Err())
|
|
}
|
|
return ctx.Err()
|
|
default:
|
|
}
|
|
|
|
// read the logs
|
|
out, err := runtime.GetNodeLogs(ctx, node, since)
|
|
if err != nil {
|
|
if out != nil {
|
|
out.Close()
|
|
}
|
|
return fmt.Errorf("Failed waiting for log message '%s' from node '%s': %w", message, node.Name, err)
|
|
}
|
|
defer out.Close()
|
|
|
|
buf := new(bytes.Buffer)
|
|
nRead, _ := buf.ReadFrom(out)
|
|
out.Close()
|
|
output := buf.String()
|
|
|
|
if nRead > 0 && strings.Contains(os.Getenv("K3D_LOG_NODE_WAIT_LOGS"), string(node.Role)) {
|
|
l.Log().Tracef("=== Read logs since %s ===\n%s\n", since, output)
|
|
}
|
|
// check if we can find the specified line in the log
|
|
if nRead > 0 && strings.Contains(output, message) {
|
|
if l.Log().GetLevel() >= logrus.TraceLevel {
|
|
temp := strings.Split(output, "\n")
|
|
for _, t := range temp {
|
|
if strings.Contains(t, message) {
|
|
l.Log().Tracef("Found target log line: `%s`", t)
|
|
}
|
|
}
|
|
}
|
|
break
|
|
}
|
|
|
|
// check if the container is restarting
|
|
running, status, _ := runtime.GetNodeStatus(ctx, node)
|
|
if running && status == k3d.NodeStatusRestarting && time.Now().Sub(since) > k3d.NodeWaitForLogMessageRestartWarnTime {
|
|
l.Log().Warnf("Node '%s' is restarting for more than a minute now. Possibly it will recover soon (e.g. when it's waiting to join). Consider using a creation timeout to avoid waiting forever in a Restart Loop.", node.Name)
|
|
}
|
|
|
|
time.Sleep(500 * time.Millisecond) // wait for half a second to avoid overloading docker (error `socket: too many open files`)
|
|
}
|
|
l.Log().Debugf("Finished waiting for log message '%s' from node '%s'", message, node.Name)
|
|
return nil
|
|
}
|
|
|
|
// NodeFilterByRoles filters a list of nodes by their roles
|
|
func NodeFilterByRoles(nodes []*k3d.Node, includeRoles, excludeRoles []k3d.Role) []*k3d.Node {
|
|
// check for conflicting filters
|
|
for _, includeRole := range includeRoles {
|
|
for _, excludeRole := range excludeRoles {
|
|
if includeRole == excludeRole {
|
|
l.Log().Warnf("You've specified the same role ('%s') for inclusion and exclusion. Exclusion precedes inclusion.", includeRole)
|
|
}
|
|
}
|
|
}
|
|
|
|
resultList := []*k3d.Node{}
|
|
nodeLoop:
|
|
for _, node := range nodes {
|
|
// exclude > include
|
|
for _, excludeRole := range excludeRoles {
|
|
if node.Role == excludeRole {
|
|
continue nodeLoop
|
|
}
|
|
}
|
|
|
|
// include < exclude
|
|
for _, includeRole := range includeRoles {
|
|
if node.Role == includeRole {
|
|
resultList = append(resultList, node)
|
|
continue nodeLoop
|
|
}
|
|
}
|
|
}
|
|
|
|
l.Log().Tracef("Filteres %d nodes by roles (in: %+v | ex: %+v), got %d left", len(nodes), includeRoles, excludeRoles, len(resultList))
|
|
|
|
return resultList
|
|
}
|
|
|
|
// NodeEdit let's you update an existing node.
// It deep-copies existingNode (without runtime state), merges the changeset's
// port bindings into the copy (skipping collisions), regenerates the
// loadbalancer config for LB nodes, and finally swaps the node via NodeReplace.
func NodeEdit(ctx context.Context, runtime runtimes.Runtime, existingNode, changeset *k3d.Node) error {

	/*
	 * Make a deep copy of the existing node
	 */

	result, err := CopyNode(ctx, existingNode, CopyNodeOpts{keepState: false})
	if err != nil {
		return fmt.Errorf("failed to copy node %s: %w", existingNode.Name, err)
	}

	/*
	 * Apply changes
	 */

	// === Ports ===
	if result.Ports == nil {
		result.Ports = nat.PortMap{}
	}
	for port, portbindings := range changeset.Ports {
	loopChangesetPortbindings:
		for _, portbinding := range portbindings {

			// loop over existing portbindings to avoid port collisions (docker doesn't check for it)
			for _, existingPB := range result.Ports[port] {
				if util.IsPortBindingEqual(portbinding, existingPB) { // also matches on "equal" HostIPs (127.0.0.1, "", 0.0.0.0)
					l.Log().Tracef("Skipping existing PortBinding: %+v", existingPB)
					continue loopChangesetPortbindings
				}
			}
			l.Log().Tracef("Adding portbinding %+v for port %s", portbinding, port.Port())
			result.Ports[port] = append(result.Ports[port], portbinding)
		}
	}

	// --- Loadbalancer specifics ---
	// for LB nodes, the updated ports must be reflected in the nginx config,
	// which gets written into the container via a preStart hook
	if result.Role == k3d.LoadBalancerRole {
		cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: existingNode.RuntimeLabels[k3d.LabelClusterName]})
		if err != nil {
			return fmt.Errorf("error updating loadbalancer config: %w", err)
		}
		if cluster.ServerLoadBalancer == nil {
			cluster.ServerLoadBalancer = k3d.NewLoadbalancer()
		}
		// point the cluster's LB at the edited copy so the generated config matches it
		cluster.ServerLoadBalancer.Node = result
		lbConfig, err := LoadbalancerGenerateConfig(cluster)
		if err != nil {
			return fmt.Errorf("error generating loadbalancer config: %v", err)
		}

		// prepare to write config to lb container
		configyaml, err := yaml.Marshal(lbConfig)
		if err != nil {
			return fmt.Errorf("failed to marshal loadbalancer config: %w", err)
		}

		writeLbConfigAction := k3d.NodeHook{
			Stage: k3d.LifecycleStagePreStart,
			Action: actions.WriteFileAction{
				Runtime: runtime,
				Dest:    k3d.DefaultLoadbalancerConfigPath,
				Mode:    0744,
				Content: configyaml,
			},
		}

		result.HookActions = append(result.HookActions, writeLbConfigAction)
	}

	// replace existing node
	return NodeReplace(ctx, runtime, existingNode, result)
}
|
|
|
|
// NodeReplace swaps an existing node (old) for a freshly configured one (new)
// of the same name: the old node is renamed to a temporary name, the new one
// is created and started under the original name, and only then is the old
// node deleted. On failure it attempts to roll back to the old node.
func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.Node) error {

	// rename existing node to free up its name for the replacement
	oldNameTemp := fmt.Sprintf("%s-%s", old.Name, util.GenerateRandomString(5))
	oldNameOriginal := old.Name
	l.Log().Infof("Renaming existing node %s to %s...", old.Name, oldNameTemp)
	if err := runtime.RenameNode(ctx, old, oldNameTemp); err != nil {
		return fmt.Errorf("runtime failed to rename node '%s': %w", old.Name, err)
	}
	old.Name = oldNameTemp

	// create (not start) new node; on failure, rename the old node back
	l.Log().Infof("Creating new node %s...", new.Name)
	if err := NodeCreate(ctx, runtime, new, k3d.NodeCreateOpts{Wait: true}); err != nil {
		if err := runtime.RenameNode(ctx, old, oldNameOriginal); err != nil {
			return fmt.Errorf("Failed to create new node. Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err)
		}
		return fmt.Errorf("Failed to create new node. Brought back old node: %+v", err)
	}

	// stop existing/old node (the new node takes over from here)
	l.Log().Infof("Stopping existing node %s...", old.Name)
	if err := runtime.StopNode(ctx, old); err != nil {
		return fmt.Errorf("runtime failed to stop node '%s': %w", old.Name, err)
	}

	// start new node; on failure, delete it and bring the old node back up
	l.Log().Infof("Starting new node %s...", new.Name)
	if err := NodeStart(ctx, runtime, new, &k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil {
		// rollback step 1: remove the half-started replacement
		if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
			return fmt.Errorf("Failed to start new node. Also failed to rollback: %+v", err)
		}
		// rollback step 2: restore the old node's original name
		if err := runtime.RenameNode(ctx, old, oldNameOriginal); err != nil {
			return fmt.Errorf("Failed to start new node. Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err)
		}
		old.Name = oldNameOriginal
		// rollback step 3: restart the old node
		if err := NodeStart(ctx, runtime, old, &k3d.NodeStartOpts{Wait: true}); err != nil {
			return fmt.Errorf("Failed to start new node. Also failed to restart old node: %+v", err)
		}
		return fmt.Errorf("Failed to start new node. Rolled back: %+v", err)
	}

	// cleanup: delete old node (LB update skipped; the new node replaced it in place)
	l.Log().Infof("Deleting old node %s...", old.Name)
	if err := NodeDelete(ctx, runtime, old, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
		return fmt.Errorf("failed to delete old node '%s': %w", old.Name, err)
	}

	// done
	return nil
}
|
|
|
|
type CopyNodeOpts struct {
|
|
keepState bool
|
|
}
|
|
|
|
func CopyNode(ctx context.Context, src *k3d.Node, opts CopyNodeOpts) (*k3d.Node, error) {
|
|
|
|
targetCopy, err := copystruct.Copy(src)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to copy node struct: %w", err)
|
|
}
|
|
|
|
result := targetCopy.(*k3d.Node)
|
|
|
|
if !opts.keepState {
|
|
// ensure that node state is empty
|
|
result.State = k3d.NodeState{}
|
|
}
|
|
|
|
return result, nil
|
|
}
|
|
|