[Enhancement] Network Magic (#721)

- before starting the cluster, gather environment info via tools node
- use hostIP/gatewayIP for DNS (iptables + resolv.conf updated in entrypoint script)
- revamp of custom entrypoint scripts
pull/724/head
Thorsten Klein 3 years ago committed by GitHub
parent 7c635c29ab
commit b4158a1dc1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 4
      cmd/image/imageImport.go
  2. 31
      pkg/client/cluster.go
  3. 44
      pkg/client/environment.go
  4. 4
      pkg/client/fixes.go
  5. 8
      pkg/client/host.go
  6. 109
      pkg/client/node.go
  7. 2
      pkg/client/registry.go
  8. 110
      pkg/client/tools.go
  9. 2
      pkg/client/tools_test.go
  10. 4
      pkg/runtimes/docker/network.go
  11. 4
      pkg/runtimes/docker/translate.go
  12. 4
      pkg/runtimes/docker/translate_test.go
  13. 5
      pkg/types/fixes/assets/k3d-entrypoint-cgroupv2.sh
  14. 34
      pkg/types/fixes/assets/k3d-entrypoint-dns.sh
  15. 19
      pkg/types/fixes/assets/k3d-entrypoint.sh
  16. 34
      pkg/types/fixes/fixes.go
  17. 20
      pkg/types/types.go

@ -27,9 +27,9 @@ import (
"github.com/spf13/cobra"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/tools"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
@ -63,7 +63,7 @@ So if a file './rancher/k3d-tools' exists, k3d will try to import it instead of
errOccured := false
for _, cluster := range clusters {
l.Log().Infof("Importing image(s) into cluster '%s'", cluster.Name)
if err := tools.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
if err := client.ImageImportIntoClusterMulti(cmd.Context(), runtimes.SelectedRuntime, images, &cluster, loadImageOpts); err != nil {
l.Log().Errorf("Failed to import image(s) into cluster '%s': %+v", cluster.Name, err)
errOccured = true
}

@ -70,15 +70,23 @@ func ClusterRun(ctx context.Context, runtime k3drt.Runtime, clusterConfig *confi
/*
* Step 2: Pre-Start Configuration
*/
// TODO: ClusterRun: add cluster configuration step here
_, err := EnsureToolsNode(ctx, runtime, &clusterConfig.Cluster)
if err != nil {
return err
}
envInfo, err := GatherEnvironmentInfo(ctx, runtime, &clusterConfig.Cluster)
if err != nil {
return err
}
/*
* Step 3: Start Containers
*/
if err := ClusterStart(ctx, runtime, &clusterConfig.Cluster, k3d.ClusterStartOpts{
WaitForServer: clusterConfig.ClusterCreateOpts.WaitForServer,
Timeout: clusterConfig.ClusterCreateOpts.Timeout, // TODO: here we should consider the time used so far
NodeHooks: clusterConfig.ClusterCreateOpts.NodeHooks,
WaitForServer: clusterConfig.ClusterCreateOpts.WaitForServer,
Timeout: clusterConfig.ClusterCreateOpts.Timeout, // TODO: here we should consider the time used so far
NodeHooks: clusterConfig.ClusterCreateOpts.NodeHooks,
EnvironmentInfo: envInfo,
}); err != nil {
return fmt.Errorf("Failed Cluster Start: %+v", err)
}
@ -859,10 +867,11 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
*/
if initNode != nil {
l.Log().Infoln("Starting the initializing server...")
if err := NodeStart(ctx, runtime, initNode, k3d.NodeStartOpts{
if err := NodeStart(ctx, runtime, initNode, &k3d.NodeStartOpts{
Wait: true, // always wait for the init node
NodeHooks: startClusterOpts.NodeHooks,
ReadyLogMessage: "Running kube-apiserver", // initNode means, that we're using etcd -> this will need quorum, so "k3s is up and running" won't happen right now
EnvironmentInfo: startClusterOpts.EnvironmentInfo,
}); err != nil {
return fmt.Errorf("Failed to start initializing server node: %+v", err)
}
@ -872,9 +881,10 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
* Server Nodes
*/
l.Log().Infoln("Starting servers...")
nodeStartOpts := k3d.NodeStartOpts{
Wait: true,
NodeHooks: startClusterOpts.NodeHooks,
nodeStartOpts := &k3d.NodeStartOpts{
Wait: true,
NodeHooks: startClusterOpts.NodeHooks,
EnvironmentInfo: startClusterOpts.EnvironmentInfo,
}
for _, serverNode := range servers {
if err := NodeStart(ctx, runtime, serverNode, nodeStartOpts); err != nil {
@ -909,8 +919,9 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
currentHelperNode := helperNode
helperWG.Go(func() error {
nodeStartOpts := k3d.NodeStartOpts{
NodeHooks: currentHelperNode.HookActions,
nodeStartOpts := &k3d.NodeStartOpts{
NodeHooks: currentHelperNode.HookActions,
EnvironmentInfo: startClusterOpts.EnvironmentInfo,
}
if currentHelperNode.Role == k3d.LoadBalancerRole {
nodeStartOpts.Wait = true

@ -0,0 +1,44 @@
/*
Copyright © 2020-2021 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package client
import (
"context"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
// GatherEnvironmentInfo collects information about the environment the cluster
// runs in. Currently this is only the host gateway IP (as resolved via the
// cluster's tools node), which is later used e.g. for the DNS fix.
// Returns nil (not a partially populated struct) if gathering fails.
func GatherEnvironmentInfo(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*k3d.EnvironmentInfo, error) {
	hostIP, err := GetHostIP(ctx, runtime, cluster)
	if err != nil {
		return nil, err
	}

	return &k3d.EnvironmentInfo{
		HostGateway: hostIP,
	}, nil
}

@ -32,7 +32,7 @@ import (
// FIXME: FixCgroupV2 - to be removed when fixed upstream
func EnableCgroupV2FixIfNeeded(runtime runtimes.Runtime) {
if _, isSet := os.LookupEnv(fixes.EnvFixCgroupV2); !isSet {
if _, isSet := os.LookupEnv(string(fixes.EnvFixCgroupV2)); !isSet {
runtimeInfo, err := runtime.Info()
if err != nil {
l.Log().Warnf("Failed to get runtime information: %+v", err)
@ -45,7 +45,7 @@ func EnableCgroupV2FixIfNeeded(runtime runtimes.Runtime) {
}
if cgroupVersion == 2 {
l.Log().Debugf("Detected CgroupV2, enabling custom entrypoint (disable by setting %s=false)", fixes.EnvFixCgroupV2)
if err := os.Setenv(fixes.EnvFixCgroupV2, "true"); err != nil {
if err := os.Setenv(string(fixes.EnvFixCgroupV2), "true"); err != nil {
l.Log().Errorf("Detected CgroupsV2 but failed to enable k3d's hotfix (try `export %s=true`): %+v", fixes.EnvFixCgroupV2, err)
}
}

@ -57,7 +57,13 @@ func GetHostIP(ctx context.Context, rtime rt.Runtime, cluster *k3d.Cluster) (net
// Docker (for Desktop) on MacOS or Windows
if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
ip, err := resolveHostnameFromInside(ctx, rtime, cluster.Nodes[0], "host.docker.internal")
toolsNode, err := EnsureToolsNode(ctx, rtime, cluster)
if err != nil {
return nil, err
}
ip, err := resolveHostnameFromInside(ctx, rtime, toolsNode, "host.docker.internal")
if err != nil {
return nil, err
}

@ -283,10 +283,11 @@ func NodeRun(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, node
return err
}
if err := NodeStart(ctx, runtime, node, k3d.NodeStartOpts{
Wait: nodeCreateOpts.Wait,
Timeout: nodeCreateOpts.Timeout,
NodeHooks: nodeCreateOpts.NodeHooks,
if err := NodeStart(ctx, runtime, node, &k3d.NodeStartOpts{
Wait: nodeCreateOpts.Wait,
Timeout: nodeCreateOpts.Timeout,
NodeHooks: nodeCreateOpts.NodeHooks,
EnvironmentInfo: nodeCreateOpts.EnvironmentInfo,
}); err != nil {
return err
}
@ -295,7 +296,7 @@ func NodeRun(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, node
}
// NodeStart starts an existing node
func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts k3d.NodeStartOpts) error {
func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts *k3d.NodeStartOpts) error {
// return early, if the node is already running
if node.State.Running {
@ -303,25 +304,8 @@ func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, no
return nil
}
// FIXME: FixCgroupV2 - to be removed when fixed upstream
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
EnableCgroupV2FixIfNeeded(runtime)
if fixes.FixCgroupV2Enabled() {
if nodeStartOpts.NodeHooks == nil {
nodeStartOpts.NodeHooks = []k3d.NodeHook{}
}
nodeStartOpts.NodeHooks = append(nodeStartOpts.NodeHooks, k3d.NodeHook{
Stage: k3d.LifecycleStagePreStart,
Action: actions.WriteFileAction{
Runtime: runtime,
Content: fixes.CgroupV2Entrypoint,
Dest: "/bin/entrypoint.sh",
Mode: 0744,
},
})
}
if err := enableFixes(ctx, runtime, node, nodeStartOpts); err != nil {
return err
}
startTime := time.Now()
@ -371,6 +355,79 @@ func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, no
return nil
}
// enableFixes appends PreStart NodeHooks to nodeStartOpts that write the
// umbrella entrypoint and any enabled fix scripts (DNS magic, cgroupv2
// workaround) into the node. It only applies to k3s nodes (server/agent);
// for all other roles it is a no-op.
func enableFixes(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts *k3d.NodeStartOpts) error {
	// fixes only concern the k3s containers
	if node.Role != k3d.ServerRole && node.Role != k3d.AgentRole {
		return nil
	}

	// FIXME: FixCgroupV2 - to be removed when fixed upstream
	// auto-enable, if needed
	EnableCgroupV2FixIfNeeded(runtime)

	// early exit if we don't need any fix
	if !fixes.FixEnabledAny() {
		l.Log().Debugln("No fix enabled.")
		return nil
	}

	// ensure nodehook list exists before appending to it
	if nodeStartOpts.NodeHooks == nil {
		nodeStartOpts.NodeHooks = []k3d.NodeHook{}
	}

	// write umbrella entrypoint which in turn runs all /bin/k3d-entrypoint-*.sh scripts
	nodeStartOpts.NodeHooks = append(nodeStartOpts.NodeHooks, k3d.NodeHook{
		Stage: k3d.LifecycleStagePreStart,
		Action: actions.WriteFileAction{
			Runtime: runtime,
			Content: fixes.K3DEntrypoint,
			Dest:    "/bin/k3d-entrypoint.sh",
			Mode:    0744,
		},
	})

	// DNS Fix: reroute DNS traffic to the host gateway instead of Docker's embedded DNS
	if fixes.FixEnabled(fixes.EnvFixDNS) {
		l.Log().Debugf("ENABLING DNS MAGIC!!!")

		// the DNS entrypoint script needs the gateway IP gathered beforehand
		if nodeStartOpts.EnvironmentInfo == nil || nodeStartOpts.EnvironmentInfo.HostGateway == nil {
			return fmt.Errorf("Cannot enable DNS fix, as Host Gateway IP is missing!")
		}

		// inject the detected gateway IP into the embedded script template
		data := []byte(strings.ReplaceAll(string(fixes.DNSMagicEntrypoint), "GATEWAY_IP", nodeStartOpts.EnvironmentInfo.HostGateway.String()))

		nodeStartOpts.NodeHooks = append(nodeStartOpts.NodeHooks, k3d.NodeHook{
			Stage: k3d.LifecycleStagePreStart,
			Action: actions.WriteFileAction{
				Runtime: runtime,
				Content: data,
				Dest:    "/bin/k3d-entrypoint-dns.sh",
				Mode:    0744,
			},
		})
	}

	// CGroupsV2 Fix
	if fixes.FixEnabled(fixes.EnvFixCgroupV2) {
		l.Log().Debugf("ENABLING CGROUPSV2 MAGIC!!!")

		// NOTE: NodeHooks is guaranteed non-nil here (ensured above), so no
		// second nil-check is needed.
		nodeStartOpts.NodeHooks = append(nodeStartOpts.NodeHooks, k3d.NodeHook{
			Stage: k3d.LifecycleStagePreStart,
			Action: actions.WriteFileAction{
				Runtime: runtime,
				Content: fixes.CgroupV2Entrypoint,
				Dest:    "/bin/k3d-entrypoint-cgroupv2.sh",
				Mode:    0744,
			},
		})
	}

	return nil
}
// NodeCreate creates a new containerized k3s node
func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, createNodeOpts k3d.NodeCreateOpts) error {
// FIXME: FixCgroupV2 - to be removed when fixed upstream
@ -742,7 +799,7 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No
// start new node
l.Log().Infof("Starting new node %s...", new.Name)
if err := NodeStart(ctx, runtime, new, k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil {
if err := NodeStart(ctx, runtime, new, &k3d.NodeStartOpts{Wait: true, NodeHooks: new.HookActions}); err != nil {
if err := NodeDelete(ctx, runtime, new, k3d.NodeDeleteOpts{SkipLBUpdate: true}); err != nil {
return fmt.Errorf("Failed to start new node. Also failed to rollback: %+v", err)
}
@ -750,7 +807,7 @@ func NodeReplace(ctx context.Context, runtime runtimes.Runtime, old, new *k3d.No
return fmt.Errorf("Failed to start new node. Also failed to rename %s back to %s: %+v", old.Name, oldNameOriginal, err)
}
old.Name = oldNameOriginal
if err := NodeStart(ctx, runtime, old, k3d.NodeStartOpts{Wait: true}); err != nil {
if err := NodeStart(ctx, runtime, old, &k3d.NodeStartOpts{Wait: true}); err != nil {
return fmt.Errorf("Failed to start new node. Also failed to restart old node: %+v", err)
}
return fmt.Errorf("Failed to start new node. Rolled back: %+v", err)

@ -43,7 +43,7 @@ func RegistryRun(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Registr
return nil, fmt.Errorf("Failed to create registry: %+v", err)
}
if err := NodeStart(ctx, runtime, regNode, k3d.NodeStartOpts{}); err != nil {
if err := NodeStart(ctx, runtime, regNode, &k3d.NodeStartOpts{}); err != nil {
return nil, fmt.Errorf("Failed to start registry: %+v", err)
}

@ -20,7 +20,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package tools
package client
import (
"context"
@ -31,11 +31,9 @@ import (
"sync"
"time"
k3dc "github.com/rancher/k3d/v4/pkg/client"
l "github.com/rancher/k3d/v4/pkg/logger"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/version"
)
// ImageImportIntoClusterMulti starts up a k3d tools container for the selected cluster and uses it to export
@ -51,55 +49,12 @@ func ImageImportIntoClusterMulti(ctx context.Context, runtime runtimes.Runtime,
return fmt.Errorf("No valid images specified")
}
cluster, err = k3dc.ClusterGet(ctx, runtime, cluster)
// create tools node to export images
toolsNode, err := EnsureToolsNode(ctx, runtime, cluster)
if err != nil {
l.Log().Errorf("Failed to find the specified cluster")
return err
}
if cluster.Network.Name == "" {
return fmt.Errorf("Failed to get network for cluster '%s'", cluster.Name)
}
var imageVolume string
var ok bool
for _, node := range cluster.Nodes {
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
if imageVolume, ok = node.RuntimeLabels[k3d.LabelImageVolume]; ok {
break
}
}
}
if imageVolume == "" {
return fmt.Errorf("Failed to find image volume for cluster '%s'", cluster.Name)
}
l.Log().Debugf("Attaching to cluster's image volume '%s'", imageVolume)
// create tools node to export images
var toolsNode *k3d.Node
toolsNode, err = runtime.GetNode(ctx, &k3d.Node{Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name)})
if err != nil || toolsNode == nil {
l.Log().Infoln("Starting new tools node...")
toolsNode, err = runToolsNode( // TODO: re-use existing container
ctx,
runtime,
cluster,
cluster.Network.Name,
[]string{
fmt.Sprintf("%s:%s", imageVolume, k3d.DefaultImageVolumeMountPath),
fmt.Sprintf("%s:%s", runtime.GetRuntimePath(), runtime.GetRuntimePath()),
})
if err != nil {
l.Log().Errorf("Failed to run tools container for cluster '%s'", cluster.Name)
}
} else if !toolsNode.State.Running {
l.Log().Infof("Starting existing tools node %s...", toolsNode.Name)
if err := runtime.StartNode(ctx, toolsNode); err != nil {
return fmt.Errorf("error starting existing tools node %s: %v", toolsNode.Name, err)
}
}
/* TODO:
* Loop over list of images and check, whether they are files (tar archives) and sort them respectively
* Special case: '-' means "read from stdin"
@ -288,7 +243,7 @@ func runToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cl
}
node := &k3d.Node{
Name: fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name),
Image: fmt.Sprintf("%s:%s", k3d.DefaultToolsImageRepo, version.GetHelperImageVersion()),
Image: k3d.GetToolsImage(),
Role: k3d.NoRole,
Volumes: volumes,
Networks: []string{network},
@ -297,10 +252,65 @@ func runToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cl
RuntimeLabels: k3d.DefaultRuntimeLabels,
}
node.RuntimeLabels[k3d.LabelClusterName] = cluster.Name
if err := k3dc.NodeRun(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
if err := NodeRun(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
l.Log().Errorf("Failed to create tools container for cluster '%s'", cluster.Name)
return node, err
}
return node, nil
}
// EnsureToolsNode returns the tools node for the given cluster, creating it
// if it doesn't exist yet, or starting it if it exists but is stopped.
func EnsureToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) (*k3d.Node, error) {
	cluster, err := ClusterGet(ctx, runtime, cluster)
	if err != nil {
		l.Log().Errorf("Failed to find the specified cluster")
		return nil, err
	}

	if cluster.Network.Name == "" {
		return nil, fmt.Errorf("Failed to get network for cluster '%s'", cluster.Name)
	}

	// locate the image volume shared by the cluster's k3s nodes
	imageVolume := ""
	for _, clusterNode := range cluster.Nodes {
		if clusterNode.Role != k3d.ServerRole && clusterNode.Role != k3d.AgentRole {
			continue
		}
		if vol, found := clusterNode.RuntimeLabels[k3d.LabelImageVolume]; found {
			imageVolume = vol
			break
		}
	}
	if imageVolume == "" {
		return nil, fmt.Errorf("Failed to find image volume for cluster '%s'", cluster.Name)
	}

	l.Log().Debugf("Attaching to cluster's image volume '%s'", imageVolume)

	toolsNodeName := fmt.Sprintf("%s-%s-tools", k3d.DefaultObjectNamePrefix, cluster.Name)
	toolsNode, err := runtime.GetNode(ctx, &k3d.Node{Name: toolsNodeName})
	switch {
	case err != nil || toolsNode == nil:
		// no (usable) tools node found: create a fresh one
		l.Log().Infoln("Starting new tools node...")
		mounts := []string{
			fmt.Sprintf("%s:%s", imageVolume, k3d.DefaultImageVolumeMountPath),
			fmt.Sprintf("%s:%s", runtime.GetRuntimePath(), runtime.GetRuntimePath()),
		}
		toolsNode, err = runToolsNode(ctx, runtime, cluster, cluster.Network.Name, mounts)
		if err != nil {
			l.Log().Errorf("Failed to run tools container for cluster '%s'", cluster.Name)
		}
	case !toolsNode.State.Running:
		// tools node exists but is stopped: start it
		l.Log().Infof("Starting existing tools node %s...", toolsNode.Name)
		if startErr := runtime.StartNode(ctx, toolsNode); startErr != nil {
			return nil, fmt.Errorf("error starting existing tools node %s: %v", toolsNode.Name, startErr)
		}
	}

	return toolsNode, err
}

@ -20,7 +20,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package tools
package client
import (
"context"

@ -163,6 +163,10 @@ func (d Docker) CreateNetworkIfNotPresent(ctx context.Context, inNet *k3d.Cluste
// (3) Create a new network
netCreateOpts := types.NetworkCreate{
Driver: "bridge",
Options: map[string]string{
"com.docker.network.bridge.enable_ip_masquerade": "true",
},
CheckDuplicate: true,
Labels: k3d.DefaultRuntimeLabels,
}

@ -58,10 +58,10 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) {
/* Command & Arguments */
// FIXME: FixCgroupV2 - to be removed when fixed upstream
if fixes.FixCgroupV2Enabled() {
if fixes.FixEnabledAny() {
if node.Role == k3d.AgentRole || node.Role == k3d.ServerRole {
containerConfig.Entrypoint = []string{
"/bin/entrypoint.sh",
"/bin/k3d-entrypoint.sh",
}
}
}

@ -95,8 +95,8 @@ func TestTranslateNodeToContainer(t *testing.T) {
}
// TODO: // FIXME: FixCgroupV2 - to be removed when fixed upstream
if fixes.FixCgroupV2Enabled() {
expectedRepresentation.ContainerConfig.Entrypoint = []string{"/bin/entrypoint.sh"}
if fixes.FixEnabledAny() {
expectedRepresentation.ContainerConfig.Entrypoint = []string{"/bin/k3d-entrypoint.sh"}
}
actualRepresentation, err := TranslateNodeToContainer(inputNode)

@ -2,7 +2,6 @@
set -o errexit
set -o nounset
set -o pipefail
#########################################################################################################################################
# DISCLAIMER #
@ -11,12 +10,12 @@ set -o pipefail
# Moby License Apache 2.0: https://github.com/moby/moby/blob/ed89041433a031cafc0a0f19cfe573c31688d377/LICENSE #
#########################################################################################################################################
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
echo "[$(date -Iseconds)] [CgroupV2 Fix] Evacuating Root Cgroup ..."
# move the processes from the root group to the /init group,
# otherwise writing subtree_control fails with EBUSY.
mkdir -p /sys/fs/cgroup/init
busybox xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
# enable controllers
sed -e 's/ / +/g' -e 's/^/+/' <"/sys/fs/cgroup/cgroup.controllers" >"/sys/fs/cgroup/cgroup.subtree_control"
echo "[$(date -Iseconds)] [CgroupV2 Fix] Done"
fi
exec /bin/k3s "$@"

@ -0,0 +1,34 @@
#!/bin/sh

# DISCLAIMER
# Heavily inspired by / copied from https://github.com/kubernetes-sigs/kind/pull/1414/files#diff-3c55751d83af635109cece495ee2ff38206764a8b95f4cb8f11fc08a5c0ea8dc
# Apache 2.0 License (Kubernetes Authors): https://github.com/kubernetes-sigs/kind/blob/9222508298c50ce8c5ba1f364f37307e81ba915e/LICENSE

set -o errexit
set -o nounset

docker_dns="127.0.0.11"
gateway="GATEWAY_IP" # replaced within k3d Go code

echo "[$(date -Iseconds)] [DNS Fix] Use the detected Gateway IP $gateway instead of Docker's embedded DNS ($docker_dns)"

# Change iptables rules added by Docker to route traffic to our Gateway IP instead of Docker's embedded DNS
echo "[$(date -Iseconds)] [DNS Fix] > Changing iptables rules ..."
iptables-save \
  | sed \
    -e "s/-d ${docker_dns}/-d ${gateway}/g" \
    -e 's/-A OUTPUT \(.*\) -j DOCKER_OUTPUT/\0\n-A PREROUTING \1 -j DOCKER_OUTPUT/' \
    -e "s/--to-source :53/--to-source ${gateway}:53/g" \
  | iptables-restore

# Update resolv.conf to use the Gateway IP if needed: this will also make CoreDNS use it via k3s' default `forward . /etc/resolv.conf` rule in the CoreDNS config
# NOTE: grep must be guarded by `if` - under `set -o errexit` a bare `grep -q` with
# no match (exit status 1) would abort the whole entrypoint script right here.
if grep -q "${docker_dns}" /etc/resolv.conf; then
  echo "[$(date -Iseconds)] [DNS Fix] > Replacing IP in /etc/resolv.conf ..."
  cp /etc/resolv.conf /etc/resolv.conf.original
  sed -e "s/${docker_dns}/${gateway}/g" /etc/resolv.conf.original >/etc/resolv.conf
fi

echo "[$(date -Iseconds)] [DNS Fix] Done"

@ -0,0 +1,19 @@
#!/bin/sh

# Umbrella entrypoint: runs all /bin/k3d-entrypoint-*.sh fix scripts in order,
# logging their output, and then execs the real k3s binary.

set -o errexit
set -o nounset

LOGFILE="/var/log/k3d-entrypoints_$(date "+%y%m%d%H%M%S").log"

touch "$LOGFILE"

echo "[$(date -Iseconds)] Running k3d entrypoints..." >> "$LOGFILE"

for entrypoint in /bin/k3d-entrypoint-*.sh ; do
  # If no fix script was written, the glob stays unexpanded and $entrypoint is
  # the literal pattern - skip it instead of failing the node start.
  [ -f "$entrypoint" ] || continue
  echo "[$(date -Iseconds)] Running $entrypoint" >> "$LOGFILE"
  "$entrypoint" >> "$LOGFILE" 2>&1 || exit 1
done

echo "[$(date -Iseconds)] Finished k3d entrypoint scripts!" >> "$LOGFILE"

exec /bin/k3s "$@"

@ -37,16 +37,40 @@ import (
* FIXME: FixCgroupV2 - to be removed when fixed upstream
*/
// EnvFixCgroupV2 is the environment variable that k3d will check for to enable/disable the cgroupv2 workaround
const EnvFixCgroupV2 = "K3D_FIX_CGROUPV2"
type K3DFixEnv string
//go:embed assets/cgroupv2-entrypoint.sh
const (
EnvFixCgroupV2 K3DFixEnv = "K3D_FIX_CGROUPV2" // EnvFixCgroupV2 is the environment variable that k3d will check for to enable/disable the cgroupv2 workaround
EnvFixDNS K3DFixEnv = "K3D_FIX_DNS" // EnvFixDNS is the environment variable that k3d will check for to enable/disable the application of network magic related to DNS
)
var FixEnvs []K3DFixEnv = []K3DFixEnv{
EnvFixCgroupV2,
EnvFixDNS,
}
//go:embed assets/k3d-entrypoint-cgroupv2.sh
var CgroupV2Entrypoint []byte
func FixCgroupV2Enabled() bool {
enabled, err := strconv.ParseBool(os.Getenv(EnvFixCgroupV2))
//go:embed assets/k3d-entrypoint-dns.sh
var DNSMagicEntrypoint []byte
//go:embed assets/k3d-entrypoint.sh
var K3DEntrypoint []byte
// FixEnabled reports whether the fix identified by fixenv was explicitly
// enabled via its environment variable (any value strconv.ParseBool accepts
// as true). Unset, empty or unparsable values count as disabled.
func FixEnabled(fixenv K3DFixEnv) bool {
	value, parseErr := strconv.ParseBool(os.Getenv(string(fixenv)))
	return parseErr == nil && value
}
// FixEnabledAny reports whether at least one of the known fixes (FixEnvs)
// is currently enabled.
func FixEnabledAny() bool {
	anyEnabled := false
	for _, env := range FixEnvs {
		if FixEnabled(env) {
			anyEnabled = true
			break
		}
	}
	return anyEnabled
}

@ -24,6 +24,7 @@ package types
import (
"context"
"fmt"
"net"
"time"
"github.com/docker/go-connections/nat"
@ -198,9 +199,10 @@ const (
// ClusterStartOpts describe a set of options one can set when (re-)starting a cluster
type ClusterStartOpts struct {
WaitForServer bool
Timeout time.Duration
NodeHooks []NodeHook `yaml:"nodeHooks,omitempty" json:"nodeHooks,omitempty"`
WaitForServer bool
Timeout time.Duration
NodeHooks []NodeHook `yaml:"nodeHooks,omitempty" json:"nodeHooks,omitempty"`
EnvironmentInfo *EnvironmentInfo
}
// ClusterDeleteOpts describe a set of options one can set when deleting a cluster
@ -210,9 +212,10 @@ type ClusterDeleteOpts struct {
// NodeCreateOpts describes a set of options one can set when creating a new node
type NodeCreateOpts struct {
Wait bool
Timeout time.Duration
NodeHooks []NodeHook `yaml:"nodeHooks,omitempty" json:"nodeHooks,omitempty"`
Wait bool
Timeout time.Duration
NodeHooks []NodeHook `yaml:"nodeHooks,omitempty" json:"nodeHooks,omitempty"`
EnvironmentInfo *EnvironmentInfo
}
// NodeStartOpts describes a set of options one can set when (re-)starting a node
@ -221,6 +224,7 @@ type NodeStartOpts struct {
Timeout time.Duration
NodeHooks []NodeHook `yaml:"nodeHooks,omitempty" json:"nodeHooks,omitempty"`
ReadyLogMessage string
EnvironmentInfo *EnvironmentInfo
}
// NodeDeleteOpts describes a set of options one can set when deleting a node
@ -408,3 +412,7 @@ type RegistryExternal struct {
Host string `yaml:"host" json:"host"`
Port string `yaml:"port" json:"port"`
}
// EnvironmentInfo holds information about the environment the cluster runs
// in, gathered before the cluster is started.
type EnvironmentInfo struct {
	HostGateway net.IP // host/gateway IP as seen from inside the cluster network (used e.g. by the DNS fix)
}

Loading…
Cancel
Save