Merge pull request #360 from rancher/enhancement/host-ip

[Enhancement] Expose Host IP to services inside the cluster
pull/368/head
Thorsten Klein 4 years ago committed by GitHub
commit 54b4123fc4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 1
      cmd/cluster/clusterCreate.go
  2. 4
      cmd/util/filter.go
  3. 10
      cmd/util/util.go
  4. 61
      pkg/cluster/cluster.go
  5. 102
      pkg/cluster/host.go
  6. 32
      pkg/runtimes/containerd/host.go
  7. 6
      pkg/runtimes/containerd/node.go
  8. 2
      pkg/runtimes/docker/container.go
  9. 43
      pkg/runtimes/docker/host.go
  10. 14
      pkg/runtimes/docker/network.go
  11. 35
      pkg/runtimes/docker/node.go
  12. 3
      pkg/runtimes/docker/translate.go
  13. 4
      pkg/runtimes/runtime.go
  14. 17
      pkg/types/types.go
  15. 33
      pkg/util/regexp.go
  16. 44
      tests/common.sh
  17. 20
      tests/test_full_lifecycle.sh

@ -134,6 +134,7 @@ func NewCmdClusterCreate() *cobra.Command {
cmd.Flags().BoolVar(&updateCurrentContext, "switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (requires --update-default-kubeconfig)")
cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")
cmd.Flags().BoolVar(&noRollback, "no-rollback", false, "Disable the automatic rollback actions, if anything goes wrong")
cmd.Flags().BoolVar(&createClusterOpts.PrepDisableHostIPInjection, "no-hostip", false, "Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS")
/* Image Importing */
cmd.Flags().BoolVar(&createClusterOpts.DisableImageVolume, "no-image-volume", false, "Disable the creation of a volume for importing images")

@ -30,6 +30,8 @@ import (
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/pkg/util"
"regexp"
)
@ -99,7 +101,7 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
}
// map capturing group names to submatches
submatches := mapSubexpNames(filterRegexp.SubexpNames(), match)
submatches := util.MapSubexpNames(filterRegexp.SubexpNames(), match)
// if one of the filters is 'all', we only return this and drop all others
if submatches["group"] == "all" {

@ -20,13 +20,3 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package util
// mapSubexpNames pairs each regex capturing-group name with its
// corresponding submatch, yielding a name -> match lookup table.
func mapSubexpNames(names, matches []string) map[string]string {
	result := make(map[string]string, len(names))
	for i, name := range names {
		result[name] = matches[i]
	}
	return result
}

@ -44,10 +44,15 @@ import (
// - some containerized k3s nodes
// - a docker network
func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
clusterCreateCtx := ctx
clusterPrepCtx := ctx
if cluster.CreateClusterOpts.Timeout > 0*time.Second {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, cluster.CreateClusterOpts.Timeout)
defer cancel()
var cancelClusterCreateCtx context.CancelFunc
var cancelClusterPrepCtx context.CancelFunc
clusterCreateCtx, cancelClusterCreateCtx = context.WithTimeout(ctx, cluster.CreateClusterOpts.Timeout)
clusterPrepCtx, cancelClusterPrepCtx = context.WithTimeout(ctx, cluster.CreateClusterOpts.Timeout)
defer cancelClusterCreateCtx()
defer cancelClusterPrepCtx()
}
/*
@ -74,7 +79,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
}
// create cluster network or use an existing one
networkID, networkExists, err := runtime.CreateNetworkIfNotPresent(ctx, cluster.Network.Name)
networkID, networkExists, err := runtime.CreateNetworkIfNotPresent(clusterCreateCtx, cluster.Network.Name)
if err != nil {
log.Errorln("Failed to create cluster network")
return err
@ -102,7 +107,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
*/
if !cluster.CreateClusterOpts.DisableImageVolume {
imageVolumeName := fmt.Sprintf("%s-%s-images", k3d.DefaultObjectNamePrefix, cluster.Name)
if err := runtime.CreateVolume(ctx, imageVolumeName, map[string]string{k3d.LabelClusterName: cluster.Name}); err != nil {
if err := runtime.CreateVolume(clusterCreateCtx, imageVolumeName, map[string]string{k3d.LabelClusterName: cluster.Name}); err != nil {
log.Errorf("Failed to create image volume '%s' for cluster '%s'", imageVolumeName, cluster.Name)
return err
}
@ -157,7 +162,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
// create node
log.Infof("Creating node '%s'", node.Name)
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
if err := NodeCreate(clusterCreateCtx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
log.Errorln("Failed to create node")
return err
}
@ -189,13 +194,13 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
// wait for the initnode to come up before doing anything else
for {
select {
case <-ctx.Done():
case <-clusterCreateCtx.Done():
log.Errorln("Failed to bring up initializing server node in time")
return fmt.Errorf(">>> %w", ctx.Err())
return fmt.Errorf(">>> %w", clusterCreateCtx.Err())
default:
}
log.Debugln("Waiting for initializing server node...")
logreader, err := runtime.GetNodeLogs(ctx, cluster.InitNode, time.Time{})
logreader, err := runtime.GetNodeLogs(clusterCreateCtx, cluster.InitNode, time.Time{})
if err != nil {
if logreader != nil {
logreader.Close()
@ -219,7 +224,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
}
// vars to support waiting for server nodes to be ready
waitForServerWaitgroup, ctx := errgroup.WithContext(ctx)
waitForServerWaitgroup, clusterCreateCtx := errgroup.WithContext(clusterCreateCtx)
// create all other nodes, but skip the init node
for _, node := range cluster.Nodes {
@ -257,7 +262,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for server node '%s'", serverNode.Name)
return NodeWaitForLogMessage(ctx, runtime, serverNode, k3d.ReadyLogMessageByRole[k3d.ServerRole], time.Time{})
return NodeWaitForLogMessage(clusterCreateCtx, runtime, serverNode, k3d.ReadyLogMessageByRole[k3d.ServerRole], time.Time{})
})
}
}
@ -322,7 +327,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
}
cluster.Nodes = append(cluster.Nodes, lbNode) // append lbNode to list of cluster nodes, so it will be considered during rollback
log.Infof("Creating LoadBalancer '%s'", lbNode.Name)
if err := NodeCreate(ctx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil {
if err := NodeCreate(clusterCreateCtx, runtime, lbNode, k3d.NodeCreateOpts{}); err != nil {
log.Errorln("Failed to create loadbalancer")
return err
}
@ -331,7 +336,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for loadbalancer node '%s'", lbNode.Name)
return NodeWaitForLogMessage(ctx, runtime, lbNode, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], time.Time{})
return NodeWaitForLogMessage(clusterCreateCtx, runtime, lbNode, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], time.Time{})
})
}
} else {
@ -345,6 +350,36 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
return fmt.Errorf("Failed to bring up cluster")
}
/**********************************
* Additional Cluster Preparation *
**********************************/
/*
* Networking Magic
*/
// add /etc/hosts and CoreDNS entry for host.k3d.internal, referring to the host system
if !cluster.CreateClusterOpts.PrepDisableHostIPInjection {
hostIP, err := GetHostIP(clusterPrepCtx, runtime, cluster)
if err != nil {
log.Errorln("Failed to get HostIP")
return err
}
hostsEntry := fmt.Sprintf("%s %s", hostIP, k3d.DefaultK3dInternalHostRecord)
log.Debugf("Adding extra host entry '%s'...", hostsEntry)
for _, node := range cluster.Nodes {
if err := runtime.ExecInNode(clusterPrepCtx, node, []string{"sh", "-c", fmt.Sprintf("echo '%s' >> /etc/hosts", hostsEntry)}); err != nil {
log.Warnf("Failed to add extra entry '%s' to /etc/hosts in node '%s'", hostsEntry, node.Name)
}
}
patchCmd := `test=$(kubectl get cm coredns -n kube-system --template='{{.data.NodeHosts}}' | sed -n -E -e '/[0-9\.]{4,12}\s+host\.k3d\.internal$/!p' -e '$a` + hostsEntry + `' | tr '\n' '^' | xargs -0 printf '{"data": {"NodeHosts":"%s"}}'| sed -E 's%\^%\\n%g') && kubectl patch cm coredns -n kube-system -p="$test"`
err = runtime.ExecInNode(clusterPrepCtx, cluster.Nodes[0], []string{"sh", "-c", patchCmd})
if err != nil {
log.Warnf("Failed to patch CoreDNS ConfigMap to include entry '%s': %+v", hostsEntry, err)
}
}
return nil
}

@ -0,0 +1,102 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cluster
import (
"bufio"
"context"
"fmt"
"net"
"regexp"
"runtime"
rt "github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/pkg/util"
log "github.com/sirupsen/logrus"
)
// nsLookupAddressRegexp matches an `Address: <IPv4>` line of nslookup output and
// captures the dotted-quad address in the named group "ip".
// NOTE(review): \d{1,3} also admits octets > 255, so a match is not guaranteed to be a valid IPv4 address.
var nsLookupAddressRegexp = regexp.MustCompile(`^Address:\s+(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$`)
// GetHostIP returns the routable IP address to be able to access services running on the host system from inside the cluster.
// This depends on the Operating System and the chosen Runtime.
func GetHostIP(ctx context.Context, rtime rt.Runtime, cluster *k3d.Cluster) (net.IP, error) {
	// only the docker runtime is supported so far
	if rtime != rt.Docker {
		return nil, fmt.Errorf("GetHostIP only implemented for the docker runtime")
	}

	log.Debugf("Runtime GOOS: %s", runtime.GOOS)

	switch runtime.GOOS {
	case "linux":
		// "native" Docker on Linux: ask the runtime for the host address of the cluster network
		return rtime.GetHostIP(ctx, cluster.Network.Name)
	case "windows", "darwin":
		// Docker (for Desktop) on MacOS or Windows: resolve the special
		// host.docker.internal name from inside one of the cluster nodes
		return resolveHostnameFromInside(ctx, rtime, cluster.Nodes[0], "host.docker.internal")
	default:
		// catch all other GOOS cases
		return nil, fmt.Errorf("GetHostIP only implemented for Linux, MacOS (Darwin) and Windows")
	}
}
// resolveHostnameFromInside resolves hostname from within the given node container
// by running nslookup there and parsing the reported address out of its output.
// It returns an error if the lookup fails, the output cannot be read, or no valid
// IPv4 address can be extracted.
func resolveHostnameFromInside(ctx context.Context, rtime rt.Runtime, node *k3d.Node, hostname string) (net.IP, error) {
	logreader, err := rtime.ExecInNodeGetLogs(ctx, node, []string{"sh", "-c", fmt.Sprintf("nslookup %s", hostname)})
	if err != nil {
		return nil, err
	}

	// scan the nslookup output line by line for the first matching "Address: <IPv4>" line
	submatches := map[string]string{}
	scanner := bufio.NewScanner(logreader)
	for scanner.Scan() {
		match := nsLookupAddressRegexp.FindStringSubmatch(scanner.Text())
		if len(match) == 0 {
			continue
		}
		submatches = util.MapSubexpNames(nsLookupAddressRegexp.SubexpNames(), match)
		break
	}
	// surface read errors instead of silently reporting "address not found"
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("Failed to scan nslookup output for '%s': %w", hostname, err)
	}
	if _, ok := submatches["ip"]; !ok {
		return nil, fmt.Errorf("Failed to read address for '%s' from nslookup response", hostname)
	}

	// guard against a regex match that is not a valid IP (the pattern admits octets > 255),
	// in which case net.ParseIP returns nil
	ip := net.ParseIP(submatches["ip"])
	if ip == nil {
		return nil, fmt.Errorf("Failed to parse IP '%s' resolved for hostname '%s'", submatches["ip"], hostname)
	}

	log.Debugf("Hostname '%s' -> Address '%s'", hostname, submatches["ip"])
	return ip, nil
}

@ -0,0 +1,32 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package containerd
import (
"context"
"net"
)
// GetHostIP returns the IP of the containerd host
// NOTE(review): not implemented yet — always returns (nil, nil), so callers get a
// nil IP without any error for the containerd runtime; confirm callers handle this.
func (d Containerd) GetHostIP(ctx context.Context, network string) (net.IP, error) {
	return nil, nil
}

@ -23,6 +23,7 @@ THE SOFTWARE.
package containerd
import (
"bufio"
"context"
"io"
"time"
@ -126,3 +127,8 @@ func (d Containerd) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.
func (d Containerd) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) error {
return nil
}
// ExecInNodeGetLogs execs a command inside a node and returns its logreader
// NOTE(review): not implemented yet — always returns (nil, nil); callers that try to
// read from the returned reader without a nil-check will panic.
func (d Containerd) ExecInNodeGetLogs(ctx context.Context, node *k3d.Node, cmd []string) (*bufio.Reader, error) {
	return nil, nil
}

@ -147,7 +147,7 @@ func getNodeContainer(ctx context.Context, node *k3d.Node) (*types.Container, er
for k, v := range node.Labels {
filters.Add("label", fmt.Sprintf("%s=%s", k, v))
}
// See https://github.com/moby/moby/issues/29997 for explanation around initial /
// See https://github.com/moby/moby/issues/29997 for explanation around initial /
filters.Add("name", fmt.Sprintf("^/?%s$", node.Name)) // regex filtering for exact name match
containers, err := docker.ContainerList(ctx, types.ContainerListOptions{

@ -0,0 +1,43 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package docker
import (
"context"
"fmt"
"net"
"runtime"
)
// GetHostIP returns the IP of the docker host (routable from inside the containers)
func (d Docker) GetHostIP(ctx context.Context, network string) (net.IP, error) {
	// only implemented for Linux, where the gateway of the cluster network is used
	if runtime.GOOS != "linux" {
		return nil, fmt.Errorf("Docker Runtime: GetHostIP only implemented for Linux")
	}
	return GetGatewayIP(ctx, network)
}

@ -23,6 +23,7 @@ package docker
import (
"context"
"net"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
@ -104,3 +105,16 @@ func GetNetwork(ctx context.Context, ID string) (types.NetworkResource, error) {
defer docker.Close()
return docker.NetworkInspect(ctx, ID, types.NetworkInspectOptions{})
}
// GetGatewayIP returns the IP of the network gateway
// NOTE(review): assumes the network has at least one IPAM config entry —
// IPAM.Config[0] panics with index-out-of-range for networks without one
// (e.g. 'host'/'none'); confirm callers only pass bridge networks.
// NOTE(review): net.ParseIP returns nil for an empty/invalid Gateway string,
// so this can return (nil, nil).
func GetGatewayIP(ctx context.Context, network string) (net.IP, error) {
	bridgeNetwork, err := GetNetwork(ctx, network)
	if err != nil {
		log.Errorf("Failed to get bridge network with name '%s'", network)
		return nil, err
	}
	// the gateway address is read from the first IPAM config entry of the network
	gatewayIP := net.ParseIP(bridgeNetwork.IPAM.Config[0].Gateway)
	return gatewayIP, nil
}

@ -23,6 +23,7 @@ THE SOFTWARE.
package docker
import (
"bufio"
"context"
"fmt"
"io"
@ -303,22 +304,36 @@ func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time
return logreader, nil
}
// ExecInNodeGetLogs executes a command inside a node and returns the logs to the caller, e.g. to parse them
// NOTE(review): executeInNode defers Close() on the hijacked connection —
// confirm the returned Reader is still readable (buffered) after this returns.
func (d Docker) ExecInNodeGetLogs(ctx context.Context, node *k3d.Node, cmd []string) (*bufio.Reader, error) {
	execConnection, err := executeInNode(ctx, node, cmd)
	if err != nil {
		return nil, err
	}
	return execConnection.Reader, nil
}
// ExecInNode execs a command inside a node, discarding any output.
func (d Docker) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) error {
	if _, err := executeInNode(ctx, node, cmd); err != nil {
		return err
	}
	return nil
}
func executeInNode(ctx context.Context, node *k3d.Node, cmd []string) (*types.HijackedResponse, error) {
log.Debugf("Executing command '%+v' in node '%s'", cmd, node.Name)
// get the container for the given node
container, err := getNodeContainer(ctx, node)
if err != nil {
return err
return nil, err
}
// create docker client
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
return err
return nil, err
}
defer docker.Close()
@ -332,7 +347,7 @@ func (d Docker) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) er
})
if err != nil {
log.Errorf("Failed to create exec config for node '%s'", node.Name)
return err
return nil, err
}
execConnection, err := docker.ContainerExecAttach(ctx, exec.ID, types.ExecStartCheck{
@ -340,13 +355,12 @@ func (d Docker) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) er
})
if err != nil {
log.Errorf("Failed to connect to exec process in node '%s'", node.Name)
return err
return nil, err
}
defer execConnection.Close()
if err := docker.ContainerExecStart(ctx, exec.ID, types.ExecStartCheck{Tty: true}); err != nil {
log.Errorf("Failed to start exec process in node '%s'", node.Name)
return err
return nil, err
}
for {
@ -354,7 +368,7 @@ func (d Docker) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) er
execInfo, err := docker.ContainerExecInspect(ctx, exec.ID)
if err != nil {
log.Errorf("Failed to inspect exec process in node '%s'", node.Name)
return err
return &execConnection, err
}
// if still running, continue loop
@ -375,13 +389,14 @@ func (d Docker) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) er
logs, err := ioutil.ReadAll(execConnection.Reader)
if err != nil {
log.Errorf("Failed to get logs from node '%s'", node.Name)
return err
return &execConnection, err
}
return fmt.Errorf("Logs from failed access process:\n%s", string(logs))
return &execConnection, fmt.Errorf("Logs from failed access process:\n%s", string(logs))
}
}
return nil
return &execConnection, nil
}

@ -41,7 +41,8 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) {
/* initialize everything that we need */
containerConfig := docker.Config{}
hostConfig := docker.HostConfig{
Init: &[]bool{true}[0],
Init: &[]bool{true}[0],
ExtraHosts: node.ExtraHosts,
}
networkingConfig := network.NetworkingConfig{}

@ -22,9 +22,11 @@ THE SOFTWARE.
package runtimes
import (
"bufio"
"context"
"fmt"
"io"
"net"
"time"
"github.com/rancher/k3d/v3/pkg/runtimes/containerd"
@ -63,9 +65,11 @@ type Runtime interface {
GetVolume(string) (string, error)
GetRuntimePath() string // returns e.g. '/var/run/docker.sock' for a default docker setup
ExecInNode(context.Context, *k3d.Node, []string) error
ExecInNodeGetLogs(context.Context, *k3d.Node, []string) (*bufio.Reader, error)
GetNodeLogs(context.Context, *k3d.Node, time.Time) (io.ReadCloser, error)
GetImages(context.Context) ([]string, error)
CopyToNode(context.Context, string, string, *k3d.Node) error
GetHostIP(context.Context, string) (net.IP, error)
}
// GetRuntime checks, if a given name is represented by an implemented k3d runtime and returns it

@ -108,6 +108,9 @@ var DefaultNodeEnv = []string{
"K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml",
}
// DefaultK3dInternalHostRecord defines the default /etc/hosts entry for the k3d host
const DefaultK3dInternalHostRecord = "host.k3d.internal"
// DefaultImageVolumeMountPath defines the mount path inside k3d nodes where we will mount the shared image volume by default
const DefaultImageVolumeMountPath = "/k3d/images"
@ -130,12 +133,13 @@ var DoNotCopyServerFlags = []string{
// ClusterCreateOpts describe a set of options one can set when creating a cluster
type ClusterCreateOpts struct {
DisableImageVolume bool
WaitForServer bool
Timeout time.Duration
DisableLoadBalancer bool
K3sServerArgs []string
K3sAgentArgs []string
PrepDisableHostIPInjection bool
DisableImageVolume bool
WaitForServer bool
Timeout time.Duration
DisableLoadBalancer bool
K3sServerArgs []string
K3sAgentArgs []string
}
// ClusterStartOpts describe a set of options one can set when (re-)starting a cluster
@ -234,6 +238,7 @@ type Node struct {
Restart bool `yaml:"restart" json:"restart,omitempty"`
Labels map[string]string // filled automatically
Network string // filled automatically
ExtraHosts []string // filled automatically
ServerOpts ServerOpts `yaml:"server_opts" json:"serverOpts,omitempty"`
AgentOpts AgentOpts `yaml:"agent_opts" json:"agentOpts,omitempty"`
State NodeState // filled automatically

@ -0,0 +1,33 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package util
// MapSubexpNames maps regex capturing group names to corresponding matches,
// producing a name -> submatch lookup table (index 0 pairs the unnamed full
// match with the empty-string key).
func MapSubexpNames(names, matches []string) map[string]string {
	result := make(map[string]string, len(names))
	for i, name := range names {
		result[name] = matches[i]
	}
	return result
}

@ -119,3 +119,47 @@ check_cluster_token_exist() {
[ -n "$EXE" ] || abort "EXE is not defined"
$EXE cluster get "$1" --token | grep "TOKEN" >/dev/null 2>&1
}
# wait_for_pod_running_by_label waits until the first pod matching a label
# selector is Running.
# $1 = label selector (key=value)
# $2 = namespace (optional; default namespace when empty)
wait_for_pod_running_by_label() {
  # resolve the selector to a concrete pod name, then delegate to the by-name waiter
  # (the namespace flag is only emitted when $2 is non-empty; intentionally unquoted
  # so it word-splits into '--namespace <ns>')
  podname=$(kubectl get pod -l "$1" $([[ -n "$2" ]] && echo "--namespace $2") -o jsonpath='{.items[0].metadata.name}')
  wait_for_pod_running_by_name "$podname" "$2"
}
# wait_for_pod_running_by_name polls a pod until it reports phase Running.
# $1 = pod name
# $2 = namespace (optional; default namespace when empty)
# Returns 0 once Running, 1 on a failure status (and dumps pod details).
wait_for_pod_running_by_name() {
  while : ; do
    podstatus=$(kubectl get pod "$1" $([[ -n "$2" ]] && echo "--namespace $2") -o go-template='{{.status.phase}}')
    case "$podstatus" in
    "ErrImagePull" )
      echo "Pod $1 is NOT running: ErrImagePull"
      return 1
      ;;
    "ContainerCreating" | "Pending" )
      # not ready yet: back off briefly instead of re-querying the API server
      # in a tight busy-loop
      sleep 2
      continue
      ;;
    "Running" )
      echo "Pod $1 is Running"
      return 0
      ;;
    * )
      # NOTE(review): .status.phase only yields Pending/Running/Succeeded/Failed/Unknown,
      # so the ErrImagePull/ContainerCreating cases above likely never match — confirm.
      echo "Pod $1 is NOT running: Unknown status '$podstatus'"
      kubectl describe pod "$1" || kubectl get pods $([[ -n "$2" ]] && echo "--namespace $2")
      return 1
    esac
  done
}
# wait_for_pod_exec retries a command inside a pod until it succeeds or the
# retry budget is exhausted.
# $1 = pod name
# $2 = command
# $3 = max. retries (default: 10)
# Returns 0 on the first successful execution, 1 after all retries failed.
wait_for_pod_exec() {
  max_retries=$([[ -n "$3" ]] && echo "$3" || echo "10")
  for (( i=0; i<=max_retries; i++ )); do
    echo "Try #$i"
    # $2 is intentionally unquoted so the command word-splits into its arguments
    kubectl exec "$1" -- $2 && return 0
    # pause between attempts instead of retrying back-to-back
    sleep 1
  done
  echo "Command '$2' in pod '$1' did NOT return successfully in $max_retries tries"
  return 1
}

@ -47,16 +47,24 @@ check_multi_node "$clustername" 3 || failed "failed to verify number of nodes"
# 4. load an image into the cluster
info "Importing an image into the cluster..."
docker pull nginx:latest > /dev/null
docker tag nginx:latest nginx:local > /dev/null
$EXE image import nginx:local -c $clustername || failed "could not import image in $clustername"
docker pull alpine:latest > /dev/null
docker tag alpine:latest alpine:local > /dev/null
$EXE image import alpine:local -c $clustername || failed "could not import image in $clustername"
# 5. use that image
# 5. use imported image
info "Spawning a pod using the imported image..."
kubectl run --image nginx:local testimage
kubectl run --image alpine:local testimage --command -- tail -f /dev/null
info "Waiting for a bit for the pod to start..."
sleep 5
kubectl get pod testimage | grep 'Running' || failed "Pod using the imported image is not running after 5 seconds"
wait_for_pod_running_by_name "testimage"
wait_for_pod_running_by_label "k8s-app=kube-dns" "kube-system"
sleep 5
# 6. test host.k3d.internal
info "Checking DNS Lookup for host.k3d.internal"
wait_for_pod_exec "testimage" "nslookup host.k3d.internal" 6
# Cleanup

Loading…
Cancel
Save