Merge branch 'master' of github.com:rancher/k3d into enhancement/use-context

pull/244/head
iwilltry42 4 years ago
commit 4710f65b85
No known key found for this signature in database
GPG Key ID: 7BA57AD1CFF16110
  1. Makefile (11 changes)
  2. cmd/create/createCluster.go (14 changes)
  3. cmd/start/startCluster.go (9 changes)
  4. cmd/util/filter.go (2 changes)
  5. cmd/util/volumes.go (30 changes)
  6. docs/usage/commands.md (2 changes)
  7. pkg/cluster/cluster.go (41 changes)
  8. pkg/cluster/node.go (4 changes)
  9. pkg/runtimes/containerd/node.go (3 changes)
  10. pkg/runtimes/containerd/volume.go (5 changes)
  11. pkg/runtimes/docker/node.go (8 changes)
  12. pkg/runtimes/docker/volume.go (26 changes)
  13. pkg/runtimes/runtime.go (5 changes)
  14. pkg/types/types.go (6 changes)
  15. tests/test_full_lifecycle.sh (5 changes)

@@ -10,15 +10,12 @@ ifeq ($(GIT_TAG),)
 GIT_TAG := $(shell git describe --always)
 endif
-# get latest k3s version: grep the tag JSON field, extract the tag and replace + with - (difference between git and dockerhub tags)
-ifneq (${GITHUB_API_TOKEN},)
-K3S_TAG := $(shell curl --silent -H "Authorization: token: ${GITHUB_API_TOKEN}" "https://api.github.com/repos/rancher/k3s/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/' | sed -E 's/\+/\-/')
-else
-K3S_TAG := $(shell curl --silent "https://api.github.com/repos/rancher/k3s/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/' | sed -E 's/\+/\-/')
-endif
+# get latest k3s version: grep the tag and replace + with - (difference between git and dockerhub tags)
+K3S_TAG := $(shell curl --silent "https://update.k3s.io/v1-release/channels/stable" | egrep -o '/v[^ ]+"' | sed -E 's/\/|\"//g' | sed -E 's/\+/\-/')
 ifeq ($(K3S_TAG),)
 $(warning K3S_TAG undefined: couldn't get latest k3s image tag!)
-$(warning Output of curl: $(shell curl --silent "https://api.github.com/repos/rancher/k3s/releases/latest"))
+$(warning Output of curl: $(shell curl --silent "https://update.k3s.io/v1-release/channels/stable"))
 $(error exiting)
 endif
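
As an aside, here is a rough Go equivalent of the new pipeline (illustration only; it assumes the channel response body embeds the latest release URL as a path segment like /v1.17.2+k3s1", which is what the egrep pattern implies):

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "regexp"
        "strings"
    )

    // latestK3sTag mirrors the Makefile pipeline above (hypothetical helper).
    func latestK3sTag() (string, error) {
        resp, err := http.Get("https://update.k3s.io/v1-release/channels/stable")
        if err != nil {
            return "", err
        }
        defer resp.Body.Close()
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return "", err
        }
        // same idea as `egrep -o '/v[^ ]+"'` plus the sed cleanup:
        // pull the version segment out of the response body
        m := regexp.MustCompile(`/(v[^ /"]+)"`).FindSubmatch(body)
        if m == nil {
            return "", fmt.Errorf("no k3s tag found in channel response")
        }
        // git tags contain '+', Docker Hub tags use '-' instead
        return strings.ReplaceAll(string(m[1]), "+", "-"), nil
    }

    func main() {
        tag, err := latestK3sTag()
        if err != nil {
            panic(err)
        }
        fmt.Println(tag)
    }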

@@ -251,7 +251,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
 }
 // validate the specified volume mount and return it in SRC:DEST format
-volume, err = cliutil.ValidateVolumeMount(volume)
+volume, err = cliutil.ValidateVolumeMount(runtimes.SelectedRuntime, volume)
 if err != nil {
 log.Fatalln(err)
 }
@@ -374,11 +374,17 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
 }
 // append ports
+nodeCount := masterCount + workerCount
+nodeList := cluster.Nodes
+if !createClusterOpts.DisableLoadBalancer {
+nodeCount++
+nodeList = append(nodeList, cluster.MasterLoadBalancer)
+}
 for portmap, filters := range portFilterMap {
-if len(filters) == 0 && (masterCount+workerCount) > 1 {
-log.Fatalf("Malformed portmapping '%s' lacks a node filter, but there is more than one node.", portmap)
+if len(filters) == 0 && (nodeCount) > 1 {
+log.Fatalf("Malformed portmapping '%s' lacks a node filter, but there is more than one node (including the loadbalancer, if there is any).", portmap)
 }
-nodes, err := cliutil.FilterNodes(append(cluster.Nodes, cluster.MasterLoadBalancer), filters)
+nodes, err := cliutil.FilterNodes(nodeList, filters)
 if err != nil {
 log.Fatalln(err)
 }
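
In other words, once the cluster has more than one addressable node (now counting the loadbalancer), every port mapping must carry a node filter. A hypothetical invocation using k3d v3's @-filter syntax (cluster name and ports made up):

    k3d create cluster mycluster --workers 2 --port 8080:80@loadbalancer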

@@ -22,8 +22,11 @@ THE SOFTWARE.
 package start
 import (
+"time"
 "github.com/rancher/k3d/pkg/cluster"
 "github.com/rancher/k3d/pkg/runtimes"
+"github.com/rancher/k3d/pkg/types"
 "github.com/spf13/cobra"
 k3d "github.com/rancher/k3d/pkg/types"
@@ -34,6 +37,8 @@ import (
 // NewCmdStartCluster returns a new cobra command
 func NewCmdStartCluster() *cobra.Command {
+startClusterOpts := types.StartClusterOpts{}
 // create new command
 cmd := &cobra.Command{
 Use: "cluster (NAME [NAME...] | --all)",
@@ -45,7 +50,7 @@ func NewCmdStartCluster() *cobra.Command {
 log.Infoln("No clusters found")
 } else {
 for _, c := range clusters {
-if err := cluster.StartCluster(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
+if err := cluster.StartCluster(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
 log.Fatalln(err)
 }
 }
@@ -55,6 +60,8 @@ func NewCmdStartCluster() *cobra.Command {
 // add flags
 cmd.Flags().BoolP("all", "a", false, "Start all existing clusters")
+cmd.Flags().BoolVar(&startClusterOpts.WaitForMaster, "wait", false, "Wait for the master(s) (and loadbalancer) to be ready before returning.")
+cmd.Flags().DurationVar(&startClusterOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")
 // add subcommands

@@ -67,7 +67,7 @@ func SplitFiltersFromFlag(flag string) (string, []string, error) {
 func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
 if len(filters) == 0 || len(filters[0]) == 0 {
-log.Warnln("No filter specified")
+log.Warnln("No node filter specified")
 return nodes, nil
 }

@@ -25,12 +25,16 @@ import (
 "fmt"
 "os"
 "strings"
+"github.com/rancher/k3d/pkg/runtimes"
+log "github.com/sirupsen/logrus"
 )
 // ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path
 // - SRC: source directory/file -> tests: must exist
 // - DEST: source directory/file -> tests: must be absolute path
-func ValidateVolumeMount(volumeMount string) (string, error) {
+func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string, error) {
 src := ""
 dest := ""
@@ -51,8 +55,16 @@ func ValidateVolumeMount(volumeMount string) (string, error) {
 // verify that the source exists
 if src != "" {
-if _, err := os.Stat(src); err != nil {
-return "", fmt.Errorf("Failed to stat file/dir that you're trying to mount: '%s' in '%s'", src, volumeMount)
+// a) named volume
+isNamedVolume := true
+if err := verifyNamedVolume(runtime, src); err != nil {
+log.Debugf("Source '%s' is not a named volume, assuming it's a path...\n%+v", src, err)
+isNamedVolume = false
+}
+if !isNamedVolume {
+if _, err := os.Stat(src); err != nil {
+return "", fmt.Errorf("Failed to stat file/dir that you're trying to mount: '%s' in '%s'", src, volumeMount)
+}
 }
 }
@@ -63,3 +75,15 @@ func ValidateVolumeMount(volumeMount string) (string, error) {
 return fmt.Sprintf("%s:%s", src, dest), nil
 }
+// verifyNamedVolume checks whether a named volume exists in the runtime
+func verifyNamedVolume(runtime runtimes.Runtime, volumeName string) error {
+volumeName, err := runtime.GetVolume(volumeName)
+if err != nil {
+return err
+}
+if volumeName == "" {
+return fmt.Errorf("Failed to find named volume '%s'", volumeName)
+}
+return nil
+}
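
With this change both named volumes and host paths pass validation; a hypothetical call site (volume and path names made up):

    // assuming the docker runtime is selected:
    v, err := cliutil.ValidateVolumeMount(runtimes.SelectedRuntime, "myvol:/data")
    // -> "myvol:/data" if a volume named 'myvol' exists in the runtime
    v, err = cliutil.ValidateVolumeMount(runtimes.SelectedRuntime, "/home/me/data:/data")
    // -> "/home/me/data:/data" if the host path exists; anything that is neither
    //    a known named volume nor a stat-able path is rejected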

@@ -33,6 +33,8 @@ k3d
 start
 cluster CLUSTERNAME # start a (stopped) cluster
 -a, --all # start all clusters
+--wait # wait for all masters and master-loadbalancer to be up before returning
+--timeout # maximum waiting time for '--wait' before canceling/returning
 node NODENAME # start a (stopped) node
 stop
 cluster CLUSTERNAME # stop a cluster
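
For example (cluster name made up):

    k3d start cluster mycluster --wait --timeout 60s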

@@ -30,6 +30,7 @@ import (
 "time"
 k3drt "github.com/rancher/k3d/pkg/runtimes"
+"github.com/rancher/k3d/pkg/types"
 k3d "github.com/rancher/k3d/pkg/types"
 "github.com/rancher/k3d/pkg/util"
 log "github.com/sirupsen/logrus"
@@ -191,7 +192,7 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
 default:
 }
 log.Debugln("Waiting for initializing master node...")
-logreader, err := runtime.GetNodeLogs(ctx, cluster.InitNode)
+logreader, err := runtime.GetNodeLogs(ctx, cluster.InitNode, time.Time{})
 if err != nil {
 if logreader != nil {
 logreader.Close()
@@ -253,7 +254,7 @@ func CreateCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
 // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
 // ... by scanning for this line in logs and restarting the container in case it appears
 log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
-return WaitForNodeLogMessage(ctx, runtime, masterNode, "Wrote kubeconfig")
+return WaitForNodeLogMessage(ctx, runtime, masterNode, "Wrote kubeconfig", time.Time{})
 })
 }
 }
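
Aside: time.Time{} is Go's zero time, for which IsZero() reports true, so the docker runtime below leaves the log query's 'Since' option empty and the whole log is scanned:

    var t time.Time         // zero value, same as time.Time{}
    fmt.Println(t.IsZero()) // true -> no 'Since' filter applied
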
@@ -471,9 +472,20 @@ func generateNodeName(cluster string, role k3d.Role, suffix int) string {
 }
 // StartCluster starts a whole cluster (i.e. all nodes of the cluster)
-func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
+func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, startClusterOpts types.StartClusterOpts) error {
 log.Infof("Starting cluster '%s'", cluster.Name)
+start := time.Now()
+if startClusterOpts.Timeout > 0*time.Second {
+var cancel context.CancelFunc
+ctx, cancel = context.WithTimeout(ctx, startClusterOpts.Timeout)
+defer cancel()
+}
+// vars to support waiting for master nodes to be ready
+waitForMasterWaitgroup, ctx := errgroup.WithContext(ctx)
 failed := 0
 var masterlb *k3d.Node
 for _, node := range cluster.Nodes {
@@ -490,6 +502,17 @@ func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster
 failed++
 continue
 }
+// asynchronously wait for this master node to be ready (by checking the logs for a specific log message)
+if node.Role == k3d.MasterRole && startClusterOpts.WaitForMaster {
+masterNode := node
+waitForMasterWaitgroup.Go(func() error {
+// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
+// ... by scanning for this line in logs and restarting the container in case it appears
+log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
+return WaitForNodeLogMessage(ctx, runtime, masterNode, "Wrote kubeconfig", start)
+})
+}
 }
 // start masterlb
@@ -499,6 +522,18 @@ func StartCluster(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster
 log.Warningf("Failed to start masterlb '%s': Try to start it manually", masterlb.Name)
 failed++
 }
+waitForMasterWaitgroup.Go(func() error {
+// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
+// ... by scanning for this line in logs and restarting the container in case it appears
+log.Debugf("Starting to wait for loadbalancer node '%s'", masterlb.Name)
+return WaitForNodeLogMessage(ctx, runtime, masterlb, "start worker processes", start)
+})
 }
+if err := waitForMasterWaitgroup.Wait(); err != nil {
+log.Errorln("Failed to bring up all nodes in time. Check the logs:")
+log.Errorln(">>> ", err)
+return fmt.Errorf("Failed to bring up cluster")
+}
 if failed > 0 {
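
The pattern above (a timeout-bounded context feeding an errgroup whose goroutines each block until their node reports readiness) in a minimal, self-contained sketch; node names and the fake readiness check are stand-ins for WaitForNodeLogMessage:

    package main

    import (
        "context"
        "fmt"
        "time"

        "golang.org/x/sync/errgroup"
    )

    // waitReady stands in for WaitForNodeLogMessage: it blocks until the node
    // is "ready" or the shared context is cancelled (e.g. by the timeout).
    func waitReady(ctx context.Context, node string) error {
        select {
        case <-time.After(1 * time.Second): // pretend the log line appeared
            return nil
        case <-ctx.Done():
            return fmt.Errorf("node '%s': %w", node, ctx.Err())
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        g, ctx := errgroup.WithContext(ctx)
        for _, n := range []string{"master-0", "master-1"} {
            n := n // capture the loop variable for the closure
            g.Go(func() error { return waitReady(ctx, n) })
        }
        // Wait returns the first non-nil error; that error (or the deadline)
        // cancels the shared ctx for all remaining waiters.
        if err := g.Wait(); err != nil {
            fmt.Println("failed to bring up cluster:", err)
            return
        }
        fmt.Println("all masters ready")
    }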

@@ -233,7 +233,7 @@ func GetNode(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3d.Node, error) {
 }
 // WaitForNodeLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
-func WaitForNodeLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string) error {
+func WaitForNodeLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
 for {
 select {
 case <-ctx.Done():
@@ -242,7 +242,7 @@ func WaitForNodeLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string) error {
 }
 // read the logs
-out, err := runtime.GetNodeLogs(ctx, node)
+out, err := runtime.GetNodeLogs(ctx, node, since)
 if err != nil {
 if out != nil {
 out.Close()

@@ -25,6 +25,7 @@ package containerd
 import (
 "context"
 "io"
+"time"
 "github.com/containerd/containerd"
 "github.com/containerd/containerd/containers"
@@ -117,7 +118,7 @@ func (d Containerd) GetNode(ctx context.Context, node *k3d.Node) (*k3d.Node, error) {
 }
 // GetNodeLogs returns the logs from a given node
-func (d Containerd) GetNodeLogs(ctx context.Context, node *k3d.Node) (io.ReadCloser, error) {
+func (d Containerd) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time) (io.ReadCloser, error) {
 return nil, nil
 }

@@ -32,3 +32,8 @@ func (d Containerd) CreateVolume(ctx context.Context, name string, labels map[string]string) error {
 func (d Containerd) DeleteVolume(ctx context.Context, name string) error {
 return nil
 }
+// GetVolume tries to get a named volume
+func (d Containerd) GetVolume(name string) (string, error) {
+return "", nil
+}

@@ -214,7 +214,7 @@ func (d Docker) GetNode(ctx context.Context, node *k3d.Node) (*k3d.Node, error) {
 }
 // GetNodeLogs returns the logs from a given node
-func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node) (io.ReadCloser, error) {
+func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time) (io.ReadCloser, error) {
 // get the container for the given node
 container, err := getNodeContainer(ctx, node)
 if err != nil {
@@ -239,7 +239,11 @@ func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node) (io.ReadCloser, error) {
 return nil, fmt.Errorf("Node '%s' (container '%s') not running", node.Name, containerInspectResponse.ID)
 }
-logreader, err := docker.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
+sinceStr := ""
+if !since.IsZero() {
+sinceStr = since.Format("2006-01-02T15:04:05")
+}
+logreader, err := docker.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Since: sinceStr})
 if err != nil {
 log.Errorf("Failed to get logs from node '%s' (container '%s')", node.Name, container.ID)
 return nil, err

@@ -25,6 +25,7 @@ import (
 "context"
 "fmt"
+"github.com/docker/docker/api/types/filters"
 "github.com/docker/docker/api/types/volume"
 "github.com/docker/docker/client"
 k3d "github.com/rancher/k3d/pkg/types"
@@ -94,3 +95,28 @@ func (d Docker) DeleteVolume(ctx context.Context, name string) error {
 return nil
 }
+// GetVolume tries to get a named volume
+func (d Docker) GetVolume(name string) (string, error) {
+// (0) create new docker client
+ctx := context.Background()
+docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+if err != nil {
+log.Errorln("Failed to create docker client")
+return "", err
+}
+defer docker.Close()
+filters := filters.NewArgs()
+filters.Add("name", fmt.Sprintf("^%s$", name))
+volumeList, err := docker.VolumeList(ctx, filters)
+if err != nil {
+return "", err
+}
+if len(volumeList.Volumes) < 1 {
+return "", fmt.Errorf("Failed to find named volume '%s'", name)
+}
+return volumeList.Volumes[0].Name, nil
+}
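
Docker's name filter matches substrings, so the ^%s$ anchors should force an exact match (assuming the daemon evaluates the value as a regular expression, as it does for container name filters). A hypothetical call through the runtime interface:

    name, err := runtimes.SelectedRuntime.GetVolume("k3d-mycluster-images") // made-up volume name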

@@ -25,6 +25,7 @@ import (
 "context"
 "fmt"
 "io"
+"time"
 "github.com/rancher/k3d/pkg/runtimes/containerd"
 "github.com/rancher/k3d/pkg/runtimes/docker"
@@ -53,10 +54,10 @@ type Runtime interface {
 StopNode(context.Context, *k3d.Node) error
 CreateVolume(context.Context, string, map[string]string) error
 DeleteVolume(context.Context, string) error
+GetVolume(string) (string, error)
 GetRuntimePath() string // returns e.g. '/var/run/docker.sock' for a default docker setup
 ExecInNode(context.Context, *k3d.Node, []string) error
 // DeleteContainer() error
-GetNodeLogs(context.Context, *k3d.Node) (io.ReadCloser, error)
+GetNodeLogs(context.Context, *k3d.Node, time.Time) (io.ReadCloser, error)
 }
 // GetRuntime checks, if a given name is represented by an implemented k3d runtime and returns it

@@ -106,6 +106,12 @@ type CreateClusterOpts struct {
 K3sAgentArgs []string
 }
+// StartClusterOpts describe a set of options one can set when (re-)starting a cluster
+type StartClusterOpts struct {
+WaitForMaster bool
+Timeout time.Duration
+}
 // ClusterNetwork describes a network which a cluster is running in
 type ClusterNetwork struct {
 Name string `yaml:"name" json:"name,omitempty"`

@@ -30,10 +30,7 @@ check_clusters "$clustername" && failed "cluster was not stopped, since we still
 # 3. start the cluster
 info "Starting cluster..."
-$EXE start cluster "$clustername"
-info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
-sleep 5
+$EXE start cluster "$clustername" --wait --timeout 360s || failed "cluster didn't come back in time"
 info "Checking that we have access to the cluster..."
 check_clusters "$clustername" || failed "error checking cluster"
