From 0270ac73278e2baed7aceccaf8f462e9c6002c6b Mon Sep 17 00:00:00 2001
From: iwilltry42
Date: Tue, 26 May 2020 17:20:38 +0200
Subject: [PATCH 1/9] createCluster: take loadbalancer into consideration when mapping ports

---
 cmd/create/createCluster.go | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/cmd/create/createCluster.go b/cmd/create/createCluster.go
index 0b5df7db..cf308c79 100644
--- a/cmd/create/createCluster.go
+++ b/cmd/create/createCluster.go
@@ -374,9 +374,13 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
     }

     // append ports
+    lbCount := 1
+    if createClusterOpts.DisableLoadBalancer {
+        lbCount = 0
+    }
     for portmap, filters := range portFilterMap {
-        if len(filters) == 0 && (masterCount+workerCount) > 1 {
-            log.Fatalf("Malformed portmapping '%s' lacks a node filter, but there is more than one node.", portmap)
+        if len(filters) == 0 && (masterCount+workerCount+lbCount) > 1 {
+            log.Fatalf("Malformed portmapping '%s' lacks a node filter, but there is more than one node (including the loadbalancer, if there is any).", portmap)
         }
         nodes, err := cliutil.FilterNodes(append(cluster.Nodes, cluster.MasterLoadBalancer), filters)
         if err != nil {

From cb2a3bad2f7109155e6f61e50d7156a73a5a81db Mon Sep 17 00:00:00 2001
From: iwilltry42
Date: Tue, 26 May 2020 18:03:11 +0200
Subject: [PATCH 2/9] installScript: use channel server to fetch latest stable k3s version

---
 Makefile | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/Makefile b/Makefile
index ac8629ed..18e343ba 100644
--- a/Makefile
+++ b/Makefile
@@ -10,15 +10,12 @@ ifeq ($(GIT_TAG),)
 GIT_TAG := $(shell git describe --always)
 endif

-# get latest k3s version: grep the tag JSON field, extract the tag and replace + with - (difference between git and dockerhub tags)
-ifneq (${GITHUB_API_TOKEN},)
-K3S_TAG := $(shell curl --silent -H "Authorization: token: ${GITHUB_API_TOKEN}" "https://api.github.com/repos/rancher/k3s/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/' | sed -E 's/\+/\-/')
-else
-K3S_TAG := $(shell curl --silent "https://api.github.com/repos/rancher/k3s/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/' | sed -E 's/\+/\-/')
-endif
+# get latest k3s version: grep the tag and replace + with - (difference between git and dockerhub tags)
+K3S_TAG := $(shell curl --silent "https://update.k3s.io/v1-release/channels/stable" | egrep -o '/v[^ ]+"' | sed -E 's/\/|\"//g' | sed -E 's/\+/\-/')
+
 ifeq ($(K3S_TAG),)
 $(warning K3S_TAG undefined: couldn't get latest k3s image tag!)
-$(warning Output of curl: $(shell curl --silent "https://api.github.com/repos/rancher/k3s/releases/latest"))
+$(warning Output of curl: $(shell curl --silent "curl --silent "https://update.k3s.io/v1-release/channels/stable""))
 $(error exiting)
 endif

From 1c7eaf7f3537fa0c0451c4422cc9ec30b8def1a5 Mon Sep 17 00:00:00 2001
From: Lionel Nicolas
Date: Tue, 26 May 2020 23:06:23 -0400
Subject: [PATCH 3/9] installScript: fix duplication in curl command

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 18e343ba..f108f7b9 100644
--- a/Makefile
+++ b/Makefile
@@ -15,7 +15,7 @@ K3S_TAG := $(shell curl --silent "https://update.k3s.io/v1-release/channels/sta

 ifeq ($(K3S_TAG),)
 $(warning K3S_TAG undefined: couldn't get latest k3s image tag!)
-$(warning Output of curl: $(shell curl --silent "curl --silent "https://update.k3s.io/v1-release/channels/stable""))
+$(warning Output of curl: $(shell curl --silent "https://update.k3s.io/v1-release/channels/stable"))
 $(error exiting)
 endif

From dabec2f09152038a268cbbb5d3f24829c2aac0cc Mon Sep 17 00:00:00 2001
From: iwilltry42
Date: Wed, 27 May 2020 18:23:39 +0200
Subject: [PATCH 4/9] createCluster: fix loadbalancer consideration

- issue #248: nil pointer dereference because we're trying to access a non-existent loadbalancer node
---
 cmd/create/createCluster.go | 12 +++++++-----
 cmd/util/filter.go          |  2 +-
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/cmd/create/createCluster.go b/cmd/create/createCluster.go
index cf308c79..74f81a9d 100644
--- a/cmd/create/createCluster.go
+++ b/cmd/create/createCluster.go
@@ -374,15 +374,17 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
     }

     // append ports
-    lbCount := 1
-    if createClusterOpts.DisableLoadBalancer {
-        lbCount = 0
+    nodeCount := masterCount + workerCount
+    nodeList := cluster.Nodes
+    if !createClusterOpts.DisableLoadBalancer {
+        nodeCount++
+        nodeList = append(nodeList, cluster.MasterLoadBalancer)
     }
     for portmap, filters := range portFilterMap {
-        if len(filters) == 0 && (masterCount+workerCount+lbCount) > 1 {
+        if len(filters) == 0 && (nodeCount) > 1 {
             log.Fatalf("Malformed portmapping '%s' lacks a node filter, but there is more than one node (including the loadbalancer, if there is any).", portmap)
         }
-        nodes, err := cliutil.FilterNodes(append(cluster.Nodes, cluster.MasterLoadBalancer), filters)
+        nodes, err := cliutil.FilterNodes(nodeList, filters)
         if err != nil {
             log.Fatalln(err)
         }
diff --git a/cmd/util/filter.go b/cmd/util/filter.go
index e9571069..5f62091e 100644
--- a/cmd/util/filter.go
+++ b/cmd/util/filter.go
@@ -67,7 +67,7 @@ func SplitFiltersFromFlag(flag string) (string, []string, error) {

 func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
     if len(filters) == 0 || len(filters[0]) == 0 {
-        log.Warnln("No filter specified")
+        log.Warnln("No node filter specified")
         return nodes, nil
     }

From 06f543518c5e9f1a8d87bdafeab41cece44a9846 Mon Sep 17 00:00:00 2001
From: iwilltry42
Date: Thu, 28 May 2020 11:01:25 +0200
Subject: [PATCH 5/9] fix: re-add support for named volumes used with --volume flag

---
 cmd/create/createCluster.go       |  2 +-
 cmd/util/volumes.go               | 30 +++++++++++++++++++++++++++---
 pkg/runtimes/containerd/volume.go |  5 +++++
 pkg/runtimes/docker/volume.go     | 26 ++++++++++++++++++++++++++
 pkg/runtimes/runtime.go           |  1 +
 5 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/cmd/create/createCluster.go b/cmd/create/createCluster.go
index 74f81a9d..f6c0dba9 100644
--- a/cmd/create/createCluster.go
+++ b/cmd/create/createCluster.go
@@ -251,7 +251,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
         }

         // validate the specified volume mount and return it in SRC:DEST format
-        volume, err = cliutil.ValidateVolumeMount(volume)
+        volume, err = cliutil.ValidateVolumeMount(runtimes.SelectedRuntime, volume)
         if err != nil {
             log.Fatalln(err)
         }
diff --git a/cmd/util/volumes.go b/cmd/util/volumes.go
index 816ac0da..302b1fe6 100644
--- a/cmd/util/volumes.go
+++ b/cmd/util/volumes.go
@@ -25,12 +25,16 @@ import (
     "fmt"
     "os"
     "strings"
+
+    "github.com/rancher/k3d/pkg/runtimes"
+
+    log "github.com/sirupsen/logrus"
 )

 // ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path
 // - SRC: source directory/file -> tests: must exist
 // - DEST: source directory/file -> tests: must be absolute path
-func ValidateVolumeMount(volumeMount string) (string, error) {
+func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) (string, error) {
     src := ""
     dest := ""

@@ -51,8 +55,16 @@ func ValidateVolumeMount(volumeMount string) (string, error) {

     // verify that the source exists
     if src != "" {
-        if _, err := os.Stat(src); err != nil {
-            return "", fmt.Errorf("Failed to stat file/dir that you're trying to mount: '%s' in '%s'", src, volumeMount)
+        // a) named volume
+        isNamedVolume := true
+        if err := verifyNamedVolume(runtime, src); err != nil {
+            log.Debugf("Source '%s' is not a named volume, assuming it's a path...\n%+v", src, err)
+            isNamedVolume = false
+        }
+        if !isNamedVolume {
+            if _, err := os.Stat(src); err != nil {
+                return "", fmt.Errorf("Failed to stat file/dir that you're trying to mount: '%s' in '%s'", src, volumeMount)
+            }
         }
     }

@@ -63,3 +75,15 @@ func ValidateVolumeMount(volumeMount string) (string, error) {

     return fmt.Sprintf("%s:%s", src, dest), nil
 }
+
+// verifyNamedVolume checks whether a named volume exists in the runtime
+func verifyNamedVolume(runtime runtimes.Runtime, volumeName string) error {
+    volumeName, err := runtime.GetVolume(volumeName)
+    if err != nil {
+        return err
+    }
+    if volumeName == "" {
+        return fmt.Errorf("Failed to find named volume '%s'", volumeName)
+    }
+    return nil
+}
diff --git a/pkg/runtimes/containerd/volume.go b/pkg/runtimes/containerd/volume.go
index 7dc49dcf..1b06e11e 100644
--- a/pkg/runtimes/containerd/volume.go
+++ b/pkg/runtimes/containerd/volume.go
@@ -30,3 +30,8 @@ func (d Containerd) CreateVolume(name string, labels map[string]string) error {
 func (d Containerd) DeleteVolume(name string) error {
     return nil
 }
+
+// GetVolume tries to get a named volume
+func (d Containerd) GetVolume(name string) (string, error) {
+    return "", nil
+}
diff --git a/pkg/runtimes/docker/volume.go b/pkg/runtimes/docker/volume.go
index b64ece54..2400e27b 100644
--- a/pkg/runtimes/docker/volume.go
+++ b/pkg/runtimes/docker/volume.go
@@ -25,6 +25,7 @@ import (
     "context"
     "fmt"

+    "github.com/docker/docker/api/types/filters"
     "github.com/docker/docker/api/types/volume"
     "github.com/docker/docker/client"
     k3d "github.com/rancher/k3d/pkg/types"
@@ -96,3 +97,28 @@ func (d Docker) DeleteVolume(name string) error {

     return nil
 }
+
+// GetVolume tries to get a named volume
+func (d Docker) GetVolume(name string) (string, error) {
+    // (0) create new docker client
+    ctx := context.Background()
+    docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+    if err != nil {
+        log.Errorln("Failed to create docker client")
+        return "", err
+    }
+    defer docker.Close()
+
+    filters := filters.NewArgs()
+    filters.Add("name", fmt.Sprintf("^%s$", name))
+    volumeList, err := docker.VolumeList(ctx, filters)
+    if err != nil {
+        return "", err
+    }
+    if len(volumeList.Volumes) < 1 {
+        return "", fmt.Errorf("Failed to find named volume '%s'", name)
+    }
+
+    return volumeList.Volumes[0].Name, nil
+
+}
diff --git a/pkg/runtimes/runtime.go b/pkg/runtimes/runtime.go
index 93997583..428e0c61 100644
--- a/pkg/runtimes/runtime.go
+++ b/pkg/runtimes/runtime.go
@@ -52,6 +52,7 @@ type Runtime interface {
     StopNode(*k3d.Node) error
     CreateVolume(string, map[string]string) error
     DeleteVolume(string) error
+    GetVolume(string) (string, error)
     GetRuntimePath() string // returns e.g. '/var/run/docker.sock' for a default docker setup
     ExecInNode(*k3d.Node, []string) error
     // DeleteContainer() error

From 332c3a8c9b77603668267412849cc72bee22249a Mon Sep 17 00:00:00 2001
From: iwilltry42
Date: Thu, 28 May 2020 13:53:20 +0200
Subject: [PATCH 6/9] startCluster: add --wait and --timeout

- functionality mostly copy-pasted from createCluster
---
 cmd/start/startCluster.go |  9 ++++++++-
 pkg/cluster/cluster.go    | 29 ++++++++++++++++++++++++++++-
 pkg/types/types.go        |  6 ++++++
 3 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/cmd/start/startCluster.go b/cmd/start/startCluster.go
index c4c2e16a..e8de46d9 100644
--- a/cmd/start/startCluster.go
+++ b/cmd/start/startCluster.go
@@ -22,8 +22,11 @@ THE SOFTWARE.
 package start

 import (
+    "time"
+
     "github.com/rancher/k3d/pkg/cluster"
     "github.com/rancher/k3d/pkg/runtimes"
+    "github.com/rancher/k3d/pkg/types"
     "github.com/spf13/cobra"

     k3d "github.com/rancher/k3d/pkg/types"
@@ -34,6 +37,8 @@ import (

 // NewCmdStartCluster returns a new cobra command
 func NewCmdStartCluster() *cobra.Command {
+    startClusterOpts := types.StartClusterOpts{}
+
     // create new command
     cmd := &cobra.Command{
         Use: "cluster (NAME [NAME...] | --all)",
@@ -45,7 +50,7 @@ func NewCmdStartCluster() *cobra.Command {
                 log.Infoln("No clusters found")
             } else {
                 for _, c := range clusters {
-                    if err := cluster.StartCluster(c, runtimes.SelectedRuntime); err != nil {
+                    if err := cluster.StartCluster(cmd.Context(), c, runtimes.SelectedRuntime, startClusterOpts); err != nil {
                         log.Fatalln(err)
                     }
                 }
@@ -55,6 +60,8 @@ func NewCmdStartCluster() *cobra.Command {

     // add flags
     cmd.Flags().BoolP("all", "a", false, "Start all existing clusters")
+    cmd.Flags().BoolVar(&startClusterOpts.WaitForMaster, "wait", false, "Wait for the master(s) to be ready before returning.")
+    cmd.Flags().DurationVar(&startClusterOpts.Timeout, "timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")

     // add subcommands

diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index f50ccf86..a2254703 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -30,6 +30,7 @@ import (
     "time"

     k3drt "github.com/rancher/k3d/pkg/runtimes"
+    "github.com/rancher/k3d/pkg/types"
     k3d "github.com/rancher/k3d/pkg/types"
     "github.com/rancher/k3d/pkg/util"
     log "github.com/sirupsen/logrus"
@@ -471,9 +472,18 @@ func generateNodeName(cluster string, role k3d.Role, suffix int) string {
 }

 // StartCluster starts a whole cluster (i.e. all nodes of the cluster)
-func StartCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
+func StartCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runtime, startClusterOpts types.StartClusterOpts) error {
     log.Infof("Starting cluster '%s'", cluster.Name)

+    if startClusterOpts.Timeout > 0*time.Second {
+        var cancel context.CancelFunc
+        ctx, cancel = context.WithTimeout(ctx, startClusterOpts.Timeout)
+        defer cancel()
+    }
+
+    // vars to support waiting for master nodes to be ready
+    waitForMasterWaitgroup, ctx := errgroup.WithContext(ctx)
+
     failed := 0
     var masterlb *k3d.Node
     for _, node := range cluster.Nodes {
@@ -490,6 +500,23 @@ func StartCluster(cluster *k3d.Cluster, runtime k3drt.Runtime) error {
             failed++
             continue
         }
+
+        // asynchronously wait for this master node to be ready (by checking the logs for a specific log message)
+        if node.Role == k3d.MasterRole && startClusterOpts.WaitForMaster {
+            masterNode := node
+            waitForMasterWaitgroup.Go(func() error {
+                // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
+                // ... by scanning for this line in logs and restarting the container in case it appears
+                log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
+                return WaitForNodeLogMessage(ctx, runtime, masterNode, "Wrote kubeconfig")
+            })
+        }
+
+        if err := waitForMasterWaitgroup.Wait(); err != nil {
+            log.Errorln("Failed to bring up all master nodes in time. Check the logs:")
+            log.Errorln(">>> ", err)
+            return fmt.Errorf("Failed to bring up cluster")
+        }
     }

     // start masterlb
diff --git a/pkg/types/types.go b/pkg/types/types.go
index dd771e65..2b445036 100644
--- a/pkg/types/types.go
+++ b/pkg/types/types.go
@@ -106,6 +106,12 @@ type CreateClusterOpts struct {
     K3sAgentArgs []string
 }

+// StartClusterOpts describe a set of options one can set when (re-)starting a cluster
+type StartClusterOpts struct {
+    WaitForMaster bool
+    Timeout       time.Duration
+}
+
 // ClusterNetwork describes a network which a cluster is running in
 type ClusterNetwork struct {
     Name string `yaml:"name" json:"name,omitempty"`

From b6e3babb4acd26c8139a38bfce3c57951e6b11e5 Mon Sep 17 00:00:00 2001
From: iwilltry42
Date: Thu, 28 May 2020 14:32:09 +0200
Subject: [PATCH 7/9] fix: move waiting out of loop (which caused early context cancelation issues)

---
 pkg/cluster/cluster.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index a2254703..d9125878 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -511,12 +511,12 @@ func StartCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runti
                 return WaitForNodeLogMessage(ctx, runtime, masterNode, "Wrote kubeconfig")
             })
         }
+    }

-        if err := waitForMasterWaitgroup.Wait(); err != nil {
-            log.Errorln("Failed to bring up all master nodes in time. Check the logs:")
-            log.Errorln(">>> ", err)
-            return fmt.Errorf("Failed to bring up cluster")
-        }
+    if err := waitForMasterWaitgroup.Wait(); err != nil {
+        log.Errorln("Failed to bring up all master nodes in time. Check the logs:")
+        log.Errorln(">>> ", err)
+        return fmt.Errorf("Failed to bring up cluster")
     }

     // start masterlb

From 99f5a6884a84e0df7f202f4a1e4a9189d4a7d833 Mon Sep 17 00:00:00 2001
From: iwilltry42
Date: Fri, 29 May 2020 11:21:12 +0200
Subject: [PATCH 8/9] getNodeLogs: accept time.Time parameter

- fixes startCluster's --wait/--timeout functionality
--> now doesn't check all the logs (e.g. from previous run)
----> no more false positives when checking for a specific log message only in the current run
---
 pkg/cluster/cluster.go          | 26 +++++++++++++++++---------
 pkg/cluster/node.go             |  4 ++--
 pkg/runtimes/containerd/node.go |  3 ++-
 pkg/runtimes/docker/node.go     |  8 ++++++--
 pkg/runtimes/runtime.go         |  3 ++-
 tests/test_full_lifecycle.sh    |  5 +----
 6 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index d9125878..1461a525 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -192,7 +192,7 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
         default:
         }
         log.Debugln("Waiting for initializing master node...")
-        logreader, err := runtime.GetNodeLogs(cluster.InitNode)
+        logreader, err := runtime.GetNodeLogs(cluster.InitNode, time.Time{})
         if err != nil {
             if logreader != nil {
                 logreader.Close()
@@ -254,7 +254,7 @@ func CreateCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runt
                 // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
                 // ... by scanning for this line in logs and restarting the container in case it appears
                 log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
-                return WaitForNodeLogMessage(ctx, runtime, masterNode, "Wrote kubeconfig")
+                return WaitForNodeLogMessage(ctx, runtime, masterNode, "Wrote kubeconfig", time.Time{})
             })
         }
     }
@@ -475,6 +475,8 @@ func generateNodeName(cluster string, role k3d.Role, suffix int) string {
 func StartCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runtime, startClusterOpts types.StartClusterOpts) error {
     log.Infof("Starting cluster '%s'", cluster.Name)

+    start := time.Now()
+
     if startClusterOpts.Timeout > 0*time.Second {
         var cancel context.CancelFunc
         ctx, cancel = context.WithTimeout(ctx, startClusterOpts.Timeout)
@@ -508,17 +510,11 @@ func StartCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runti
                 // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
                 // ... by scanning for this line in logs and restarting the container in case it appears
                 log.Debugf("Starting to wait for master node '%s'", masterNode.Name)
-                return WaitForNodeLogMessage(ctx, runtime, masterNode, "Wrote kubeconfig")
+                return WaitForNodeLogMessage(ctx, runtime, masterNode, "Wrote kubeconfig", start)
             })
         }
     }

-    if err := waitForMasterWaitgroup.Wait(); err != nil {
-        log.Errorln("Failed to bring up all master nodes in time. Check the logs:")
-        log.Errorln(">>> ", err)
-        return fmt.Errorf("Failed to bring up cluster")
-    }
-
     // start masterlb
     if masterlb != nil {
         log.Debugln("Starting masterlb...")
@@ -526,6 +522,18 @@ func StartCluster(ctx context.Context, cluster *k3d.Cluster, runtime k3drt.Runti
             log.Warningf("Failed to start masterlb '%s': Try to start it manually", masterlb.Name)
             failed++
         }
+        waitForMasterWaitgroup.Go(func() error {
+            // TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
+            // ... by scanning for this line in logs and restarting the container in case it appears
+            log.Debugf("Starting to wait for loadbalancer node '%s'", masterlb.Name)
+            return WaitForNodeLogMessage(ctx, runtime, masterlb, "start worker processes", start)
+        })
+    }
+
+    if err := waitForMasterWaitgroup.Wait(); err != nil {
+        log.Errorln("Failed to bring up all nodes in time. Check the logs:")
Check the logs:") + log.Errorln(">>> ", err) + return fmt.Errorf("Failed to bring up cluster") } if failed > 0 { diff --git a/pkg/cluster/node.go b/pkg/cluster/node.go index 7f93aea0..beade981 100644 --- a/pkg/cluster/node.go +++ b/pkg/cluster/node.go @@ -233,7 +233,7 @@ func GetNode(node *k3d.Node, runtime runtimes.Runtime) (*k3d.Node, error) { } // WaitForNodeLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached) -func WaitForNodeLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string) error { +func WaitForNodeLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error { for { select { case <-ctx.Done(): @@ -242,7 +242,7 @@ func WaitForNodeLogMessage(ctx context.Context, runtime runtimes.Runtime, node * } // read the logs - out, err := runtime.GetNodeLogs(node) + out, err := runtime.GetNodeLogs(node, since) if err != nil { if out != nil { out.Close() diff --git a/pkg/runtimes/containerd/node.go b/pkg/runtimes/containerd/node.go index 1547ac07..259f16e4 100644 --- a/pkg/runtimes/containerd/node.go +++ b/pkg/runtimes/containerd/node.go @@ -25,6 +25,7 @@ package containerd import ( "context" "io" + "time" "github.com/containerd/containerd" "github.com/containerd/containerd/containers" @@ -119,7 +120,7 @@ func (d Containerd) GetNode(node *k3d.Node) (*k3d.Node, error) { } // GetNodeLogs returns the logs from a given node -func (d Containerd) GetNodeLogs(node *k3d.Node) (io.ReadCloser, error) { +func (d Containerd) GetNodeLogs(node *k3d.Node, since time.Time) (io.ReadCloser, error) { return nil, nil } diff --git a/pkg/runtimes/docker/node.go b/pkg/runtimes/docker/node.go index 8dfd5662..bfc17a45 100644 --- a/pkg/runtimes/docker/node.go +++ b/pkg/runtimes/docker/node.go @@ -218,7 +218,7 @@ func (d Docker) GetNode(node *k3d.Node) (*k3d.Node, error) { } // GetNodeLogs returns the logs from a given node -func (d Docker) GetNodeLogs(node *k3d.Node) (io.ReadCloser, error) { +func (d Docker) GetNodeLogs(node *k3d.Node, since time.Time) (io.ReadCloser, error) { // get the container for the given node container, err := getNodeContainer(node) if err != nil { @@ -244,7 +244,11 @@ func (d Docker) GetNodeLogs(node *k3d.Node) (io.ReadCloser, error) { return nil, fmt.Errorf("Node '%s' (container '%s') not running", node.Name, containerInspectResponse.ID) } - logreader, err := docker.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true}) + sinceStr := "" + if !since.IsZero() { + sinceStr = since.Format("2006-01-02T15:04:05") + } + logreader, err := docker.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Since: sinceStr}) if err != nil { log.Errorf("Failed to get logs from node '%s' (container '%s')", node.Name, container.ID) return nil, err diff --git a/pkg/runtimes/runtime.go b/pkg/runtimes/runtime.go index 428e0c61..fc05c465 100644 --- a/pkg/runtimes/runtime.go +++ b/pkg/runtimes/runtime.go @@ -24,6 +24,7 @@ package runtimes import ( "fmt" "io" + "time" "github.com/rancher/k3d/pkg/runtimes/containerd" "github.com/rancher/k3d/pkg/runtimes/docker" @@ -56,7 +57,7 @@ type Runtime interface { GetRuntimePath() string // returns e.g. 
     GetRuntimePath() string // returns e.g. '/var/run/docker.sock' for a default docker setup
     ExecInNode(*k3d.Node, []string) error
     // DeleteContainer() error
-    GetNodeLogs(*k3d.Node) (io.ReadCloser, error)
+    GetNodeLogs(*k3d.Node, time.Time) (io.ReadCloser, error)
 }

 // GetRuntime checks, if a given name is represented by an implemented k3d runtime and returns it
diff --git a/tests/test_full_lifecycle.sh b/tests/test_full_lifecycle.sh
index 12968ace..e73ebb01 100755
--- a/tests/test_full_lifecycle.sh
+++ b/tests/test_full_lifecycle.sh
@@ -30,10 +30,7 @@ check_clusters "$clustername" && failed "cluster was not stopped, since we still

 # 3. start the cluster
 info "Starting cluster..."
-$EXE start cluster "$clustername"
-
-info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
-sleep 5
+$EXE start cluster "$clustername" --wait --timeout 360s || failed "cluster didn't come back in time"

 info "Checking that we have access to the cluster..."
 check_clusters "$clustername" || failed "error checking cluster"

From 5afa25154cede24e8f752564c43a58e0b17d31f8 Mon Sep 17 00:00:00 2001
From: iwilltry42
Date: Fri, 29 May 2020 11:26:27 +0200
Subject: [PATCH 9/9] docs: add some notes about 'k3d start cluster --wait --timeout'

---
 cmd/start/startCluster.go | 4 ++--
 docs/usage/commands.md    | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/cmd/start/startCluster.go b/cmd/start/startCluster.go
index e8de46d9..5a96157c 100644
--- a/cmd/start/startCluster.go
+++ b/cmd/start/startCluster.go
@@ -60,8 +60,8 @@ func NewCmdStartCluster() *cobra.Command {

     // add flags
     cmd.Flags().BoolP("all", "a", false, "Start all existing clusters")
-    cmd.Flags().BoolVar(&startClusterOpts.WaitForMaster, "wait", false, "Wait for the master(s) to be ready before returning.")
-    cmd.Flags().DurationVar(&startClusterOpts.Timeout, "timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
+    cmd.Flags().BoolVar(&startClusterOpts.WaitForMaster, "wait", false, "Wait for the master(s) (and loadbalancer) to be ready before returning.")
+    cmd.Flags().DurationVar(&startClusterOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")

     // add subcommands
diff --git a/docs/usage/commands.md b/docs/usage/commands.md
index 2b650814..4bb22642 100644
--- a/docs/usage/commands.md
+++ b/docs/usage/commands.md
@@ -33,6 +33,8 @@ k3d
   start
     cluster CLUSTERNAME     # start a (stopped) cluster
       -a, --all             # start all clusters
+      --wait                # wait for all masters and master-loadbalancer to be up before returning
+      --timeout             # maximum waiting time for '--wait' before canceling/returning
     node NODENAME           # start a (stopped) node
   stop
     cluster CLUSTERNAME     # stop a cluster