nodeCreate: remove dead code and parallelize adding nodes to the cluster completely

pull/670/head
iwilltry42 3 years ago
parent e063405b02
commit 7bcb1730c6
4 changed files:
  1. CHANGELOG.md (3 changes)
  2. cmd/node/nodeCreate.go (2 changes)
  3. pkg/client/loadbalancer.go (2 changes)
  4. pkg/client/node.go (23 changes)

CHANGELOG.md
@@ -18,10 +18,13 @@
- this also checks the config before applying it, so the lb doesn't crash on a faulty config
- updating the loadbalancer writes the new config file and also checks if everything's going fine afterwards
- helper images can now be set explicitly via environment variables: `K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS` (#638)
+ - concurrently add new nodes to an existing cluster (remove some dumb code) (#640)
+ - `--wait` is now the default for `k3d node create`
### Misc
- tests/e2e: timeouts everywhere to avoid killing DroneCI (#638)
+ - logs: really final output when creating/deleting nodes (so far, we were not outputting a final success message and the process was still doing stuff) (#640)
## v4.4.7
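
The helper-image override called out in the changelog (`K3D_IMAGE_LOADBALANCER` & `K3D_IMAGE_TOOLS`, #638) boils down to an environment lookup that falls back to the built-in default. A minimal sketch of that pattern, assuming nothing about k3d's internals; the function name and the default image reference below are placeholders:

```go
package main

import (
	"fmt"
	"os"
)

// helperImage picks the image for a helper container: an explicit override
// from the environment wins, otherwise the compiled-in default is used.
func helperImage(envVar, defaultImage string) string {
	if override := os.Getenv(envVar); override != "" {
		return override
	}
	return defaultImage
}

func main() {
	// The default reference here is illustrative only, not k3d's actual image.
	lbImage := helperImage("K3D_IMAGE_LOADBALANCER", "docker.io/example/k3d-proxy:latest")
	fmt.Println("loadbalancer image:", lbImage)
}
```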

cmd/node/nodeCreate.go
@@ -73,7 +73,7 @@ func NewCmdNodeCreate() *cobra.Command {
cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image used for the node(s)")
cmd.Flags().String("memory", "", "Memory limit imposed on the node [From docker]")
- cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", false, "Wait for the node(s) to be ready before returning.")
+ cmd.Flags().BoolVar(&createNodeOpts.Wait, "wait", true, "Wait for the node(s) to be ready before returning.")
cmd.Flags().DurationVar(&createNodeOpts.Timeout, "timeout", 0*time.Second, "Maximum waiting time for '--wait' before canceling/returning.")
cmd.Flags().StringSliceP("runtime-label", "", []string{}, "Specify container runtime labels in format \"foo=bar\"")
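
Since `--wait` now defaults to true, the `--timeout` duration is what bounds the wait. A hedged sketch of how such a flag pair is commonly turned into a context deadline; the struct and function names are illustrative, not the exact k3d plumbing:

```go
package nodecreate

import (
	"context"
	"time"
)

// createOpts mirrors the shape of the flags above (illustrative, not k3d's type).
type createOpts struct {
	Wait    bool
	Timeout time.Duration
}

// waitContext derives the context the node-creation calls run under:
// with --wait and a non-zero --timeout the whole operation gets a deadline,
// otherwise it just inherits (and can still be cancelled via) the parent.
func waitContext(parent context.Context, opts createOpts) (context.Context, context.CancelFunc) {
	if opts.Wait && opts.Timeout > 0 {
		return context.WithTimeout(parent, opts.Timeout)
	}
	return context.WithCancel(parent)
}
```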

pkg/client/loadbalancer.go
@@ -77,7 +77,6 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
}
log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml))
startTime := time.Now().Truncate(time.Second).UTC()
- log.Debugf("timestamp: %s", startTime.Format("2006-01-02T15:04:05.999999999Z"))
if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil {
return fmt.Errorf("error writing new loadbalancer config to container: %w", err)
}
@@ -102,6 +101,7 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
return LBConfigErrFailedTest
}
}
log.Infof("Successfully configured loadbalancer %s!", cluster.ServerLoadBalancer.Name)
time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits
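
The `startTime` captured before the config write feeds the log wait that follows: only log lines stamped after it count, so an older success message from a previous update cannot satisfy the wait. A rough sketch of that idea, using a channel of timestamped lines as a stand-in for the runtime's real log stream (this is not k3d's `NodeWaitForLogMessage` implementation):

```go
package lbwait

import (
	"context"
	"fmt"
	"strings"
	"time"
)

// logLine is a timestamped line of container output (illustrative type).
type logLine struct {
	when time.Time
	text string
}

// waitForMessage returns once a line containing msg shows up with a timestamp
// at or after since, or when ctx (e.g. a --timeout deadline) expires.
func waitForMessage(ctx context.Context, lines <-chan logLine, msg string, since time.Time) error {
	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("timed out waiting for %q: %w", msg, ctx.Err())
		case l := <-lines:
			if l.when.Before(since) {
				continue // stale output from before the config update
			}
			if strings.Contains(l.text, msg) {
				return nil
			}
		}
	}
}
```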

pkg/client/node.go
@@ -228,26 +228,13 @@ func NodeAddToClusterMulti(ctx context.Context, runtime runtimes.Runtime, nodes
nodeWaitGroup, ctx := errgroup.WithContext(ctx)
for _, node := range nodes {
- if err := NodeAddToCluster(ctx, runtime, node, cluster, createNodeOpts); err != nil {
- return err
- }
- if createNodeOpts.Wait {
- currentNode := node
- nodeWaitGroup.Go(func() error {
- log.Debugf("Starting to wait for node '%s'", currentNode.Name)
- readyLogMessage := k3d.ReadyLogMessageByRole[currentNode.Role]
- if readyLogMessage != "" {
- return NodeWaitForLogMessage(ctx, runtime, currentNode, readyLogMessage, time.Time{})
- }
- log.Warnf("NodeAddToClusterMulti: Set to wait for node %s to get ready, but there's no target log message defined", currentNode.Name)
- return nil
- })
- }
+ currentNode := node
+ nodeWaitGroup.Go(func() error {
+ return NodeAddToCluster(ctx, runtime, currentNode, cluster, createNodeOpts)
+ })
}
if err := nodeWaitGroup.Wait(); err != nil {
log.Errorln("Failed to bring up all nodes in time. Check the logs:")
log.Errorf(">>> %+v", err)
- return fmt.Errorf("Failed to add nodes")
+ return fmt.Errorf("Failed to add one or more nodes: %w", err)
}
return nil
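
The rewritten loop leans entirely on `errgroup.WithContext`: every `NodeAddToCluster` call runs in its own goroutine, the derived context is cancelled as soon as one of them fails, and `Wait()` returns the first non-nil error. A self-contained sketch of that pattern; the node names and the `addNode` stand-in are made up for illustration:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// addNode stands in for client.NodeAddToCluster: it either finishes after a
// short delay or bails out early when the shared context is cancelled.
func addNode(ctx context.Context, name string) error {
	select {
	case <-time.After(100 * time.Millisecond):
		fmt.Println("added", name)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	nodes := []string{"k3d-demo-agent-1", "k3d-demo-agent-2", "k3d-demo-agent-3"}

	nodeWaitGroup, ctx := errgroup.WithContext(context.Background())
	for _, node := range nodes {
		currentNode := node // capture the loop variable, as the diff does
		nodeWaitGroup.Go(func() error {
			return addNode(ctx, currentNode)
		})
	}
	if err := nodeWaitGroup.Wait(); err != nil {
		fmt.Println("failed to add one or more nodes:", err)
		return
	}
	fmt.Println("all nodes added")
}
```

Re-declaring the loop variable before `Go()` (the `currentNode := node` the diff keeps) is what binds each goroutine to its own node rather than to the loop variable's final value.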
