updatelbconfig: check for log output to see if the update succeeded and give proper info

pull/670/head
iwilltry42 3 years ago
parent 4a84874a86
commit 1a68ae0372
No known key found for this signature in database
GPG Key ID: 7BA57AD1CFF16110
  1. 32
      pkg/client/loadbalancer.go
  2. 15
      pkg/client/node.go

@@ -24,8 +24,10 @@ package client
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"time"
"github.com/docker/go-connections/nat"
"github.com/go-test/deep"
@@ -36,6 +38,11 @@ import (
"gopkg.in/yaml.v2"
)
// Sentinel errors returned by UpdateLoadbalancerConfig so callers can
// distinguish a degraded-but-expected condition from a hard failure
// via errors.Is (see NodeAddToCluster / NodeDelete, which tolerate
// LBConfigErrHostNotFound).
var (
// LBConfigErrHostNotFound indicates the loadbalancer logged
// "host not found in upstream" after the config update, i.e. one of
// the configured server nodes appears to be down.
LBConfigErrHostNotFound = errors.New("lbconfig: host not found")
// LBConfigErrFailedTest indicates we could not verify (within the
// log-wait deadline) whether the loadbalancer applied the new config
// successfully — manual inspection or a retry is required.
LBConfigErrFailedTest = errors.New("lbconfig: failed to test")
)
// UpdateLoadbalancerConfig updates the loadbalancer config with an updated list of servers belonging to that cluster
func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.Cluster) error {
@@ -69,11 +76,34 @@ func UpdateLoadbalancerConfig(ctx context.Context, runtime runtimes.Runtime, clu
return fmt.Errorf("error marshalling the new loadbalancer config: %w", err)
}
log.Debugf("Writing lb config:\n%s", string(newLbConfigYaml))
startTime := time.Now().Truncate(time.Second).UTC()
log.Debugf("timestamp: %s", startTime.Format("2006-01-02T15:04:05.999999999Z"))
if err := runtime.WriteToNode(ctx, newLbConfigYaml, k3d.DefaultLoadbalancerConfigPath, 0744, cluster.ServerLoadBalancer); err != nil {
return fmt.Errorf("error writing new loadbalancer config to container: %w", err)
}
// TODO: check if loadbalancer is running fine after auto-applying the change
successCtx, successCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))
defer successCtxCancel()
err = NodeWaitForLogMessage(successCtx, runtime, cluster.ServerLoadBalancer, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], startTime)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
failureCtx, failureCtxCancel := context.WithDeadline(ctx, time.Now().Add(5*time.Second))
defer failureCtxCancel()
err = NodeWaitForLogMessage(failureCtx, runtime, cluster.ServerLoadBalancer, "host not found in upstream", startTime)
if err != nil {
log.Warnf("Failed to check if the loadbalancer was configured correctly or if it broke. Please check it manually or try again: %v", err)
return LBConfigErrFailedTest
} else {
log.Warnln("Failed to configure loadbalancer because one of the nodes seems to be down! Run `k3d node list` to see which one it could be.")
return LBConfigErrHostNotFound
}
} else {
log.Warnf("Failed to ensure that loadbalancer was configured correctly. Please check it manually or try again: %v", err)
return LBConfigErrFailedTest
}
}
time.Sleep(1 * time.Second) // waiting for a second, to avoid issues with too fast lb updates which would screw up the log waits
return nil
}

@@ -208,8 +208,9 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
// if it's a server node, then update the loadbalancer configuration
if node.Role == k3d.ServerRole {
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
log.Errorln("Failed to update cluster loadbalancer")
return err
if !errors.Is(err, LBConfigErrHostNotFound) {
return fmt.Errorf("error updating loadbalancer: %w", err)
}
}
}
@@ -496,8 +497,9 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, o
// if it's a server node, then update the loadbalancer configuration
if node.Role == k3d.ServerRole {
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
log.Errorln("Failed to update cluster loadbalancer")
return err
if !errors.Is(err, LBConfigErrHostNotFound) {
return fmt.Errorf("Failed to update cluster loadbalancer: %w", err)
}
}
}
}
@@ -577,7 +579,7 @@ func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *
if ok {
log.Debugf("NodeWaitForLogMessage: Context Deadline (%s) > Current Time (%s)", d, time.Now())
}
return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s", message, node.Name)
return fmt.Errorf("Context deadline exceeded while waiting for log message '%s' of node %s: %w", message, node.Name, ctx.Err())
}
return ctx.Err()
default:
@@ -589,8 +591,7 @@ func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *
if out != nil {
out.Close()
}
log.Errorf("Failed waiting for log message '%s' from node '%s'", message, node.Name)
return err
return fmt.Errorf("Failed waiting for log message '%s' from node '%s': %w", message, node.Name, err)
}
defer out.Close()

Loading…
Cancel
Save