Merge pull request #849 from rancher:fix/e2e-speedup-nofails

[FIX] Fix/e2e-speedup-nofails
Thorsten Klein committed 3 years ago via GitHub
commit f77fb62934
Changed files (19):
  1. Makefile (6)
  2. pkg/client/cluster.go (9)
  3. tests/assets/config_test_simple.yaml (10)
  4. tests/common.sh (11)
  5. tests/dind.sh (25)
  6. tests/extra_test_k3s_versions.sh (24)
  7. tests/runner.sh (8)
  8. tests/test_basic.sh (29)
  9. tests/test_config_file.sh (28)
  10. tests/test_config_file_migration.sh (12)
  11. tests/test_config_with_overrides.sh (8)
  12. tests/test_full_lifecycle.sh (10)
  13. tests/test_ipam.sh (16)
  14. tests/test_loadbalancer.sh (8)
  15. tests/test_memory_limits.sh (8)
  16. tests/test_multi_master.sh (10)
  17. tests/test_multi_master_start_stop.sh (10)
  18. tests/test_node_edit.sh (8)
  19. tests/test_registry.sh (10)

File: Makefile
@@ -51,6 +51,8 @@ E2E_EXCLUDE ?=
E2E_EXTRA ?=
E2E_RUNNER_START_TIMEOUT ?= 10
E2E_HELPER_IMAGE_TAG ?=
E2E_KEEP ?=
E2E_PARALLEL ?=
########## Go Build Options ##########
# Build targets
@@ -125,7 +127,7 @@ build-cross:
# build a specific docker target ( '%' matches the target as specified in the Dockerfile)
build-docker-%:
@echo "Building Docker image k3d:$(K3D_IMAGE_TAG)-$*"
DOCKER_BUILDKIT=1 docker build . -t k3d:$(K3D_IMAGE_TAG)-$* --target $*
DOCKER_BUILDKIT=1 docker build . --no-cache -t k3d:$(K3D_IMAGE_TAG)-$* --target $*
# build helper images
build-helper-images: build-proxy-image build-tools-image
@@ -175,7 +177,7 @@ test:
e2e: build-docker-dind
@echo "Running e2e tests in k3d:$(K3D_IMAGE_TAG)"
LOG_LEVEL="$(E2E_LOG_LEVEL)" E2E_INCLUDE="$(E2E_INCLUDE)" E2E_EXCLUDE="$(E2E_EXCLUDE)" E2E_EXTRA="$(E2E_EXTRA)" E2E_RUNNER_START_TIMEOUT=$(E2E_RUNNER_START_TIMEOUT) E2E_HELPER_IMAGE_TAG="$(E2E_HELPER_IMAGE_TAG)" tests/dind.sh "${K3D_IMAGE_TAG}-dind"
LOG_LEVEL="$(E2E_LOG_LEVEL)" E2E_INCLUDE="$(E2E_INCLUDE)" E2E_EXCLUDE="$(E2E_EXCLUDE)" E2E_EXTRA="$(E2E_EXTRA)" E2E_RUNNER_START_TIMEOUT=$(E2E_RUNNER_START_TIMEOUT) E2E_HELPER_IMAGE_TAG="$(E2E_HELPER_IMAGE_TAG)" E2E_KEEP="$(E2E_KEEP)" E2E_PARALLEL="$(E2E_PARALLEL)" tests/dind.sh "${K3D_IMAGE_TAG}-dind"
ci-tests: fmt check e2e
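Both new knobs are ordinary variables, so they can be set directly on the make invocation. A hypothetical run (the chosen values, and the E2E_INCLUDE selection, are only examples):

    # run at most 2 tests in parallel, keep the runner container afterwards,
    # and restrict the suite to the basic test (values are illustrative)
    make e2e E2E_PARALLEL=2 E2E_KEEP=1 E2E_INCLUDE="test_basic"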

File: pkg/client/cluster.go
@@ -290,6 +290,15 @@ func ClusterPrepNetwork(ctx context.Context, runtime k3drt.Runtime, cluster *k3d
clusterCreateOpts.GlobalLabels[k3d.LabelNetworkExternal] = "true" // if the network wasn't created, we say that it's managed externally (important for cluster deletion)
}
// just reserve some IPs for k3d (e.g. k3d-tools container), so we don't try to use them again
if cluster.Network.IPAM.Managed {
reservedIP, err := GetIP(ctx, runtime, &cluster.Network)
if err != nil {
return fmt.Errorf("error reserving IP in new cluster network %s", network.Name)
}
cluster.Network.IPAM.IPsUsed = append(cluster.Network.IPAM.IPsUsed, reservedIP)
}
return nil
}

File: tests/assets/config_test_simple.yaml
@@ -3,21 +3,11 @@ kind: Simple
name: test
servers: 3
agents: 2
kubeAPI:
hostIP: "0.0.0.0"
hostPort: "6446"
#image: rancher/k3s:latest
volumes:
- volume: $HOME:/some/path
nodeFilters:
- all
ports:
- port: 80:80
nodeFilters:
- loadbalancer
- port: 0.0.0.0:443:443
nodeFilters:
- loadbalancer
env:
- envVar: bar=baz,bob
nodeFilters:

File: tests/common.sh
@@ -42,6 +42,8 @@ failed() {
fi
if [[ -n "$2" ]]; then
mv "$2" "$2.failed"
elif [[ -n "$LOG_FILE" ]]; then
mv "$LOG_FILE" "$LOG_FILE.failed"
fi
abort "test failed"
}
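With this addition, a failing test also renames its own $LOG_FILE, not just an explicitly passed logfile, so failed runs stand out in the output directory. A sketch of the resulting layout (file names are illustrative; the directory is the $TEST_OUTPUT_DIR prepared in runner.sh below):

    ls "$HOME/testoutput"
    #   test_basic.sh.log            <- test passed, log kept as-is
    #   test_registry.sh.log.failed  <- failed() renamed it, so failures are easy to collect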
@@ -71,7 +73,7 @@ check_url() {
check_clusters() {
[ -n "$EXE" ] || abort "EXE is not defined"
for c in "$@" ; do
$EXE kubeconfig merge "$c" --kubeconfig-switch-context
$EXE kubeconfig merge "$c" --kubeconfig-switch-context || return 1
if kubectl cluster-info ; then
passed "cluster $c is reachable"
else
@@ -85,8 +87,9 @@ check_clusters() {
check_cluster_count() {
expectedClusterCount=$1
actualClusterCount=$(LOG_LEVEL=warn $EXE cluster list --no-headers | wc -l) # this must always have a loglevel of <= warn or it will fail
if [[ $actualClusterCount != $expectedClusterCount ]]; then
shift # all remaining args are clusternames
actualClusterCount=$(LOG_LEVEL=warn $EXE cluster list --no-headers "$@" | wc -l) # this must always have a loglevel of <= warn or it will fail
if [[ $actualClusterCount -ne $expectedClusterCount ]]; then
failed "incorrect number of clusters available: $actualClusterCount != $expectedClusterCount"
return 1
fi
@@ -99,7 +102,7 @@ check_multi_node() {
expectedNodeCount=$2
$EXE kubeconfig merge "$cluster" --kubeconfig-switch-context
nodeCount=$(kubectl get nodes -o=custom-columns=NAME:.metadata.name --no-headers | wc -l)
if [[ $nodeCount == $expectedNodeCount ]]; then
if [[ $nodeCount -eq $expectedNodeCount ]]; then
passed "cluster $cluster has $expectedNodeCount nodes, as expected"
else
warn "cluster $cluster has incorrect number of nodes: $nodeCount != $expectedNodeCount"

File: tests/dind.sh
@@ -6,13 +6,31 @@ K3D_IMAGE_TAG=$1
# define E2E_KEEP to non-empty for keeping the e2e runner container after running the tests
E2E_KEEP=${E2E_KEEP:-}
# Max. number of tests executed in parallel
E2E_PARALLEL=${E2E_PARALLEL:-}
# Max. time to wait for the runner container to be up
RUNNER_START_TIMEOUT=${E2E_RUNNER_START_TIMEOUT:-10}
####################################################################################
CURR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
[ -d "$CURR_DIR" ] || {
echo "FATAL: no current dir (maybe running in zsh?)"
exit 1
}
export CURRENT_STAGE="DIND"
# shellcheck disable=SC1091
source "$CURR_DIR/common.sh"
####################################################################################
# Start the runner container
TIMESTAMP=$(date "+%y%m%d%H%M%S")
container_name="k3d-e2e-runner-$TIMESTAMP"
k3de2e=$(docker run -d \
-v "$(pwd):/src" \
--privileged \
@@ -21,17 +39,18 @@ k3de2e=$(docker run -d \
-e LOG_LEVEL="$LOG_LEVEL" \
-e E2E_INCLUDE="$E2E_INCLUDE" \
-e E2E_EXCLUDE="$E2E_EXCLUDE" \
-e E2E_PARALLEL="$E2E_PARALLEL" \
-e E2E_EXTRA="$E2E_EXTRA" \
-e LOG_TIMESTAMPS="true" \
--add-host "k3d-registrytest-registry:127.0.0.1" \
--name "k3d-e2e-runner-$TIMESTAMP" \
--name "$container_name" \
"k3d:$K3D_IMAGE_TAG")
# setup exit trap (make sure that we always stop and remove the runner container)
finish() {
docker stop "$k3de2e" || /bin/true
if [ -z "$E2E_KEEP" ] ; then
docker rm "$k3de2e" || /bin/true
info "Cleaning up test container $container_name"
docker rm -f "$k3de2e" || /bin/true
fi
}
trap finish EXIT
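Because finish() still stops the runner but only removes it when E2E_KEEP is empty, a kept container can be inspected after the run. A hypothetical debugging session (the container name follows the timestamped scheme above):

    make e2e E2E_KEEP=1                              # run the suite, keep the (stopped) runner container
    docker ps -a --filter "name=k3d-e2e-runner-"     # find it
    docker logs k3d-e2e-runner-<TIMESTAMP>           # read its output; <TIMESTAMP> is whatever was generated above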

File: tests/extra_test_k3s_versions.sh
@@ -9,21 +9,32 @@ FAILED_TESTS=()
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
for version in "${K3S_VERSIONS[@]}"; do
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" )_${version//./-}.log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" )_${version//./-}.yaml"
export KUBECONFIG
### Step Setup ###
export CURRENT_STAGE="Suite | k3s-versions | $version"
clustername="k3s-version-${version//./-}"
info "Creating a cluster with k3s version $version ..."
$EXE cluster create c1 --wait --timeout 60s --image "rancher/k3s:$version" || failed "could not create cluster with k3s version $version"
$EXE cluster create $clustername --wait --timeout 60s --image "rancher/k3s:$version" || failed "could not create cluster with k3s version $version"
info "Checking we have access to the cluster ..."
check_clusters "c1" || failed "error checking cluster with k3s version $version"
check_clusters "$clustername" || failed "error checking cluster with k3s version $version"
info "Deleting cluster ..."
$EXE cluster delete c1 || failed "could not delete the cluster c1"
$EXE cluster delete $clustername || failed "could not delete the cluster $clustername"
K3S_IMAGE_TAG="$version" $CURR_DIR/test_full_lifecycle.sh
if [[ $? -eq 1 ]]; then
@@ -53,6 +64,7 @@ if [[ ${#FAILED_TESTS[@]} -gt 0 ]]; then
for failed_test in "${FAILED_TESTS[@]}"; do
warn "- $failed_test"
done
exit 1
else
passed "Successfully verified all given k3s versions"
exit 0
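The per-version log and kubeconfig names are built with bash pattern substitution, ${version//./-}, which replaces every dot so the version tag is safe to use in a file name. For example (the version tag is hypothetical):

    version="v1.22.2-k3s2"                                   # hypothetical k3s version
    echo "${version//./-}"                                   # -> v1-22-2-k3s2
    echo "extra_test_k3s_versions.sh_${version//./-}.log"    # the LOG_FILE basename built above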

File: tests/runner.sh
@@ -11,6 +11,7 @@ CURR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
: "${E2E_INCLUDE:=""}"
: "${E2E_EXCLUDE:=""}"
: "${E2E_EXTRA:=""}"
: "${E2E_PARALLEL:="4"}"
export CURRENT_STAGE="Runner"
@@ -23,7 +24,8 @@ source "$CURR_DIR/common.sh"
info "Preparing filesystem and environment..."
mkdir -p "$HOME"/.kube
export KUBECONFIG_ROOT="$HOME/.kube"
mkdir -p "$KUBECONFIG_ROOT"
export TEST_OUTPUT_DIR="$HOME"/testoutput
mkdir -p "$TEST_OUTPUT_DIR"
@@ -91,7 +93,7 @@ function run_tests() {
#
# Run Tests
#
local max_batch_size=4
local max_batch_size=$E2E_PARALLEL
local current_batch_size=0
local current_batch_number=1
local total_batch_number=$(((num_included_tests + (max_batch_size - 1)) / max_batch_size))
@@ -139,7 +141,7 @@ function run_tests() {
info "FINISHED $section_name${END}
> ${WHT}Total:\t$num_total_tests${END}
> ${BLU}Run:\t$num_included_tests${END}
> ${YEL}Not Run:\t$num_excluded_tests${END}
> ${YEL}Skipped:\t$num_excluded_tests${END}
> ${GRN}Passed:\t$((num_included_tests - num_failed_tests))${END}
> ${RED}Failed:\t$num_failed_tests${END}"
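max_batch_size now comes from E2E_PARALLEL (default 4), and the number of batches is the usual integer ceiling division. A worked example, assuming 10 included tests and the default parallelism:

    num_included_tests=10
    max_batch_size=4        # the E2E_PARALLEL default
    echo $(((num_included_tests + (max_batch_size - 1)) / max_batch_size))   # -> 3 batches (4 + 4 + 2 tests)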

File: tests/test_basic.sh
@@ -6,31 +6,42 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
export CURRENT_STAGE="Test | basic"
clustername_1="test-basic-1"
clustername_2="test-basic-2"
info "Creating two clusters..."
$EXE cluster create c1 --wait --timeout 60s --api-port 6443 --env 'TEST_VAR=user\@pass\\@server:0' || failed_with_logfile "could not create cluster c1" "$LOG_FILE"
$EXE cluster create c2 --wait --timeout 60s || failed_with_logfile "could not create cluster c2" "$LOG_FILE"
$EXE cluster create $clustername_1 --wait --timeout 60s --env 'TEST_VAR=user\@pass\\@server:0' || failed "could not create cluster $clustername_1"
$EXE cluster create $clustername_2 --wait --timeout 60s || failed "could not create cluster $clustername_2"
info "Checking that we can get both clusters..."
check_cluster_count 2
check_cluster_count 2 "$clustername_1" "$clustername_2"
info "Checking we have access to both clusters..."
check_clusters "c1" "c2" || failed_with_logfile "error checking cluster" "$LOG_FILE"
check_clusters "$clustername_1" "$clustername_2" || failed "error checking cluster"
info "Checking cluster env var with escaped @ signs..."
docker exec k3d-c1-server-0 env | grep -qE '^TEST_VAR=user@pass\\$' || failed_with_logfile "Failed to lookup proper env var in container" "$LOG_FILE"
docker exec k3d-$clustername_1-server-0 env | grep -qE '^TEST_VAR=user@pass\\$' || failed "Failed to lookup proper env var in container"
info "Check k3s token retrieval"
check_cluster_token_exist "c1" || failed_with_logfile "could not find cluster token c1" "$LOG_FILE"
check_cluster_token_exist "c2" || failed_with_logfile "could not find cluster token c2" "$LOG_FILE"
check_cluster_token_exist "$clustername_1" || failed "could not find cluster token $clustername_1"
check_cluster_token_exist "$clustername_2" || failed "could not find cluster token $clustername_2"
info "Deleting clusters..."
$EXE cluster delete c1 || failed_with_logfile "could not delete the cluster c1" "$LOG_FILE"
$EXE cluster delete c2 || failed_with_logfile "could not delete the cluster c2" "$LOG_FILE"
$EXE cluster delete $clustername_1 || failed "could not delete the cluster $clustername_1"
$EXE cluster delete $clustername_2 || failed "could not delete the cluster $clustername_2"
exit 0
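The per-test kubeconfig from the Step Setup block is what keeps parallel tests from clobbering each other's kubectl contexts: every script merges its clusters into its own file under $KUBECONFIG_ROOT. Roughly (listing is illustrative):

    ls "$KUBECONFIG_ROOT"    # $HOME/.kube, prepared by runner.sh
    #   test_basic.sh.yaml   test_ipam.sh.yaml   test_registry.sh.yaml   ...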

File: tests/test_config_file.sh
@@ -6,8 +6,16 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
: "${EXTRA_FLAG:=""}"
@@ -29,46 +37,46 @@ sed -E "s/^name:.+/name: $clustername/g" < "$configfileoriginal" > "$configfile"
highlight "[START] ConfigTest $EXTRA_TITLE"
info "Creating cluster $clustername..."
$EXE cluster create "$clustername" --config "$configfile" $EXTRA_FLAG || failed_with_logfile "could not create cluster $clustername $EXTRA_TITLE" "$LOG_FILE"
$EXE cluster create "$clustername" --config "$configfile" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5
# 1. check initial access to the cluster
info "Checking that we have access to the cluster..."
check_clusters "$clustername" || failed_with_logfile "error checking cluster" "$LOG_FILE"
check_clusters "$clustername" || failed "error checking cluster"
info "Checking that we have 5 nodes online..."
check_multi_node "$clustername" 5 || failed_with_logfile "failed to verify number of nodes" "$LOG_FILE"
check_multi_node "$clustername" 5 || failed "failed to verify number of nodes"
# 2. check some config settings
## Environment Variables
info "Ensuring that environment variables are present in the node containers as set in the config (with comma)"
exec_in_node "k3d-$clustername-server-0" "env" | grep -q "bar=baz,bob" || failed_with_logfile "Expected env var 'bar=baz,bob' is not present in node k3d-$clustername-server-0" "$LOG_FILE"
exec_in_node "k3d-$clustername-server-0" "env" | grep -q "bar=baz,bob" || failed "Expected env var 'bar=baz,bob' is not present in node k3d-$clustername-server-0"
## Container Labels
info "Ensuring that container labels have been set as stated in the config"
docker_assert_container_label "k3d-$clustername-server-0" "foo=bar" || failed_with_logfile "Expected label 'foo=bar' not present on container/node k3d-$clustername-server-0" "$LOG_FILE"
docker_assert_container_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on container/node k3d-$clustername-server-0"
## K3s Node Labels
info "Ensuring that k3s node labels have been set as stated in the config"
k3s_assert_node_label "k3d-$clustername-server-0" "foo=bar" || failed_with_logfile "Expected label 'foo=bar' not present on node k3d-$clustername-server-0" "$LOG_FILE"
k3s_assert_node_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on node k3d-$clustername-server-0"
## Registry Node
registryname="registry.localhost"
info "Ensuring, that we have a registry node present"
$EXE node list "$registryname" || failed_with_logfile "Expected registry node $registryname to be present" "$LOG_FILE"
$EXE node list "$registryname" || failed "Expected registry node $registryname to be present"
## merged registries.yaml
info "Ensuring, that the registries.yaml file contains both registries"
exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "my.company.registry" || failed_with_logfile "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml" "$LOG_FILE"
exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "$registryname" || failed_with_logfile "Expected '$registryname' to be in the /etc/rancher/k3s/registries.yaml" "$LOG_FILE"
exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "my.company.registry" || failed "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml"
exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -qi "$registryname" || failed "Expected '$registryname' to be in the /etc/rancher/k3s/registries.yaml"
# Cleanup
info "Deleting cluster $clustername (using config file)..."
$EXE cluster delete --config "$configfile" || failed_with_logfile "could not delete the cluster $clustername" "$LOG_FILE"
$EXE cluster delete --config "$configfile" || failed "could not delete the cluster $clustername"
rm "$configfile"

File: tests/test_config_file_migration.sh
@@ -6,8 +6,16 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
export CURRENT_STAGE="Test | config-file-migration"
@@ -17,8 +25,8 @@ export CURRENT_STAGE="Test | config-file-migration"
highlight "[START] ConfigMigrateTest"
tempdir=$(mktemp -d)
$EXE config migrate "$CURR_DIR/assets/config_test_simple_migration_v1alpha2.yaml" "$tempdir/expected.yaml" || failed "failed on $CURR_DIR/assets/config_test_simple.yaml"
$EXE config migrate "$CURR_DIR/assets/config_test_simple_migration_v1alpha3.yaml" "$tempdir/actual.yaml" || failed "failed on $CURR_DIR/assets/config_test_simple_migrate.yaml"
$EXE config migrate "$CURR_DIR/assets/config_test_simple_migration_v1alpha2.yaml" "$tempdir/expected.yaml" || failed "failed on $CURR_DIR/assets/config_test_simple_migration_v1alpha2.yaml"
$EXE config migrate "$CURR_DIR/assets/config_test_simple_migration_v1alpha3.yaml" "$tempdir/actual.yaml" || failed "failed on $CURR_DIR/assets/config_test_simple_migration_v1alpha3.yaml"
diff "$tempdir/actual.yaml" "$tempdir/expected.yaml" || failed "config migration failed" && passed "config migration succeeded"

File: tests/test_config_with_overrides.sh
@@ -6,8 +6,16 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
: "${EXTRA_FLAG:=""}"

File: tests/test_full_lifecycle.sh
@@ -6,8 +6,16 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
: "${EXTRA_FLAG:=""}"
@@ -26,7 +34,7 @@ clustername="lifecycletest"
highlight "[START] Lifecycletest $EXTRA_TITLE"
info "Creating cluster $clustername..."
$EXE cluster create "$clustername" --agents 1 --api-port 6443 --wait --timeout 360s $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
$EXE cluster create "$clustername" --agents 1 --wait --timeout 360s $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 10

File: tests/test_ipam.sh
@@ -6,19 +6,27 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
export CURRENT_STAGE="Test | IPAM"
highlight "[START] IPAM $EXTRA_TITLE"
clustername="ipamtest"
subnet="172.45.0.0/16"
expectedIPGateway="172.45.0.1" # k3d defaults to subnet_start+1 for the Gateway IP
expectedIPLabelServer0="172.45.0.3"
subnet="172.80.0.0/16"
expectedIPGateway="172.80.0.1" # k3d defaults to subnet_start+1 for the Gateway IP
expectedIPLabelServer0="172.80.0.3"
expectedIPServer0="$expectedIPLabelServer0/16" # k3d excludes the subnet_start (x.x.x.0) and then uses IPs in sequential order, but .2 will be used by the tools container that gathers information at start
expectedIPServerLB="172.45.0.4/16"
expectedIPServerLB="172.80.0.4/16"
info "Creating cluster $clustername..."
$EXE cluster create $clustername --timeout 360s --subnet $subnet || failed "could not create cluster $clustername"
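The expected addresses follow from the comments above: the gateway sits at subnet_start+1, .2 is reserved for the k3d-tools helper container (the reservation added in pkg/client/cluster.go above), and nodes then get IPs in sequential order. A small sketch deriving them from the subnet string:

    subnet="172.80.0.0/16"
    base="${subnet%%/*}"      # 172.80.0.0
    prefix="${base%.*}"       # 172.80.0
    gateway="$prefix.1"       # subnet_start+1 -> 172.80.0.1
    # $prefix.2 is taken by the k3d-tools container at cluster start
    server0="$prefix.3/16"    # first node
    serverlb="$prefix.4/16"   # loadbalancer
    echo "$gateway $server0 $serverlb"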

File: tests/test_loadbalancer.sh
@@ -6,8 +6,16 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
export CURRENT_STAGE="Test | Loadbalancer"

File: tests/test_memory_limits.sh
@@ -6,8 +6,16 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
export CURRENT_STAGE="Test | MemoryLimits"

File: tests/test_multi_master.sh
@@ -6,8 +6,16 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
: "${EXTRA_FLAG:=""}"
: "${EXTRA_TITLE:=""}"
@@ -22,7 +30,7 @@ export CURRENT_STAGE="Test | multi-server | $K3S_IMAGE_TAG"
clustername="multiserver"
info "Creating cluster $clustername $EXTRA_TITLE ..."
$EXE cluster create "$clustername" --servers 3 --api-port 6443 --wait --timeout 360s $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
$EXE cluster create "$clustername" --servers 3 --wait --timeout 360s $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
info "Checking that we have access to the cluster..."
check_clusters "$clustername" || failed "error checking cluster $EXTRA_TITLE"

File: tests/test_multi_master_start_stop.sh
@@ -8,8 +8,16 @@ KNOWN_TO_FAIL=("v1.17.17-k3s1" "v1.18.15-k3s1") # some versions of k3s don't wor
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
: "${EXTRA_FLAG:=""}"
: "${EXTRA_TITLE:=""}"
@@ -31,7 +39,7 @@ fi
clustername="multiserverstartstop"
info "Creating cluster $clustername $EXTRA_TITLE ..."
$EXE cluster create "$clustername" --servers 3 --api-port 6443 --wait --timeout 360s $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
$EXE cluster create "$clustername" --servers 3 --wait --timeout 360s $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
info "Checking that we have access to the cluster..."
check_clusters "$clustername" || failed "error checking cluster $EXTRA_TITLE"

File: tests/test_node_edit.sh
@@ -6,8 +6,16 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
export CURRENT_STAGE="Test | NodeEdit"

File: tests/test_registry.sh
@@ -6,8 +6,16 @@ CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"
### Step Setup ###
# Redirect all stdout/stderr output to logfile
LOG_FILE="$TEST_OUTPUT_DIR/$( basename "${BASH_SOURCE[0]}" ).log"
exec >${LOG_FILE} 2>&1
export LOG_FILE
# use a kubeconfig file specific to this test
KUBECONFIG="$KUBECONFIG_ROOT/$( basename "${BASH_SOURCE[0]}" ).yaml"
export KUBECONFIG
### Step Setup ###
: "${EXTRA_FLAG:=""}"
: "${EXTRA_TITLE:=""}"
@@ -26,7 +34,7 @@ registryname="$clustername-registry"
highlight "[START] RegistryTest $EXTRA_TITLE"
info "Creating cluster $clustername..."
$EXE cluster create "$clustername" --agents 1 --api-port 6443 --wait --timeout 360s --registry-create "$registryname" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
$EXE cluster create "$clustername" --agents 1 --wait --timeout 360s --registry-create "$registryname" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"
info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5
