[NEW VERSION v4] Merge pull request #447 from rancher/main-v4

pull/453/head
Thorsten Klein 4 years ago committed by GitHub
commit 5092a90d56
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 10
      .drone.yml
  2. 69
      CHANGELOG.md
  3. 5
      Dockerfile
  4. 19
      Makefile
  5. 16
      README.md
  6. 457
      cmd/cluster/clusterCreate.go
  7. 21
      cmd/cluster/clusterDelete.go
  8. 8
      cmd/cluster/clusterList.go
  9. 16
      cmd/cluster/clusterStart.go
  10. 14
      cmd/cluster/clusterStop.go
  11. 46
      cmd/config/config.go
  12. 74
      cmd/config/configInit.go
  13. 44
      cmd/config/configView.go
  14. 8
      cmd/image/imageImport.go
  15. 16
      cmd/kubeconfig/kubeconfigGet.go
  16. 24
      cmd/kubeconfig/kubeconfigMerge.go
  17. 10
      cmd/node/nodeCreate.go
  18. 17
      cmd/node/nodeDelete.go
  19. 91
      cmd/node/nodeList.go
  20. 6
      cmd/node/nodeStart.go
  21. 6
      cmd/node/nodeStop.go
  22. 58
      cmd/registry/registry.go
  23. 29
      cmd/registry/registryConnect.go
  24. 117
      cmd/registry/registryCreate.go
  25. 97
      cmd/registry/registryDelete.go
  26. 112
      cmd/registry/registryList.go
  27. 29
      cmd/registry/registryStart.go
  28. 29
      cmd/registry/registryStop.go
  29. 22
      cmd/root.go
  30. 35
      cmd/util/completion.go
  31. 151
      cmd/util/filter.go
  32. 89
      cmd/util/listings.go
  33. 2
      cmd/util/plugins.go
  34. 87
      cmd/util/ports.go
  35. 2
      cmd/util/volumes.go
  36. 2
      docs/faq/v1vsv3-comparison.md
  37. 14
      docs/index.md
  38. 17
      docs/internals/defaults.md
  39. 2
      docs/internals/networking.md
  40. 126
      docs/usage/commands.md
  41. 40
      docs/usage/guides/cuda.md
  42. 2
      docs/usage/guides/cuda/build.sh
  43. 5
      docs/usage/guides/exposing_services.md
  44. 31
      docs/usage/guides/registries.md
  45. 20
      docs/usage/kubeconfig.md
  46. 57
      go.mod
  47. 230
      go.sum
  48. 2
      main.go
  49. 39
      pkg/actions/nodehooks.go
  50. 591
      pkg/client/cluster.go
  51. 4
      pkg/client/clusterName.go
  52. 8
      pkg/client/host.go
  53. 6
      pkg/client/kubeconfig.go
  54. 6
      pkg/client/loadbalancer.go
  55. 112
      pkg/client/node.go
  56. 306
      pkg/client/registry.go
  57. 66
      pkg/client/registry_test.go
  58. 1
      pkg/client/test.json
  59. 21
      pkg/client/test.yaml
  60. 83
      pkg/config/config.go
  61. 201
      pkg/config/config_test.go
  62. 42
      pkg/config/merge.go
  63. 59
      pkg/config/merge_test.go
  64. 6
      pkg/config/test_assets/config_test_cluster.yaml
  65. 12
      pkg/config/test_assets/config_test_cluster_list.yaml
  66. 43
      pkg/config/test_assets/config_test_simple.yaml
  67. 4
      pkg/config/test_assets/config_test_simple_2.yaml
  68. 3
      pkg/config/test_assets/config_test_unknown.yaml
  69. 271
      pkg/config/transform.go
  70. 55
      pkg/config/transform_test.go
  71. 167
      pkg/config/v1alpha1/types.go
  72. 84
      pkg/config/validate.go
  73. 44
      pkg/config/validate_test.go
  74. 2
      pkg/runtimes/containerd/kubeconfig.go
  75. 16
      pkg/runtimes/containerd/network.go
  76. 7
      pkg/runtimes/containerd/node.go
  77. 7
      pkg/runtimes/containerd/util.go
  78. 24
      pkg/runtimes/docker/container.go
  79. 2
      pkg/runtimes/docker/kubeconfig.go
  80. 66
      pkg/runtimes/docker/network.go
  81. 62
      pkg/runtimes/docker/node.go
  82. 42
      pkg/runtimes/docker/translate.go
  83. 13
      pkg/runtimes/docker/translate_test.go
  84. 49
      pkg/runtimes/docker/util.go
  85. 7
      pkg/runtimes/docker/volume.go
  86. 30
      pkg/runtimes/errors/errors.go
  87. 16
      pkg/runtimes/runtime.go
  88. 10
      pkg/tools/tools.go
  89. 81
      pkg/types/k3s/registry.go
  90. 94
      pkg/types/k8s/registry.go
  91. 186
      pkg/types/types.go
  92. 190
      pkg/util/filter.go
  93. 39
      pkg/util/labels.go
  94. 47
      pkg/util/ports.go
  95. 60
      pkg/util/registry.go
  96. 92
      pkg/util/volumes.go
  97. 8
      proxy/Dockerfile
  98. 46
      tests/assets/config_test_simple.yaml
  99. 16
      tests/common.sh
  100. 16
      tests/dind.sh
  101. Some files were not shown because too many files have changed in this diff Show More

@ -14,7 +14,7 @@ platform:
steps:
- name: lint
image: golang:1.14
image: golang:1.15
commands:
- make ci-setup
- make check-fmt lint
@ -40,7 +40,7 @@ steps:
- tag
- name: build
image: golang:1.14
image: golang:1.15
environment:
GIT_TAG: "${DRONE_TAG}"
commands:
@ -119,6 +119,8 @@ steps:
from_secret: docker_username
password:
from_secret: docker_password
build_args:
- GIT_TAG_OVERRIDE=${DRONE_TAG}
depends_on:
- lint
- test
@ -135,12 +137,14 @@ steps:
- latest
- "${DRONE_TAG}"
dockerfile: Dockerfile
target: dind
target: binary-only
context: .
username:
from_secret: docker_username
password:
from_secret: docker_password
build_args:
- GIT_TAG_OVERRIDE=${DRONE_TAG}
depends_on:
- lint
- test

@ -0,0 +1,69 @@
# Changelog
## v4.0.0
### Breaking Changes
#### Module
**If you're using k3d as a Go module, please have a look into the code to see all the changes!**
- We're open for chats via Slack or GitHub discussions
- Module is now on `github.com/rancher/k3d/v4` due to lots of breaking changes
- `pkg/cluster` is now `pkg/client`
- `ClusterCreate` and `NodeCreate` don't start the entities (containers) anymore
- `ClusterRun` and `NodeRun` orchestrate the new Create and Start functionality
- New config flow: CLIConfig (SimpleConfig) -> ClusterConfig -> Cluster + Opts
#### CLI
- Some flags changed to also use `noun-action` syntax
- e.g. `--switch-context --update-default-kubeconfig` -> `--kubeconfig-switch-context --kubeconfig-update-default`
- this eases grouping and visibility
### Changes
#### Features
- Registry Support
- k3d-managed registry like we had it in k3d v1.x
- Option 1: default settings, paired with cluster creation
- `k3d cluster create --registry-create` -> New registry for that cluster
- `k3d cluster create --registry-use` -> Re-use existing registry
- Option 2: customized, managed stand-alone
- `k3d registry [create/start/stop/delete]`
- Check the documentation, help text and tutorials for more details
- Communicate managed registry using the LocalRegistryHostingV1 spec from [KEP-1755](https://github.com/kubernetes/enhancements/blob/0d69f7cea6fbe73a7d70fab569c6898f5ccb7be0/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry/README.md)
- interesting especially for tools that reload images, like Tilt or Skaffold
- Config File Support
- Put all your CLI-Arguments/Flags into a more readable config file and re-use it everywhere (keep it in your repo)
- Note: this is not always a 1:1 matching in naming/syntax/semantics
- `k3d cluster create --config myconfig.yaml`
```yaml
apiVersion: k3d.io/v1alpha1
kind: Simple
name: mycluster
servers: 3
agents: 2
ports:
- port: 8080:80
nodeFilters:
- loadbalancer
```
- Check out our test cases in [pkg/config/test_assets/](./pkg/config/test_assets/) for more config file examples
- [WIP] Support for Lifecycle Hooks
- Run any executable at specific stages during the cluster and node lifecycles
- e.g. we modify the `registries.yaml` in the `preStart` stage of nodes
- Guides will follow
#### Misc
- Now building with Go 1.15
- same for the k3d-tools code
- updated dependencies (including Docker v20.10)
- tests/e2e: add E2E_INCLUDE and rename E2E_SKIP to E2E_EXCLUDE

@ -1,7 +1,8 @@
FROM golang:1.14 as builder
FROM golang:1.15 as builder
ARG GIT_TAG_OVERRIDE
WORKDIR /app
COPY . .
RUN make build && bin/k3d version
RUN make build -e GIT_TAG_OVERRIDE=${GIT_TAG_OVERRIDE} && bin/k3d version
FROM docker:19.03-dind as dind
RUN apk update && apk add bash curl sudo jq git make netcat-openbsd

@ -16,6 +16,11 @@ export GO111MODULE=on
########## Tags ##########
# get git tag
ifneq ($(GIT_TAG_OVERRIDE),)
$(info GIT_TAG set from env override!)
GIT_TAG := $(GIT_TAG_OVERRIDE)
endif
GIT_TAG ?= $(shell git describe --tags)
ifeq ($(GIT_TAG),)
GIT_TAG := $(shell git describe --always)
@ -41,9 +46,11 @@ REC_DIRS := cmd
########## Test Settings ##########
E2E_LOG_LEVEL ?= WARN
E2E_SKIP ?=
E2E_INCLUDE ?=
E2E_EXCLUDE ?=
E2E_EXTRA ?=
E2E_RUNNER_START_TIMEOUT ?= 10
E2E_HELPER_IMAGE_TAG ?=
########## Go Build Options ##########
# Build targets
@ -57,7 +64,7 @@ PKG := $(shell go mod vendor)
TAGS :=
TESTS := ./...
TESTFLAGS :=
LDFLAGS := -w -s -X github.com/rancher/k3d/v3/version.Version=${GIT_TAG} -X github.com/rancher/k3d/v3/version.K3sVersion=${K3S_TAG}
LDFLAGS := -w -s -X github.com/rancher/k3d/v4/version.Version=${GIT_TAG} -X github.com/rancher/k3d/v4/version.K3sVersion=${K3S_TAG}
GCFLAGS :=
GOFLAGS :=
BINDIR := $(CURDIR)/bin
@ -66,7 +73,7 @@ BINARIES := k3d
# Set version of the k3d helper images for build
ifneq ($(K3D_HELPER_VERSION),)
$(info [INFO] Helper Image version set to ${K3D_HELPER_VERSION})
LDFLAGS += -X github.com/rancher/k3d/v3/version.HelperVersionOverride=${K3D_HELPER_VERSION}
LDFLAGS += -X github.com/rancher/k3d/v4/version.HelperVersionOverride=${K3D_HELPER_VERSION}
endif
# Rules for finding all go source files using 'DIRS' and 'REC_DIRS'
@ -76,7 +83,7 @@ GO_SRC += $(foreach dir,$(REC_DIRS),$(shell find $(dir) -name "*.go"))
########## Required Tools ##########
# Go Package required
PKG_GOX := github.com/mitchellh/gox@v1.0.1
PKG_GOLANGCI_LINT_VERSION := 1.28.3
PKG_GOLANGCI_LINT_VERSION := 1.31.0
PKG_GOLANGCI_LINT_SCRIPT := https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh
PKG_GOLANGCI_LINT := github.com/golangci/golangci-lint/cmd/golangci-lint@v${PKG_GOLANGCI_LINT_VERSION}
@ -95,7 +102,7 @@ LINT_DIRS := $(DIRS) $(foreach dir,$(REC_DIRS),$(dir)/...)
.PHONY: all build build-cross clean fmt check-fmt lint check extra-clean install-tools
all: clean fmt check build
all: clean fmt check test build
############################
########## Builds ##########
@ -163,7 +170,7 @@ test:
e2e: build-docker-dind
@echo "Running e2e tests in k3d:$(K3D_IMAGE_TAG)"
LOG_LEVEL="$(E2E_LOG_LEVEL)" E2E_SKIP="$(E2E_SKIP)" E2E_EXTRA="$(E2E_EXTRA)" E2E_RUNNER_START_TIMEOUT=$(E2E_RUNNER_START_TIMEOUT) tests/dind.sh "${K3D_IMAGE_TAG}-dind"
LOG_LEVEL="$(E2E_LOG_LEVEL)" E2E_INCLUDE="$(E2E_INCLUDE)" E2E_EXCLUDE="$(E2E_EXCLUDE)" E2E_EXTRA="$(E2E_EXTRA)" E2E_RUNNER_START_TIMEOUT=$(E2E_RUNNER_START_TIMEOUT) E2E_HELPER_IMAGE_TAG="$(E2E_HELPER_IMAGE_TAG)" tests/dind.sh "${K3D_IMAGE_TAG}-dind"
ci-tests: fmt check e2e

@ -4,7 +4,7 @@
[![License](https://img.shields.io/github/license/rancher/k3d?style=flat-square)](./LICENSE.md)
![Downloads](https://img.shields.io/github/downloads/rancher/k3d/total.svg?style=flat-square)
[![Go Module](https://img.shields.io/badge/Go%20Module-github.com%2Francher%2Fk3d%2Fv3-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/rancher/k3d/v3)
[![Go Module](https://img.shields.io/badge/Go%20Module-github.com%2Francher%2Fk3d%2Fv4-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/rancher/k3d/v4)
[![Go version](https://img.shields.io/github/go-mod/go-version/rancher/k3d?logo=go&logoColor=white&style=flat-square)](./go.mod)
[![Go Report Card](https://goreportcard.com/badge/github.com/rancher/k3d?style=flat-square)](https://goreportcard.com/report/github.com/rancher/k3d)
@ -13,7 +13,7 @@
<!-- ALL-CONTRIBUTORS-BADGE:END -->
[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg)](code_of_conduct.md)
**Please Note:** `main` is now v3.0.0 and the code for v1.x can be found in the `main-v1` branch!
**Please Note:** `main` is now v4.0.0 and the code for v3.x can be found in the `main-v3` branch!
## [k3s in docker](https://k3d.io)
@ -36,6 +36,7 @@ k3d creates containerized k3s clusters. This means, that you can spin up a multi
## Releases
**Note**: In May 2020 we upgraded from v1.7.x to **v3.0.0** after a complete rewrite of k3d!
**Note**: In January 2021 we upgraded from v3.x.x to **v4.0.0** which includes some breaking changes!
| Platform | Stage | Version | Release Date | |
|-----------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|---|
@ -52,8 +53,8 @@ You have several options there:
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- use the install script to grab a specific release (via `TAG` environment variable):
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`
- wget: `wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
- curl: `curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
- use [Homebrew](https://brew.sh): `brew install k3d` (Homebrew is available for MacOS and Linux)
- Formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
@ -67,7 +68,7 @@ or...
## Build
1. Clone this repo, e.g. via `git clone git@github.com:rancher/k3d.git` or `go get github.com/rancher/k3d/v3@main`
1. Clone this repo, e.g. via `git clone git@github.com:rancher/k3d.git` or `go get github.com/rancher/k3d/v4@main`
2. Inside the repo run
- 'make install-tools' to make sure required go packages are installed
3. Inside the repo run one of the following commands
@ -82,7 +83,7 @@ Check out what you can do via `k3d help` or check the docs @ [k3d.io](https://k3
Example Workflow: Create a new cluster and use it with `kubectl`
1. `k3d cluster create CLUSTER_NAME` to create a new single-node cluster (= 1 container running k3s + 1 loadbalancer container)
2. `k3d kubeconfig merge CLUSTER_NAME --switch-context` to update your default kubeconfig and switch the current-context to the new one
2. [Optional, included in cluster create] `k3d kubeconfig merge CLUSTER_NAME --kubeconfig-switch-context` to update your default kubeconfig and switch the current-context to the new one
3. execute some commands like `kubectl get pods --all-namespaces`
4. `k3d cluster delete CLUSTER_NAME` to delete the default cluster
@ -98,7 +99,8 @@ This repository is based on [@zeerorg](https://github.com/zeerorg/)'s [zeerorg/k
## Related Projects
- [k3x](https://github.com/inercia/k3x): a graphics interface (for Linux) to k3d.
- [k3x](https://github.com/inercia/k3x): GUI (Linux) to k3d
- [vscode-k3d](https://github.com/inercia/vscode-k3d): vscode plugin for k3d
## Contributing

@ -30,12 +30,13 @@ import (
"github.com/spf13/cobra"
cliutil "github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
k3dCluster "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/version"
cliutil "github.com/rancher/k3d/v4/cmd/util"
k3dCluster "github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/config"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
)
@ -48,12 +49,22 @@ Every cluster will consist of one or more containers:
- (optionally) 1 (or more) agent node containers (k3s)
`
// flags that go through some pre-processing before transforming them to config
type preProcessedFlags struct {
APIPort string
Volumes []string
Ports []string
Labels []string
Env []string
RegistryUse []string
}
// NewCmdClusterCreate returns a new cobra command
func NewCmdClusterCreate() *cobra.Command {
createClusterOpts := &k3d.ClusterCreateOpts{}
var noRollback bool
var updateDefaultKubeconfig, updateCurrentContext bool
cliConfig := &conf.SimpleConfig{}
var configFile string
ppFlags := &preProcessedFlags{}
// create new command
cmd := &cobra.Command{
@ -62,55 +73,102 @@ func NewCmdClusterCreate() *cobra.Command {
Long: clusterCreateDescription,
Args: cobra.RangeArgs(0, 1), // exactly one cluster name can be set (default: k3d.DefaultClusterName)
Run: func(cmd *cobra.Command, args []string) {
// parse args and flags
cluster := parseCreateClusterCmd(cmd, args, createClusterOpts)
// check if a cluster with that name exists already
if _, err := k3dCluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, cluster); err == nil {
log.Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", cluster.Name)
/*********************
* CLI Configuration *
*********************/
parseCreateClusterCmd(cmd, args, cliConfig, ppFlags)
/************************
* Merge Configurations *
************************/
log.Debugf("========== Simple Config ==========\n%+v\n==========================\n", cliConfig)
if configFile != "" {
configFromFile, err := config.ReadConfig(configFile)
if err != nil {
log.Fatalln(err)
}
cliConfig, err = config.MergeSimple(*cliConfig, configFromFile.(conf.SimpleConfig))
if err != nil {
log.Fatalln(err)
}
}
if !updateDefaultKubeconfig && updateCurrentContext {
log.Infoln("--update-default-kubeconfig=false --> sets --switch-context=false")
updateCurrentContext = false
log.Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", cliConfig)
/**************************************
* Transform & Validate Configuration *
**************************************/
clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, *cliConfig)
if err != nil {
log.Fatalln(err)
}
log.Debugf("===== Cluster Config =====\n%+v\n===== ===== =====\n", clusterConfig)
if err := config.ValidateClusterConfig(cmd.Context(), runtimes.SelectedRuntime, *clusterConfig); err != nil {
log.Fatalln("Failed Cluster Configuration Validation: ", err)
}
/**************************************
* Create cluster if it doesn't exist *
**************************************/
// check if a cluster with that name exists already
if _, err := k3dCluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster); err == nil {
log.Fatalf("Failed to create cluster '%s' because a cluster with that name already exists", clusterConfig.Cluster.Name)
}
// create cluster
if updateDefaultKubeconfig {
log.Debugln("'--update-default-kubeconfig set: enabling wait-for-server")
cluster.CreateClusterOpts.WaitForServer = true
if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
log.Debugln("'--kubeconfig-update-default set: enabling wait-for-server")
clusterConfig.ClusterCreateOpts.WaitForServer = true
}
if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
//if err := k3dCluster.ClusterCreate(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, &clusterConfig.ClusterCreateOpts); err != nil {
if err := k3dCluster.ClusterRun(cmd.Context(), runtimes.SelectedRuntime, clusterConfig); err != nil {
// rollback if creation failed
log.Errorln(err)
if noRollback {
if cliConfig.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/
log.Fatalln("Cluster creation FAILED, rollback deactivated.")
}
// rollback if creation failed
log.Errorln("Failed to create cluster >>> Rolling Back")
if err := k3dCluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, cluster); err != nil {
if err := k3dCluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster); err != nil {
log.Errorln(err)
log.Fatalln("Cluster creation FAILED, also FAILED to rollback changes!")
}
log.Fatalln("Cluster creation FAILED, all changes have been rolled back!")
}
log.Infof("Cluster '%s' created successfully!", cluster.Name)
log.Infof("Cluster '%s' created successfully!", clusterConfig.Cluster.Name)
/**************
* Kubeconfig *
**************/
if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && clusterConfig.KubeconfigOpts.SwitchCurrentContext {
log.Infoln("--kubeconfig-update-default=false --> sets --kubeconfig-switch-context=false")
clusterConfig.KubeconfigOpts.SwitchCurrentContext = false
}
if updateDefaultKubeconfig {
log.Debugf("Updating default kubeconfig with a new context for cluster %s", cluster.Name)
if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: updateCurrentContext}); err != nil {
if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
log.Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name)
if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: cliConfig.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil {
log.Warningln(err)
}
}
/*****************
* User Feedback *
*****************/
// print information on how to use the cluster with kubectl
log.Infoln("You can now use it like this:")
if updateDefaultKubeconfig && !updateCurrentContext {
fmt.Printf("kubectl config use-context %s\n", fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, cluster.Name))
} else if !updateCurrentContext {
if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig && !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
fmt.Printf("kubectl config use-context %s\n", fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, clusterConfig.Cluster.Name))
} else if !clusterConfig.KubeconfigOpts.SwitchCurrentContext {
if runtime.GOOS == "windows" {
fmt.Printf("$env:KUBECONFIG=(%s kubeconfig write %s)\n", os.Args[0], cluster.Name)
fmt.Printf("$env:KUBECONFIG=(%s kubeconfig write %s)\n", os.Args[0], clusterConfig.Cluster.Name)
} else {
fmt.Printf("export KUBECONFIG=$(%s kubeconfig write %s)\n", os.Args[0], cluster.Name)
fmt.Printf("export KUBECONFIG=$(%s kubeconfig write %s)\n", os.Args[0], clusterConfig.Cluster.Name)
}
}
fmt.Println("kubectl cluster-info")
@ -120,27 +178,37 @@ func NewCmdClusterCreate() *cobra.Command {
/*********
* Flags *
*********/
cmd.Flags().String("api-port", "random", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`")
cmd.Flags().IntP("servers", "s", 1, "Specify how many servers you want to create")
cmd.Flags().IntP("agents", "a", 0, "Specify how many agents you want to create")
cmd.Flags().StringP("image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image that you want to use for the nodes")
cmd.Flags().String("network", "", "Join an existing network")
cmd.Flags().String("token", "", "Specify a cluster token. By default, we generate one.")
cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v \"/my/path@agent[0,1]\" -v \"/tmp/test:/tmp/other@server[0]\"`")
cmd.Flags().StringArrayP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p \"8080:80@agent[0]\" -p \"8081@agent[1]\"`")
cmd.Flags().StringArrayP("label", "l", nil, "Add label to node container (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -l \"my.label@agent[0,1]\" -l \"other.label=somevalue@server[0]\"`")
cmd.Flags().BoolVar(&createClusterOpts.WaitForServer, "wait", true, "Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
cmd.Flags().DurationVar(&createClusterOpts.Timeout, "timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
cmd.Flags().BoolVar(&updateDefaultKubeconfig, "update-default-kubeconfig", true, "Directly update the default kubeconfig with the new cluster's context")
cmd.Flags().BoolVar(&updateCurrentContext, "switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (requires --update-default-kubeconfig)")
cmd.Flags().BoolVar(&createClusterOpts.DisableLoadBalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")
cmd.Flags().BoolVar(&noRollback, "no-rollback", false, "Disable the automatic rollback actions, if anything goes wrong")
cmd.Flags().BoolVar(&createClusterOpts.PrepDisableHostIPInjection, "no-hostip", false, "Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS")
cmd.Flags().StringVar(&createClusterOpts.GPURequest, "gpus", "", "GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker]")
cmd.Flags().StringArrayP("env", "e", nil, "Add environment variables to nodes (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -e \"HTTP_PROXY=my.proxy.com\" -e \"SOME_KEY=SOME_VAL@server[0]\"`")
cmd.Flags().StringVar(&ppFlags.APIPort, "api-port", "random", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`")
cmd.Flags().IntVarP(&cliConfig.Servers, "servers", "s", 1, "Specify how many servers you want to create")
cmd.Flags().IntVarP(&cliConfig.Agents, "agents", "a", 0, "Specify how many agents you want to create")
cmd.Flags().StringVarP(&cliConfig.Image, "image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image that you want to use for the nodes")
cmd.Flags().StringVar(&cliConfig.Network, "network", "", "Join an existing network")
cmd.Flags().StringVar(&cliConfig.ClusterToken, "token", "", "Specify a cluster token. By default, we generate one.")
cmd.Flags().StringArrayVarP(&ppFlags.Volumes, "volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`")
cmd.Flags().StringArrayVarP(&ppFlags.Ports, "port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`")
cmd.Flags().StringArrayVarP(&ppFlags.Labels, "label", "l", nil, "Add label to node container (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -l \"my.label@agent[0,1]\" -v \"other.label=somevalue@server[0]\"`")
cmd.Flags().BoolVar(&cliConfig.Options.K3dOptions.Wait, "wait", true, "Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
cmd.Flags().DurationVar(&cliConfig.Options.K3dOptions.Timeout, "timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
cmd.Flags().BoolVar(&cliConfig.Options.KubeconfigOptions.UpdateDefaultKubeconfig, "kubeconfig-update-default", true, "Directly update the default kubeconfig with the new cluster's context")
cmd.Flags().BoolVar(&cliConfig.Options.KubeconfigOptions.SwitchCurrentContext, "kubeconfig-switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default)")
cmd.Flags().BoolVar(&cliConfig.Options.K3dOptions.DisableLoadbalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")
cmd.Flags().BoolVar(&cliConfig.Options.K3dOptions.NoRollback, "no-rollback", false, "Disable the automatic rollback actions, if anything goes wrong")
cmd.Flags().BoolVar(&cliConfig.Options.K3dOptions.PrepDisableHostIPInjection, "no-hostip", false, "Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS")
cmd.Flags().StringVar(&cliConfig.Options.Runtime.GPURequest, "gpus", "", "GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker]")
cmd.Flags().StringArrayVarP(&ppFlags.Env, "env", "e", nil, "Add environment variables to nodes (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -e \"HTTP_PROXY=my.proxy.com\" -e \"SOME_KEY=SOME_VAL@server[0]\"`")
/* Image Importing */
cmd.Flags().BoolVar(&createClusterOpts.DisableImageVolume, "no-image-volume", false, "Disable the creation of a volume for importing images")
cmd.Flags().BoolVar(&cliConfig.Options.K3dOptions.DisableImageVolume, "no-image-volume", false, "Disable the creation of a volume for importing images")
/* Config File */
cmd.Flags().StringVarP(&configFile, "config", "c", "", "Path of a config file to use")
if err := cobra.MarkFlagFilename(cmd.Flags(), "config", "yaml", "yml"); err != nil {
log.Fatalln("Failed to mark flag 'config' as filename flag")
}
/* Registry */
cmd.Flags().StringArrayVar(&cliConfig.Registries.Use, "registry-use", nil, "Connect to one or more k3d-managed registries running locally")
cmd.Flags().BoolVar(&cliConfig.Registries.Create, "registry-create", false, "Create a k3d-managed registry and connect it to the cluster")
/* Multi Server Configuration */
@ -157,8 +225,8 @@ func NewCmdClusterCreate() *cobra.Command {
*/
/* k3s */
cmd.Flags().StringArrayVar(&createClusterOpts.K3sServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on server nodes (new flag per arg)")
cmd.Flags().StringArrayVar(&createClusterOpts.K3sAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on agent nodes (new flag per arg)")
cmd.Flags().StringArrayVar(&cliConfig.Options.K3sOptions.ExtraServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on server nodes (new flag per arg)")
cmd.Flags().StringArrayVar(&cliConfig.Options.K3sOptions.ExtraAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on agent nodes (new flag per arg)")
/* Subcommands */
@ -167,103 +235,41 @@ func NewCmdClusterCreate() *cobra.Command {
}
// parseCreateClusterCmd parses the command input into variables required to create a cluster
func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts *k3d.ClusterCreateOpts) *k3d.Cluster {
func parseCreateClusterCmd(cmd *cobra.Command, args []string, cliConfig *conf.SimpleConfig, ppFlags *preProcessedFlags) {
/********************************
* Parse and validate arguments *
********************************/
clustername := k3d.DefaultClusterName
if len(args) != 0 {
clustername = args[0]
}
if err := cluster.CheckName(clustername); err != nil {
log.Fatal(err)
cliConfig.Name = args[0]
}
/****************************
* Parse and validate flags *
****************************/
// --image
image, err := cmd.Flags().GetString("image")
if err != nil {
log.Errorln("No image specified")
log.Fatalln(err)
}
if image == "latest" {
image = version.GetK3sVersion(true)
}
// --servers
serverCount, err := cmd.Flags().GetInt("servers")
if err != nil {
log.Fatalln(err)
}
// --agents
agentCount, err := cmd.Flags().GetInt("agents")
if err != nil {
log.Fatalln(err)
}
// --network
networkName, err := cmd.Flags().GetString("network")
if err != nil {
log.Fatalln(err)
}
network := k3d.ClusterNetwork{}
if networkName != "" {
network.Name = networkName
network.External = true
}
if networkName == "host" && (serverCount+agentCount) > 1 {
log.Fatalln("Can only run a single node in hostnetwork mode")
}
// --token
token, err := cmd.Flags().GetString("token")
if err != nil {
log.Fatalln(err)
}
// --timeout
if cmd.Flags().Changed("timeout") && createClusterOpts.Timeout <= 0*time.Second {
// -> WAIT TIMEOUT // TODO: timeout to be validated in pkg/
if cmd.Flags().Changed("timeout") && cliConfig.Options.K3dOptions.Timeout <= 0*time.Second {
log.Fatalln("--timeout DURATION must be >= 1s")
}
// --api-port
apiPort, err := cmd.Flags().GetString("api-port")
if err != nil {
log.Fatalln(err)
}
// -> API-PORT
// parse the port mapping
exposeAPI, err := cliutil.ParseAPIPort(apiPort)
exposeAPI, err := cliutil.ParsePortExposureSpec(ppFlags.APIPort, k3d.DefaultAPIPort)
if err != nil {
log.Fatalln(err)
}
if exposeAPI.Host == "" {
exposeAPI.Host = k3d.DefaultAPIHost
}
if exposeAPI.HostIP == "" {
exposeAPI.HostIP = k3d.DefaultAPIHost
}
if networkName == "host" {
// in hostNetwork mode, we're not going to map a hostport. Here it should always use 6443.
// Note that hostNetwork mode is super inflexible and since we don't change the backend port (on the container), it will only be one hostmode cluster allowed.
exposeAPI.Port = k3d.DefaultAPIPort
}
// --volume
volumeFlags, err := cmd.Flags().GetStringArray("volume")
if err != nil {
log.Fatalln(err)
cliConfig.ExposeAPI = conf.SimpleExposureOpts{
Host: exposeAPI.Host,
HostIP: exposeAPI.Binding.HostIP,
HostPort: exposeAPI.Binding.HostPort,
}
// -> VOLUMES
// volumeFilterMap will map volume mounts to applied node filters
volumeFilterMap := make(map[string][]string, 1)
for _, volumeFlag := range volumeFlags {
for _, volumeFlag := range ppFlags.Volumes {
// split node filter from the specified volume
volume, filters, err := cliutil.SplitFiltersFromFlag(volumeFlag)
@ -271,12 +277,6 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
log.Fatalln(err)
}
// validate the specified volume mount and return it in SRC:DEST format
volume, err = cliutil.ValidateVolumeMount(runtimes.SelectedRuntime, volume)
if err != nil {
log.Fatalln(err)
}
// create new entry or append filter to existing entry
if _, exists := volumeFilterMap[volume]; exists {
volumeFilterMap[volume] = append(volumeFilterMap[volume], filters...)
@ -285,13 +285,18 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
}
}
// --port
portFlags, err := cmd.Flags().GetStringArray("port")
if err != nil {
log.Fatalln(err)
for volume, nodeFilters := range volumeFilterMap {
cliConfig.Volumes = append(cliConfig.Volumes, conf.VolumeWithNodeFilters{
Volume: volume,
NodeFilters: nodeFilters,
})
}
log.Tracef("VolumeFilterMap: %+v", volumeFilterMap)
// -> PORTS
portFilterMap := make(map[string][]string, 1)
for _, portFlag := range portFlags {
for _, portFlag := range ppFlags.Ports {
// split node filter from the specified volume
portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
if err != nil {
@ -302,14 +307,6 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
log.Fatalln("Can only apply a Portmap to one node")
}
// the same portmapping can't be applied to multiple nodes
// validate the specified volume mount and return it in SRC:DEST format
portmap, err = cliutil.ValidatePortMap(portmap)
if err != nil {
log.Fatalln(err)
}
// create new entry or append filter to existing entry
if _, exists := portFilterMap[portmap]; exists {
log.Fatalln("Same Portmapping can not be used for multiple nodes")
@ -318,43 +315,47 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
}
}
for port, nodeFilters := range portFilterMap {
cliConfig.Ports = append(cliConfig.Ports, conf.PortWithNodeFilters{
Port: port,
NodeFilters: nodeFilters,
})
}
log.Tracef("PortFilterMap: %+v", portFilterMap)
// --label
labelFlags, err := cmd.Flags().GetStringArray("label")
if err != nil {
log.Fatalln(err)
}
// labelFilterMap will add container label to applied node filters
labelFilterMap := make(map[string][]string, 1)
for _, labelFlag := range labelFlags {
for _, labelFlag := range ppFlags.Labels {
// split node filter from the specified label
label, filters, err := cliutil.SplitFiltersFromFlag(labelFlag)
label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
if err != nil {
log.Fatalln(err)
}
// create new entry or append filter to existing entry
if _, exists := labelFilterMap[label]; exists {
labelFilterMap[label] = append(labelFilterMap[label], filters...)
labelFilterMap[label] = append(labelFilterMap[label], nodeFilters...)
} else {
labelFilterMap[label] = filters
labelFilterMap[label] = nodeFilters
}
}
for label, nodeFilters := range labelFilterMap {
cliConfig.Labels = append(cliConfig.Labels, conf.LabelWithNodeFilters{
Label: label,
NodeFilters: nodeFilters,
})
}
log.Tracef("LabelFilterMap: %+v", labelFilterMap)
// --env
envFlags, err := cmd.Flags().GetStringArray("env")
if err != nil {
log.Fatalln(err)
}
// envFilterMap will add container env vars to applied node filters
envFilterMap := make(map[string][]string, 1)
for _, envFlag := range envFlags {
for _, envFlag := range ppFlags.Env {
// split node filter from the specified env var
env, filters, err := cliutil.SplitFiltersFromFlag(envFlag)
@ -370,134 +371,12 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, createClusterOpts
}
}
log.Tracef("EnvFilterMap: %+v", envFilterMap)
/********************
* *
* generate cluster *
* *
********************/
cluster := &k3d.Cluster{
Name: clustername,
Network: network,
Token: token,
CreateClusterOpts: createClusterOpts,
ExposeAPI: exposeAPI,
}
// generate list of nodes
cluster.Nodes = []*k3d.Node{}
// ServerLoadBalancer
if !createClusterOpts.DisableLoadBalancer {
cluster.ServerLoadBalancer = &k3d.Node{
Role: k3d.LoadBalancerRole,
}
}
/****************
* Server Nodes *
****************/
for i := 0; i < serverCount; i++ {
node := k3d.Node{
Role: k3d.ServerRole,
Image: image,
Args: createClusterOpts.K3sServerArgs,
ServerOpts: k3d.ServerOpts{},
}
// TODO: by default, we don't expose an API port: should we change that?
// -> if we want to change that, simply add the exposeAPI struct here
// first server node will be init node if we have more than one server specified but no external datastore
if i == 0 && serverCount > 1 {
node.ServerOpts.IsInit = true
cluster.InitNode = &node
}
// append node to list
cluster.Nodes = append(cluster.Nodes, &node)
}
/****************
* Agent Nodes *
****************/
for i := 0; i < agentCount; i++ {
node := k3d.Node{
Role: k3d.AgentRole,
Image: image,
Args: createClusterOpts.K3sAgentArgs,
}
cluster.Nodes = append(cluster.Nodes, &node)
}
// append volumes
for volume, filters := range volumeFilterMap {
nodes, err := cliutil.FilterNodes(cluster.Nodes, filters)
if err != nil {
log.Fatalln(err)
}
for _, node := range nodes {
node.Volumes = append(node.Volumes, volume)
}
}
// append ports
nodeCount := serverCount + agentCount
nodeList := cluster.Nodes
if !createClusterOpts.DisableLoadBalancer {
nodeCount++
nodeList = append(nodeList, cluster.ServerLoadBalancer)
}
for portmap, filters := range portFilterMap {
if len(filters) == 0 && (nodeCount) > 1 {
log.Fatalf("Malformed portmapping '%s' lacks a node filter, but there is more than one node (including the loadbalancer, if there is any).", portmap)
}
nodes, err := cliutil.FilterNodes(nodeList, filters)
if err != nil {
log.Fatalln(err)
}
for _, node := range nodes {
node.Ports = append(node.Ports, portmap)
}
}
// append labels
for label, filters := range labelFilterMap {
nodes, err := cliutil.FilterNodes(cluster.Nodes, filters)
if err != nil {
log.Fatalln(err)
}
for _, node := range nodes {
// ensure node.Labels map is initialized (see also ClusterCreate.nodeSetup)
if node.Labels == nil {
node.Labels = make(map[string]string)
}
labelKey, labelValue := cliutil.SplitKV(label)
node.Labels[labelKey] = labelValue
}
}
// append env vars
for env, filters := range envFilterMap {
nodes, err := cliutil.FilterNodes(cluster.Nodes, filters)
if err != nil {
log.Fatalln(err)
}
for _, node := range nodes {
node.Env = append(node.Env, env)
}
for envVar, nodeFilters := range envFilterMap {
cliConfig.Env = append(cliConfig.Env, conf.EnvVarWithNodeFilters{
EnvVar: envVar,
NodeFilters: nodeFilters,
})
}
/**********************
* Utility Containers *
**********************/
// ...
return cluster
log.Tracef("EnvFilterMap: %+v", envFilterMap)
}

@ -26,11 +26,11 @@ import (
"os"
"path"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3dutil "github.com/rancher/k3d/v3/pkg/util"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
k3dutil "github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -54,11 +54,11 @@ func NewCmdClusterDelete() *cobra.Command {
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
if err := cluster.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
if err := client.ClusterDelete(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
log.Fatalln(err)
}
log.Infoln("Removing cluster details from default kubeconfig...")
if err := cluster.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
if err := client.KubeconfigRemoveClusterFromDefaultConfig(cmd.Context(), c); err != nil {
log.Warnln("Failed to remove cluster details from default kubeconfig")
log.Warnln(err)
}
@ -100,7 +100,8 @@ func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
log.Infoln("Deleting all clusters...")
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
@ -113,9 +114,9 @@ func parseDeleteClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
}
for _, name := range clusternames {
c, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
c, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
if err != nil {
if err == cluster.ClusterGetNoNodesFoundError {
if err == client.ClusterGetNoNodesFoundError {
continue
}
log.Fatalln(err)

@ -28,10 +28,10 @@ import (
"os"
"strings"
"github.com/rancher/k3d/v3/cmd/util"
k3cluster "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
k3cluster "github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"

@ -24,13 +24,13 @@ package cluster
import (
"time"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
@ -52,7 +52,7 @@ func NewCmdClusterStart() *cobra.Command {
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
if err := cluster.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
if err := client.ClusterStart(cmd.Context(), runtimes.SelectedRuntime, c, startClusterOpts); err != nil {
log.Fatalln(err)
}
}
@ -79,7 +79,7 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
@ -92,7 +92,7 @@ func parseStartClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
}
for _, name := range clusternames {
cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
if err != nil {
log.Fatalln(err)
}

@ -24,10 +24,10 @@ package cluster
import (
"github.com/spf13/cobra"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
@ -47,7 +47,7 @@ func NewCmdClusterStop() *cobra.Command {
log.Infoln("No clusters found")
} else {
for _, c := range clusters {
if err := cluster.ClusterStop(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
if err := client.ClusterStop(cmd.Context(), runtimes.SelectedRuntime, c); err != nil {
log.Fatalln(err)
}
}
@ -72,7 +72,7 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
@ -85,7 +85,7 @@ func parseStopClusterCmd(cmd *cobra.Command, args []string) []*k3d.Cluster {
}
for _, name := range clusternames {
cluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
cluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: name})
if err != nil {
log.Fatalln(err)
}

@ -0,0 +1,46 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewCmdConfig returns the parent `config` command. Invoked directly it
// only prints its own help text; the actual work lives in its
// subcommands (currently `config init`).
func NewCmdConfig() *cobra.Command {
	configCmd := &cobra.Command{
		Use:   "config",
		Short: "Work with config file(s)",
		Long:  `Work with config file(s)`,
		Run: func(c *cobra.Command, _ []string) {
			err := c.Help()
			if err != nil {
				log.Errorln("Couldn't get help text")
				log.Fatalln(err)
			}
		},
	}

	// wire up subcommands
	configCmd.AddCommand(NewCmdConfigInit())

	return configCmd
}

@ -0,0 +1,74 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
"fmt"
"os"
config "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewCmdConfigInit returns the `config init` command, which writes the
// default k3d config file content to the path given via --output
// ("-" writes to stdout). With --force an existing file is overwritten;
// otherwise an existing file aborts the command with a non-zero exit.
func NewCmdConfigInit() *cobra.Command {
	var output string
	var force bool
	cmd := &cobra.Command{
		Use:     "init",
		Aliases: []string{"create"},
		Run: func(cmd *cobra.Command, args []string) {
			log.Infoln("COMING SOON: print a basic k3d config with default pre-filled.")
			if output == "-" {
				// "-" means stdout, following common CLI convention
				fmt.Println(config.DefaultConfig)
			} else {
				// check if file exists
				var file *os.File
				var err error
				_, err = os.Stat(output)
				if os.IsNotExist(err) || force {
					// create/overwrite file
					file, err = os.Create(output)
					if err != nil {
						log.Fatalf("Failed to create/overwrite output file: %s", err)
					}
					// BUGFIX: the file handle was previously never closed (leak);
					// close it once this Run func returns
					defer file.Close()
					// write content
					if _, err = file.WriteString(config.DefaultConfig); err != nil {
						log.Fatalf("Failed to write to output file: %+v", err)
					}
				} else if err != nil {
					// Stat failed for a reason other than "not exists"
					log.Fatalf("Failed to stat output file: %+v", err)
				} else {
					log.Errorln("Output file exists and --force was not set")
					os.Exit(1)
				}
			}
		},
	}
	cmd.Flags().StringVarP(&output, "output", "o", "k3d-default.yaml", "Write a default k3d config")
	cmd.Flags().BoolVarP(&force, "force", "f", false, "Force overwrite of target file")
	return cmd
}

@ -0,0 +1,44 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
"fmt"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// NewCmdConfigView returns the `config view` command, which dumps all
// settings currently loaded into viper to stdout.
// (The original doc comment misnamed this function as NewCmdConfig.)
func NewCmdConfigView() *cobra.Command {
	viewCmd := &cobra.Command{
		Use:     "view",
		Aliases: []string{"show"},
		Run: func(c *cobra.Command, _ []string) {
			log.Debugln("print config")
			fmt.Printf("%+v", viper.AllSettings())
			log.Debugln("printed config")
		},
	}
	return viewCmd
}

@ -26,10 +26,10 @@ import (
"github.com/spf13/cobra"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v3/pkg/tools"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/tools"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)

@ -25,10 +25,10 @@ import (
"fmt"
"os"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"
@ -41,7 +41,7 @@ type getKubeconfigFlags struct {
// NewCmdKubeconfigGet returns a new cobra command
func NewCmdKubeconfigGet() *cobra.Command {
writeKubeConfigOptions := cluster.WriteKubeConfigOptions{
writeKubeConfigOptions := client.WriteKubeConfigOptions{
UpdateExisting: true,
UpdateCurrentContext: true,
OverwriteExisting: true,
@ -68,13 +68,13 @@ func NewCmdKubeconfigGet() *cobra.Command {
// generate list of clusters
if getKubeconfigFlags.all {
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
} else {
for _, clusterName := range args {
retrievedCluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
if err != nil {
log.Fatalln(err)
}
@ -87,7 +87,7 @@ func NewCmdKubeconfigGet() *cobra.Command {
for _, c := range clusters {
log.Debugf("Getting kubeconfig for cluster '%s'", c.Name)
fmt.Println("---") // YAML document separator
if _, err := cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
if _, err := client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, "-", &writeKubeConfigOptions); err != nil {
log.Errorln(err)
errorGettingKubeconfig = true
}

@ -27,11 +27,11 @@ import (
"path"
"strings"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3dutil "github.com/rancher/k3d/v3/pkg/util"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
k3dutil "github.com/rancher/k3d/v4/pkg/util"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/clientcmd"
@ -47,7 +47,7 @@ type mergeKubeconfigFlags struct {
// NewCmdKubeconfigMerge returns a new cobra command
func NewCmdKubeconfigMerge() *cobra.Command {
writeKubeConfigOptions := cluster.WriteKubeConfigOptions{}
writeKubeConfigOptions := client.WriteKubeConfigOptions{}
mergeKubeconfigFlags := mergeKubeconfigFlags{}
@ -64,12 +64,12 @@ func NewCmdKubeconfigMerge() *cobra.Command {
var err error
if mergeKubeconfigFlags.targetDefault && mergeKubeconfigFlags.output != "" {
log.Fatalln("Cannot use both '--output' and '--merge-default-kubeconfig' at the same time")
log.Fatalln("Cannot use both '--output' and '--kubeconfig-merge-default' at the same time")
}
// generate list of clusters
if mergeKubeconfigFlags.all {
clusters, err = cluster.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
clusters, err = client.ClusterList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
@ -81,7 +81,7 @@ func NewCmdKubeconfigMerge() *cobra.Command {
}
for _, clusterName := range clusternames {
retrievedCluster, err := cluster.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
retrievedCluster, err := client.ClusterGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Cluster{Name: clusterName})
if err != nil {
log.Fatalln(err)
}
@ -103,7 +103,7 @@ func NewCmdKubeconfigMerge() *cobra.Command {
if output == "" && !mergeKubeconfigFlags.targetDefault {
output = path.Join(outputDir, fmt.Sprintf("kubeconfig-%s.yaml", c.Name))
}
output, err = cluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
output, err = client.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, c, output, &writeKubeConfigOptions)
if err != nil {
log.Errorln(err)
errorGettingKubeconfig = true
@ -129,9 +129,9 @@ func NewCmdKubeconfigMerge() *cobra.Command {
if err := cmd.MarkFlagFilename("output"); err != nil {
log.Fatalln("Failed to mark flag --output as filename")
}
cmd.Flags().BoolVarP(&mergeKubeconfigFlags.targetDefault, "merge-default-kubeconfig", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
cmd.Flags().BoolVarP(&mergeKubeconfigFlags.targetDefault, "kubeconfig-merge-default", "d", false, fmt.Sprintf("Merge into the default kubeconfig ($KUBECONFIG or %s)", clientcmd.RecommendedHomeFile))
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateExisting, "update", "u", true, "Update conflicting fields in existing kubeconfig")
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateCurrentContext, "switch-context", "s", true, "Switch to new context")
cmd.Flags().BoolVarP(&writeKubeConfigOptions.UpdateCurrentContext, "kubeconfig-switch-context", "s", true, "Switch to new context")
cmd.Flags().BoolVar(&writeKubeConfigOptions.OverwriteExisting, "overwrite", false, "[Careful!] Overwrite existing file, ignoring its contents")
cmd.Flags().BoolVarP(&mergeKubeconfigFlags.all, "all", "a", false, "Get kubeconfigs from all existing clusters")

@ -27,11 +27,11 @@ import (
"github.com/spf13/cobra"
"github.com/rancher/k3d/v3/cmd/util"
k3dc "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/version"
"github.com/rancher/k3d/v4/cmd/util"
k3dc "github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
)

@ -22,10 +22,10 @@ THE SOFTWARE.
package node
import (
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -48,7 +48,7 @@ func NewCmdNodeDelete() *cobra.Command {
log.Infoln("No nodes found")
} else {
for _, node := range nodes {
if err := cluster.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node); err != nil {
if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node); err != nil {
log.Fatalln(err)
}
}
@ -59,7 +59,7 @@ func NewCmdNodeDelete() *cobra.Command {
// add subcommands
// add flags
cmd.Flags().BoolP("all", "a", false, "Delete all existing clusters")
cmd.Flags().BoolP("all", "a", false, "Delete all existing nodes")
// done
return cmd
@ -74,10 +74,11 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string) []*k3d.Node {
if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
nodes, err = cluster.NodeList(cmd.Context(), runtimes.SelectedRuntime)
nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
nodes = client.NodeFilterByRoles(nodes, k3d.ClusterInternalNodeRoles, k3d.ClusterInternalNodeRoles)
return nodes
}
@ -86,7 +87,7 @@ func parseDeleteNodeCmd(cmd *cobra.Command, args []string) []*k3d.Node {
}
for _, name := range args {
node, err := cluster.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
if err != nil {
log.Fatalln(err)
}

@ -23,22 +23,26 @@ package node
import (
"fmt"
"os"
"sort"
"strings"
"github.com/liggitt/tabwriter"
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"
)
type nodeListFlags struct {
noHeader bool
output string
}
// NewCmdNodeList returns a new cobra command
func NewCmdNodeList() *cobra.Command {
nodeListFlags := nodeListFlags{}
// create new command
cmd := &cobra.Command{
@ -49,75 +53,52 @@ func NewCmdNodeList() *cobra.Command {
Args: cobra.MinimumNArgs(0), // 0 or more; 0 = all
ValidArgsFunction: util.ValidArgsAvailableNodes,
Run: func(cmd *cobra.Command, args []string) {
nodes, headersOff := parseGetNodeCmd(cmd, args)
nodes := []*k3d.Node{}
for _, name := range args {
nodes = append(nodes, &k3d.Node{
Name: name,
})
}
var existingNodes []*k3d.Node
if len(nodes) == 0 { // Option a) no name specified -> get all nodes
found, err := cluster.NodeList(cmd.Context(), runtimes.SelectedRuntime)
found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
existingNodes = append(existingNodes, found...)
} else { // Option b) cluster name specified -> get specific cluster
for _, node := range nodes {
found, err := cluster.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
if err != nil {
log.Fatalln(err)
}
existingNodes = append(existingNodes, found)
}
}
// print existing clusters
printNodes(existingNodes, headersOff)
// print existing nodes
headers := &[]string{}
if !nodeListFlags.noHeader {
headers = &[]string{"NAME", "ROLE", "CLUSTER", "STATUS"}
}
util.PrintNodes(existingNodes, nodeListFlags.output,
headers, util.NodePrinterFunc(func(tabwriter *tabwriter.Writer, node *k3d.Node) {
fmt.Fprintf(tabwriter, "%s\t%s\t%s\t%s\n",
strings.TrimPrefix(node.Name, "/"),
string(node.Role),
node.Labels[k3d.LabelClusterName],
node.State.Status)
}))
},
}
// add flags
cmd.Flags().Bool("no-headers", false, "Disable headers")
cmd.Flags().BoolVar(&nodeListFlags.noHeader, "no-headers", false, "Disable headers")
cmd.Flags().StringVarP(&nodeListFlags.output, "output", "o", "", "Output format. One of: json|yaml")
// add subcommands
// done
return cmd
}
func parseGetNodeCmd(cmd *cobra.Command, args []string) ([]*k3d.Node, bool) {
// --no-headers
headersOff, err := cmd.Flags().GetBool("no-headers")
if err != nil {
log.Fatalln(err)
}
// Args = node name
if len(args) == 0 {
return nil, headersOff
}
nodes := []*k3d.Node{}
for _, name := range args {
nodes = append(nodes, &k3d.Node{Name: name})
}
return nodes, headersOff
}
func printNodes(nodes []*k3d.Node, headersOff bool) {
tabwriter := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', tabwriter.RememberWidths)
defer tabwriter.Flush()
if !headersOff {
headers := []string{"NAME", "ROLE", "CLUSTER", "STATUS"}
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(headers, "\t"))
if err != nil {
log.Fatalln("Failed to print headers")
}
}
sort.Slice(nodes, func(i, j int) bool {
return nodes[i].Name < nodes[j].Name
})
for _, node := range nodes {
fmt.Fprintf(tabwriter, "%s\t%s\t%s\t%s\n", strings.TrimPrefix(node.Name, "/"), string(node.Role), node.Labels[k3d.LabelClusterName], node.State.Status)
}
}

@ -22,9 +22,9 @@ THE SOFTWARE.
package node
import (
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/spf13/cobra"
log "github.com/sirupsen/logrus"

@ -22,11 +22,11 @@ THE SOFTWARE.
package node
import (
"github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/spf13/cobra"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)

@ -0,0 +1,58 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package registry
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewCmdRegistry returns a new cobra command
func NewCmdRegistry() *cobra.Command {
// create new cobra command
cmd := &cobra.Command{
Use: "registry",
Aliases: []string{"registries", "reg"},
Short: "Manage registry/registries",
Long: `Manage registry/registries`,
Run: func(cmd *cobra.Command, args []string) {
if err := cmd.Help(); err != nil {
log.Errorln("Couldn't get help text")
log.Fatalln(err)
}
},
}
// add subcommands
cmd.AddCommand(NewCmdRegistryCreate())
cmd.AddCommand(NewCmdRegistryStart())
cmd.AddCommand(NewCmdRegistryStop())
cmd.AddCommand(NewCmdRegistryDelete())
cmd.AddCommand(NewCmdRegistryList())
// cmd.AddCommand(NewCmdRegistryConnect()) // TODO: registry connect requires reload capabilities for containerd config
// add flags
// done
return cmd
}

@ -0,0 +1,29 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package registry
import "github.com/spf13/cobra"
// NewCmdRegistryConnect creates a new cobra command
// NewCmdRegistryConnect creates a new cobra command.
// Currently a stub: it returns an empty command with no Run function.
func NewCmdRegistryConnect() *cobra.Command {
	connectCmd := &cobra.Command{}
	return connectCmd
}

@ -0,0 +1,117 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package registry
import (
"fmt"
log "github.com/sirupsen/logrus"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/client"
cliutil "github.com/rancher/k3d/v4/cmd/util"
"github.com/spf13/cobra"
)
// regCreatePreProcessedFlags holds raw flag values that need further parsing
// before use (see parseCreateRegistryCmd).
type regCreatePreProcessedFlags struct {
	Port     string   // --port: `[HOST:]HOSTPORT` spec or "random"
	Clusters []string // --cluster: names of clusters the registry shall connect to
}

// regCreateFlags holds flag values that are usable as-is.
type regCreateFlags struct {
	Image string // --image: container image used for the registry node
}
// NewCmdRegistryCreate returns a new cobra command
func NewCmdRegistryCreate() *cobra.Command {
flags := &regCreateFlags{}
ppFlags := &regCreatePreProcessedFlags{}
// create new command
cmd := &cobra.Command{
Use: "create NAME",
Short: "Create a new registry",
Long: `Create a new registry.`,
Args: cobra.MaximumNArgs(1), // maximum one name accepted
Run: func(cmd *cobra.Command, args []string) {
reg, clusters := parseCreateRegistryCmd(cmd, args, flags, ppFlags)
regNode, err := client.RegistryRun(cmd.Context(), runtimes.SelectedRuntime, reg)
if err != nil {
log.Fatalln(err)
}
if err := client.RegistryConnectClusters(cmd.Context(), runtimes.SelectedRuntime, regNode, clusters); err != nil {
log.Errorln(err)
}
},
}
// add flags
// TODO: connecting to clusters requires non-existing config reload functionality in containerd
cmd.Flags().StringArrayVarP(&ppFlags.Clusters, "cluster", "c", nil, "[NotReady] Select the cluster(s) that the registry shall connect to.")
if err := cmd.RegisterFlagCompletionFunc("cluster", cliutil.ValidArgsAvailableClusters); err != nil {
log.Fatalln("Failed to register flag completion for '--cluster'", err)
}
if err := cmd.Flags().MarkHidden("cluster"); err != nil {
log.Fatalln("Failed to hide --cluster flag on registry create command")
}
cmd.Flags().StringVarP(&flags.Image, "image", "i", fmt.Sprintf("%s:%s", k3d.DefaultRegistryImageRepo, k3d.DefaultRegistryImageTag), "Specify image used for the registry")
cmd.Flags().StringVarP(&ppFlags.Port, "port", "p", "random", "Select which port the registry should be listening on on your machine (localhost) (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d registry create --port 0.0.0.0:5111`")
// done
return cmd
}
// parseCreateRegistryCmd parses the command input into variables required to create a registry
func parseCreateRegistryCmd(cmd *cobra.Command, args []string, flags *regCreateFlags, ppFlags *regCreatePreProcessedFlags) (*k3d.Registry, []*k3d.Cluster) {
// --cluster
clusters := []*k3d.Cluster{}
for _, name := range ppFlags.Clusters {
clusters = append(clusters,
&k3d.Cluster{
Name: name,
},
)
}
// --port
exposePort, err := cliutil.ParsePortExposureSpec(ppFlags.Port, k3d.DefaultRegistryPort)
if err != nil {
log.Errorln("Failed to parse registry port")
log.Fatalln(err)
}
// set the name for the registry node
registryName := ""
if len(args) > 0 {
registryName = fmt.Sprintf("%s-%s", k3d.DefaultObjectNamePrefix, args[0])
}
return &k3d.Registry{Host: registryName, Image: flags.Image, ExposureOpts: *exposePort}, clusters
}

@ -0,0 +1,97 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package registry
import (
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// NewCmdRegistryDelete returns a new cobra command
func NewCmdRegistryDelete() *cobra.Command {
// create new cobra command
cmd := &cobra.Command{
Use: "delete (NAME | --all)",
Short: "Delete registry/registries.",
Long: `Delete registry/registries.`,
Args: cobra.MinimumNArgs(1), // at least one node has to be specified
ValidArgsFunction: util.ValidArgsAvailableRegistries,
Run: func(cmd *cobra.Command, args []string) {
nodes := parseRegistryDeleteCmd(cmd, args)
if len(nodes) == 0 {
log.Infoln("No nodes found")
} else {
for _, node := range nodes {
if err := client.NodeDelete(cmd.Context(), runtimes.SelectedRuntime, node); err != nil {
log.Fatalln(err)
}
}
}
},
}
// add subcommands
// add flags
cmd.Flags().BoolP("all", "a", false, "Delete all existing registries")
// done
return cmd
}
// parseRegistryDeleteCmd parses the command input into variables required to delete nodes
func parseRegistryDeleteCmd(cmd *cobra.Command, args []string) []*k3d.Node {
// --all
var nodes []*k3d.Node
if all, err := cmd.Flags().GetBool("all"); err != nil {
log.Fatalln(err)
} else if all {
nodes, err = client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
return nodes
}
if len(args) < 1 {
log.Fatalln("Expecting at least one registry name if `--all` is not set")
}
for _, name := range args {
node, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, &k3d.Node{Name: name})
if err != nil {
log.Fatalln(err)
}
nodes = append(nodes, node)
}
return nodes
}

@ -0,0 +1,112 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package registry
import (
"fmt"
"strings"
"github.com/liggitt/tabwriter"
"github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// registryListFlags holds the flag values of the `registry list` command.
type registryListFlags struct {
	noHeader bool   // --no-headers: suppress the table header row
	output   string // --output/-o: "json" or "yaml" for machine-readable output
}
// NewCmdRegistryList creates a new cobra command
func NewCmdRegistryList() *cobra.Command {
registryListFlags := registryListFlags{}
// create new command
cmd := &cobra.Command{
Use: "list [NAME [NAME...]]",
Aliases: []string{"ls", "get"},
Short: "List registries",
Long: `List registries.`,
Args: cobra.MinimumNArgs(0), // 0 or more; 0 = all
ValidArgsFunction: util.ValidArgsAvailableRegistries,
Run: func(cmd *cobra.Command, args []string) {
var existingNodes []*k3d.Node
nodes := []*k3d.Node{}
for _, name := range args {
nodes = append(nodes, &k3d.Node{
Name: name,
})
}
if len(nodes) == 0 { // Option a) no name specified -> get all registries
found, err := client.NodeList(cmd.Context(), runtimes.SelectedRuntime)
if err != nil {
log.Fatalln(err)
}
existingNodes = append(existingNodes, found...)
} else { // Option b) registry name(s) specified -> get specific registries
for _, node := range nodes {
log.Tracef("Node %s", node.Name)
found, err := client.NodeGet(cmd.Context(), runtimes.SelectedRuntime, node)
if err != nil {
log.Fatalln(err)
}
existingNodes = append(existingNodes, found)
}
}
existingNodes = client.NodeFilterByRoles(existingNodes, []k3d.Role{k3d.RegistryRole}, []k3d.Role{})
// print existing registries
headers := &[]string{}
if !registryListFlags.noHeader {
headers = &[]string{"NAME", "ROLE", "CLUSTER"} // TODO: add status
}
util.PrintNodes(existingNodes, registryListFlags.output,
headers, util.NodePrinterFunc(func(tabwriter *tabwriter.Writer, node *k3d.Node) {
cluster := "*"
if _, ok := node.Labels[k3d.LabelClusterName]; ok {
cluster = node.Labels[k3d.LabelClusterName]
}
fmt.Fprintf(tabwriter, "%s\t%s\t%s\n",
strings.TrimPrefix(node.Name, "/"),
string(node.Role),
cluster,
)
}),
)
},
}
// add flags
cmd.Flags().BoolVar(&registryListFlags.noHeader, "no-headers", false, "Disable headers")
cmd.Flags().StringVarP(&registryListFlags.output, "output", "o", "", "Output format. One of: json|yaml")
// add subcommands
// done
return cmd
}

@ -0,0 +1,29 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package registry
import "github.com/spf13/cobra"
// NewCmdRegistryStart creates a new cobra command
// NewCmdRegistryStart creates a new cobra command.
// Currently a stub: it returns an empty command with no Run function.
func NewCmdRegistryStart() *cobra.Command {
	startCmd := &cobra.Command{}
	return startCmd
}

@ -0,0 +1,29 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package registry
import "github.com/spf13/cobra"
// NewCmdRegistryStop creates a new cobra command
// NewCmdRegistryStop creates a new cobra command.
// Currently a stub: it returns an empty command with no Run function.
func NewCmdRegistryStop() *cobra.Command {
	stopCmd := &cobra.Command{}
	return stopCmd
}

@ -31,13 +31,15 @@ import (
"github.com/spf13/cobra"
"github.com/rancher/k3d/v3/cmd/cluster"
"github.com/rancher/k3d/v3/cmd/image"
"github.com/rancher/k3d/v3/cmd/kubeconfig"
"github.com/rancher/k3d/v3/cmd/node"
cliutil "github.com/rancher/k3d/v3/cmd/util"
"github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v3/version"
"github.com/rancher/k3d/v4/cmd/cluster"
cfg "github.com/rancher/k3d/v4/cmd/config"
"github.com/rancher/k3d/v4/cmd/image"
"github.com/rancher/k3d/v4/cmd/kubeconfig"
"github.com/rancher/k3d/v4/cmd/node"
"github.com/rancher/k3d/v4/cmd/registry"
cliutil "github.com/rancher/k3d/v4/cmd/util"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/writer"
)
@ -51,8 +53,6 @@ type RootFlags struct {
var flags = RootFlags{}
// var cfgFile string
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "k3d",
@ -96,8 +96,6 @@ func Execute() {
func init() {
cobra.OnInitialize(initLogging, initRuntime)
// add persistent flags (present to all subcommands)
// rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.k3d/config.yaml)")
rootCmd.PersistentFlags().BoolVar(&flags.debugLogging, "verbose", false, "Enable verbose output (debug logging)")
rootCmd.PersistentFlags().BoolVar(&flags.traceLogging, "trace", false, "Enable super verbose output (trace logging)")
@ -110,6 +108,8 @@ func init() {
rootCmd.AddCommand(kubeconfig.NewCmdKubeconfig())
rootCmd.AddCommand(node.NewCmdNode())
rootCmd.AddCommand(image.NewCmdImage())
rootCmd.AddCommand(cfg.NewCmdConfig())
rootCmd.AddCommand(registry.NewCmdRegistry())
rootCmd.AddCommand(&cobra.Command{
Use: "version",

@ -25,9 +25,9 @@ import (
"context"
"strings"
k3dcluster "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3dcluster "github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -71,7 +71,34 @@ func ValidArgsAvailableNodes(cmd *cobra.Command, args []string, toComplete strin
nodeLoop:
for _, node := range nodes {
for _, arg := range args {
if arg == node.Name { // only clusters, that are not in the args yet
if arg == node.Name { // only nodes, that are not in the args yet
continue nodeLoop
}
}
if strings.HasPrefix(node.Name, toComplete) {
completions = append(completions, node.Name)
}
}
return completions, cobra.ShellCompDirectiveDefault
}
// ValidArgsAvailableRegistries is used for shell completions: proposes the list of existing registries
func ValidArgsAvailableRegistries(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
var completions []string
var nodes []*k3d.Node
nodes, err := k3dcluster.NodeList(context.Background(), runtimes.SelectedRuntime)
if err != nil {
log.Errorln("Failed to get list of nodes for shell completion")
return nil, cobra.ShellCompDirectiveError
}
nodes = k3dcluster.NodeFilterByRoles(nodes, []k3d.Role{k3d.RegistryRole}, []k3d.Role{})
nodeLoop:
for _, node := range nodes {
for _, arg := range args {
if arg == node.Name { // only nodes, that are not in the args yet
continue nodeLoop
}
}

@ -23,21 +23,11 @@ package util
import (
"fmt"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/pkg/util"
"regexp"
)
// Regexp pattern to match node filters
var filterRegexp = regexp.MustCompile(`^(?P<group>server|agent|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
// SplitFiltersFromFlag separates a flag's value from the node filter, if there is one
func SplitFiltersFromFlag(flag string) (string, []string, error) {
@ -87,144 +77,3 @@ func SplitFiltersFromFlag(flag string) (string, []string, error) {
return newsplit[0], strings.Split(newsplit[1], ";"), nil
}
// FilterNodes takes a string filter to return a filtered list of nodes
// FilterNodes takes a list of `group[subset]` filter strings (matched against
// filterRegexp) and returns the subset of nodes they select.
// Groups: server | agent | loadbalancer | all. Subsets: index list `[0,1]`,
// range `[start:end]`, or wildcard `[*]`. Results are deduplicated.
func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {

	if len(filters) == 0 || len(filters[0]) == 0 {
		log.Warnln("No node filter specified")
		return nodes, nil
	}

	// map roles to subsets
	serverNodes := []*k3d.Node{}
	agentNodes := []*k3d.Node{}
	var serverlb *k3d.Node
	for _, node := range nodes {
		if node.Role == k3d.ServerRole {
			serverNodes = append(serverNodes, node)
		} else if node.Role == k3d.AgentRole {
			agentNodes = append(agentNodes, node)
		} else if node.Role == k3d.LoadBalancerRole {
			serverlb = node
		}
	}

	filteredNodes := []*k3d.Node{}
	set := make(map[*k3d.Node]struct{}) // dedupe guard: each node appears at most once

	// range over all instances of group[subset] specs
	for _, filter := range filters {

		// match regex with capturing groups
		match := filterRegexp.FindStringSubmatch(filter)

		if len(match) == 0 {
			return nil, fmt.Errorf("Failed to parse node filters: invalid format or empty subset in '%s'", filter)
		}

		// map capturing group names to submatches
		submatches := util.MapSubexpNames(filterRegexp.SubexpNames(), match)

		// if one of the filters is 'all', we only return this and drop all others
		if submatches["group"] == "all" {
			// TODO: filterNodes: only log if really more than one is specified
			log.Warnf("Node filter 'all' set, but more were specified in '%+v'", filters)
			return nodes, nil
		}

		// Choose the group of nodes to operate on
		groupNodes := []*k3d.Node{}
		if submatches["group"] == string(k3d.ServerRole) {
			groupNodes = serverNodes
		} else if submatches["group"] == string(k3d.AgentRole) {
			groupNodes = agentNodes
		} else if submatches["group"] == string(k3d.LoadBalancerRole) {
			// FIX: previously a nil serverlb was appended unchecked when the node
			// list contained no loadbalancer; fail explicitly instead.
			if serverlb == nil {
				return nil, fmt.Errorf("Filter '%s' targets the loadbalancer, but no loadbalancer node was found", filter)
			}
			filteredNodes = append(filteredNodes, serverlb)
			return filteredNodes, nil // early exit if filtered group is the loadbalancer
		}

		/* Option 1) subset defined by list */
		if submatches["subsetList"] != "" {
			for _, index := range strings.Split(submatches["subsetList"], ",") {
				if index != "" {
					num, err := strconv.Atoi(index)
					if err != nil {
						return nil, fmt.Errorf("Failed to convert subset number to integer in '%s'", filter)
					}
					if num < 0 || num >= len(groupNodes) {
						return nil, fmt.Errorf("Index out of range: index '%d' < 0 or > number of available nodes in filter '%s'", num, filter)
					}
					if _, exists := set[groupNodes[num]]; !exists {
						filteredNodes = append(filteredNodes, groupNodes[num])
						set[groupNodes[num]] = struct{}{}
					}
				}
			}

			/* Option 2) subset defined by range */
		} else if submatches["subsetRange"] != "" {

			/*
			 * subset specified by a range 'START:END', where each side is optional
			 */

			split := strings.Split(submatches["subsetRange"], ":")
			if len(split) != 2 {
				return nil, fmt.Errorf("Failed to parse subset range in '%s'", filter)
			}

			start := 0
			end := len(groupNodes) - 1

			var err error

			if split[0] != "" {
				start, err = strconv.Atoi(split[0])
				if err != nil {
					return nil, fmt.Errorf("Failed to convert subset range start to integer in '%s'", filter)
				}
				if start < 0 || start >= len(groupNodes) {
					return nil, fmt.Errorf("Invalid subset range: start < 0 or > number of available nodes in '%s'", filter)
				}
			}

			if split[1] != "" {
				end, err = strconv.Atoi(split[1])
				if err != nil {
					// FIX: message previously said "start" (copy/paste error)
					return nil, fmt.Errorf("Failed to convert subset range end to integer in '%s'", filter)
				}
				if end < start || end >= len(groupNodes) {
					return nil, fmt.Errorf("Invalid subset range: end < start or > number of available nodes in '%s'", filter)
				}
			}

			for i := start; i <= end; i++ {
				if _, exists := set[groupNodes[i]]; !exists {
					filteredNodes = append(filteredNodes, groupNodes[i])
					set[groupNodes[i]] = struct{}{}
				}
			}

			/* Option 3) subset defined by wildcard */
		} else if submatches["subsetWildcard"] == "*" {
			/*
			 * '*' = all nodes
			 */
			for _, node := range groupNodes {
				if _, exists := set[node]; !exists {
					filteredNodes = append(filteredNodes, node)
					set[node] = struct{}{}
				}
			}

			/* Option X) invalid/unknown subset */
		} else {
			return nil, fmt.Errorf("Failed to parse node specifiers: unknown subset in '%s'", filter)
		}

	}

	return filteredNodes, nil
}

@ -0,0 +1,89 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package util
import (
"encoding/json"
"fmt"
"os"
"sort"
"strings"
"github.com/liggitt/tabwriter"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
// NodePrinter renders a single node as one row into a tabwriter.
type NodePrinter interface {
	Print(*tabwriter.Writer, *k3d.Node)
}

// NodePrinterFunc adapts a plain function to the NodePrinter interface.
type NodePrinterFunc func(*tabwriter.Writer, *k3d.Node)

// Print calls the wrapped function.
func (npf NodePrinterFunc) Print(writer *tabwriter.Writer, node *k3d.Node) {
	npf(writer, node)
}
// PrintNodes prints a list of nodes, either as a table or as a JSON/YAML listing
func PrintNodes(nodes []*k3d.Node, outputFormat string, headers *[]string, nodePrinter NodePrinter) {
outputFormat = strings.ToLower(outputFormat)
tabwriter := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', tabwriter.RememberWidths)
defer tabwriter.Flush()
if outputFormat != "json" && outputFormat != "yaml" {
if headers != nil {
_, err := fmt.Fprintf(tabwriter, "%s\n", strings.Join(*headers, "\t"))
if err != nil {
log.Fatalln("Failed to print headers")
}
}
}
sort.Slice(nodes, func(i, j int) bool {
return nodes[i].Name < nodes[j].Name
})
if outputFormat == "json" || outputFormat == "yaml" {
var b []byte
var err error
switch outputFormat {
case "json":
b, err = json.Marshal(nodes)
case "yaml":
b, err = yaml.Marshal(nodes)
}
if err != nil {
fmt.Println(err)
return
}
fmt.Println(string(b))
} else {
for _, node := range nodes {
if !(outputFormat == "json" || outputFormat == "yaml") {
nodePrinter.Print(tabwriter, node)
}
}
}
}

@ -28,7 +28,7 @@ import (
"os/exec"
"strings"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
// HandlePlugin takes care of finding and executing a plugin based on the longest prefix

@ -24,60 +24,83 @@ package util
import (
"fmt"
"net"
"regexp"
"strconv"
"strings"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/docker/go-connections/nat"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
)
// ParseAPIPort parses/validates a string to create an exposeAPI struct from it
func ParseAPIPort(portString string) (k3d.ExposeAPI, error) {
var apiPortRegexp = regexp.MustCompile(`^(?P<hostref>(?P<hostip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|(?P<hostname>\S+):)?(?P<port>(\d{1,5}|random))$`)
var exposeAPI k3d.ExposeAPI
// ParsePortExposureSpec parses/validates a string to create an exposePort struct from it
func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureOpts, error) {
split := strings.Split(portString, ":")
if len(split) > 2 {
log.Errorln("Failed to parse API Port specification")
return exposeAPI, fmt.Errorf("api-port format error")
match := apiPortRegexp.FindStringSubmatch(exposedPortSpec)
if len(match) == 0 {
log.Errorln("Failed to parse Port Exposure specification")
return nil, fmt.Errorf("Port Exposure Spec format error: Must be [(HostIP|HostName):]HostPort")
}
submatches := util.MapSubexpNames(apiPortRegexp.SubexpNames(), match)
// no port specified (or not matched via regex)
if submatches["port"] == "" {
return nil, fmt.Errorf("Failed to find port in Port Exposure spec '%s'", exposedPortSpec)
}
if len(split) == 1 {
exposeAPI = k3d.ExposeAPI{Port: split[0]}
} else {
// Make sure 'host' can be resolved to an IP address
addrs, err := net.LookupHost(split[0])
api := &k3d.ExposureOpts{}
// check if there's a host reference
if submatches["hostname"] != "" {
log.Tracef("Port Exposure: found hostname: %s", submatches["hostname"])
addrs, err := net.LookupHost(submatches["hostname"])
if err != nil {
return exposeAPI, err
return nil, fmt.Errorf("Failed to lookup host '%s' specified for Port Exposure: %+v", submatches["hostname"], err)
}
exposeAPI = k3d.ExposeAPI{Host: split[0], HostIP: addrs[0], Port: split[1]}
api.Host = submatches["hostname"]
submatches["hostip"] = addrs[0] // set hostip to the resolved address
}
// Verify 'port' is an integer and within port ranges
if exposeAPI.Port == "" || exposeAPI.Port == "random" {
log.Debugf("API-Port Mapping didn't specify hostPort, choosing one randomly...")
realPortString := ""
if submatches["hostip"] == "" {
submatches["hostip"] = k3d.DefaultAPIHost
}
// start with the IP, if there is any
if submatches["hostip"] != "" {
realPortString += submatches["hostip"] + ":"
}
// port: get a free one if there's none defined or set to random
if submatches["port"] == "" || submatches["port"] == "random" {
log.Debugf("Port Exposure Mapping didn't specify hostPort, choosing one randomly...")
freePort, err := GetFreePort()
if err != nil || freePort == 0 {
log.Warnf("Failed to get random free port:\n%+v", err)
log.Warnf("Falling back to default port %s (may be blocked though)...", k3d.DefaultAPIPort)
exposeAPI.Port = k3d.DefaultAPIPort
log.Warnf("Failed to get random free port: %+v", err)
log.Warnf("Falling back to internal port %s (may be blocked though)...", internalPort)
submatches["port"] = internalPort
} else {
exposeAPI.Port = strconv.Itoa(freePort)
log.Debugf("Got free port for API: '%d'", freePort)
submatches["port"] = strconv.Itoa(freePort)
log.Debugf("Got free port for Port Exposure: '%d'", freePort)
}
}
p, err := strconv.Atoi(exposeAPI.Port)
realPortString += fmt.Sprintf("%s:%s/tcp", submatches["port"], internalPort)
portMapping, err := nat.ParsePortSpec(realPortString)
if err != nil {
log.Errorln("Failed to parse port mapping")
return exposeAPI, err
return nil, fmt.Errorf("Failed to parse port spec for Port Exposure '%s': %+v", realPortString, err)
}
if p < 0 || p > 65535 {
log.Errorln("Failed to parse API Port specification")
return exposeAPI, fmt.Errorf("Port value '%d' out of range", p)
}
api.Port = portMapping[0].Port // there can be only one due to our regexp
api.Binding = portMapping[0].Binding
return exposeAPI, nil
return api, nil
}

@ -26,7 +26,7 @@ import (
"os"
"strings"
"github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/runtimes"
log "github.com/sirupsen/logrus"
)

@ -24,7 +24,7 @@
- --label -> planned
- --workers -> implemented
- --auto-restart -> dropped (docker's `unless-stopped` is set by default)
- --enable-registry -> planned (possible consolidation into less registry-related commands?)
- --enable-registry -> coming in v4.0.0 (2021) as `--registry-create` and `--registry-use`
- --registry-name -> TBD
- --registry-port -> TBD
- --registry-volume -> TBD

@ -2,7 +2,7 @@
![k3d](static/img/k3d_logo_black_blue.svg)
**This page is targeting k3d v3.0.0 and newer!**
**This page is targeting k3d v4.0.0 and newer!**
k3d is a lightweight wrapper to run [k3s](https://github.com/rancher/k3s) (Rancher Lab's minimal Kubernetes distribution) in docker.
@ -34,14 +34,14 @@ k3d makes it very easy to create single- and multi-node [k3s](https://github.com
You have several options there:
- use the install script to grab the latest release:
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash`
- use the install script to grab a specific release (via `TAG` environment variable):
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v3.0.0 bash`
- wget: `#!bash wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
- curl: `#!bash curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | TAG=v4.0.0 bash`
- use [Homebrew](https://brew.sh): `#!bash brew install k3d` (Homebrew is available for MacOS and Linux)
- Formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
- Formula can be found in [homebrew/homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/k3d.rb) and is mirrored to [homebrew/linuxbrew-core](https://github.com/Homebrew/linuxbrew-core/blob/master/Formula/k3d.rb)
- install via [AUR](https://aur.archlinux.org/) package [rancher-k3d-bin](https://aur.archlinux.org/packages/rancher-k3d-bin/): `yay -S rancher-k3d-bin`
- grab a release from the [release tab](https://github.com/rancher/k3d/releases) and install it yourself.
- install via go: `#!bash go install github.com/rancher/k3d` (**Note**: this will give you unreleased/bleeding-edge changes)
@ -61,7 +61,7 @@ k3d cluster create mycluster
Get the new cluster's connection details merged into your default kubeconfig (usually specified using the `KUBECONFIG` environment variable or the default path `#!bash $HOME/.kube/config`) and directly switch to the new context:
```bash
k3d kubeconfig merge mycluster --switch-context
k3d kubeconfig merge mycluster --kubeconfig-switch-context
```
Use the new cluster with [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/), e.g.:

@ -1,12 +1,15 @@
# Defaults
- multiple server nodes
- by default, when `--server` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
- the initializing server node will have the `--cluster-init` flag appended
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
- by default, when `--server` > 1 and no `--datastore-x` option is set, the first server node (server-0) will be the initializing server node
- the initializing server node will have the `--cluster-init` flag appended
- all other server nodes will refer to the initializing server node via `--server https://<init-node>:6443`
- API-Ports
- by default, we don't expose any API-Port (no host port mapping)
- by default, we expose the API-Port (`6443`) by forwarding traffic from the default server loadbalancer (nginx container) to the server node(s)
- port `6443` of the loadbalancer is then mapped to a specific (`--api-port` flag) or a random (default) port on the host system
- kubeconfig
- if `--[update|merge]-default-kubeconfig` is set, we use the default loading rules to get the default kubeconfig:
- First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
- Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)
- if `--kubeconfig-update-default` is set, we use the default loading rules to get the default kubeconfig:
- First: kubeconfig specified via the KUBECONFIG environment variable (error out if multiple are specified)
- Second: default kubeconfig in home directory (e.g. `$HOME/.kube/config`)
- Networking
- [by default, k3d creates a new (docker) network for every cluster](./networking)

@ -1,7 +1,7 @@
# Networking
- Related issues:
- [rancher/k3d #220](https://github.com/rancher/k3d/issues/220)
- [rancher/k3d #220](https://github.com/rancher/k3d/issues/220)
## Introduction

@ -2,70 +2,90 @@
```bash
k3d
--verbose # enable verbose (debug) logging (default: false)
--verbose # GLOBAL: enable verbose (debug) logging (default: false)
--trace # GLOBAL: enable super verbose logging (trace logging) (default: false)
--version # show k3d and k3s version
-h, --help # show help text
version # show k3d and k3s version
help [COMMAND] # show help text for any command
completion [bash | zsh | (psh | powershell)] # generate completion scripts for common shells
-h, --help # GLOBAL: show help text
cluster [CLUSTERNAME] # default cluster name is 'k3s-default'
create
--api-port # specify the port on which the cluster will be accessible (e.g. via kubectl)
-i, --image # specify which k3s image should be used for the nodes
--k3s-agent-arg # add additional arguments to the k3s agent (see https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
--k3s-server-arg # add additional arguments to the k3s server (see https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/#k3s-server-cli-help)
-s, --servers # specify how many server nodes you want to create
--network # specify a network you want to connect to
--no-hostip # disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS
--no-image-volume # disable the creation of a volume for storing images (used for the 'k3d load image' command)
--no-lb # disable the creation of a LoadBalancer in front of the server nodes
--no-rollback # disable the automatic rollback actions, if anything goes wrong
-p, --port # add some more port mappings
--token # specify a cluster token (default: auto-generated)
--timeout # specify a timeout, after which the cluster creation will be interrupted and changes rolled back
--update-default-kubeconfig # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true')
--switch-context # (implies --update-default-kubeconfig) automatically sets the current-context of your default kubeconfig to the new cluster's context
-v, --volume # specify additional bind-mounts
--wait # enable waiting for all server nodes to be ready before returning
-a, --agents # specify how many agent nodes you want to create
-e, --env # add environment variables to the node containers
-a, --agents # specify how many agent nodes you want to create (integer, default: 0)
--api-port # specify the port on which the cluster will be accessible (format '[HOST:]HOSTPORT', default: random)
-c, --config # use a config file (format 'PATH')
-e, --env # add environment variables to the nodes (quoted string, format: 'KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]', use flag multiple times)
--gpus # [from docker CLI] add GPU devices to the node containers (string, e.g. 'all')
-i, --image # specify which k3s image should be used for the nodes (string, default: 'docker.io/rancher/k3s:v1.20.0-k3s2', tag changes per build)
--k3s-agent-arg # add additional arguments to the k3s agent (quoted string, use flag multiple times) (see https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/#k3s-agent-cli-help)
--k3s-server-arg # add additional arguments to the k3s server (quoted string, use flag multiple times) (see https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/#k3s-server-cli-help)
--kubeconfig-switch-context # (implies --kubeconfig-update-default) automatically sets the current-context of your default kubeconfig to the new cluster's context (default: true)
--kubeconfig-update-default # enable the automated update of the default kubeconfig with the details of the newly created cluster (also sets '--wait=true') (default: true)
-l, --label # add (docker) labels to the node containers (format: 'KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]', use flag multiple times)
--network # specify an existing (docker) network you want to connect to (string)
--no-hostip # disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS (default: false)
--no-image-volume # disable the creation of a volume for storing images (used for the 'k3d image import' command) (default: false)
--no-lb # disable the creation of a load balancer in front of the server nodes (default: false)
--no-rollback # disable the automatic rollback actions, if anything goes wrong (default: false)
-p, --port # add some more port mappings (format: '[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]', use flag multiple times)
--registry-create # create a new (docker) registry dedicated for this cluster (default: false)
--registry-use # use an existing local (docker) registry with this cluster (string, use multiple times)
-s, --servers # specify how many server nodes you want to create (integer, default: 1)
--token # specify a cluster token (string, default: auto-generated)
--timeout # specify a timeout, after which the cluster creation will be interrupted and changes rolled back (duration, e.g. '10s')
-v, --volume # specify additional bind-mounts (format: '[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]', use flag multiple times)
--wait # enable waiting for all server nodes to be ready before returning (default: true)
start CLUSTERNAME # start a (stopped) cluster
-a, --all # start all clusters
--wait # wait for all servers and server-loadbalancer to be up before returning
--timeout # maximum waiting time for '--wait' before canceling/returning
-a, --all # start all clusters (default: false)
--wait # wait for all servers and server-loadbalancer to be up before returning (default: true)
--timeout # maximum waiting time for '--wait' before canceling/returning (duration, e.g. '10s')
stop CLUSTERNAME # stop a cluster
-a, --all # stop all clusters
-a, --all # stop all clusters (default: false)
delete CLUSTERNAME # delete an existing cluster
-a, --all # delete all existing clusters
-a, --all # delete all existing clusters (default: false)
list [CLUSTERNAME [CLUSTERNAME ...]]
--no-headers # do not print headers
--token # show column with cluster tokens
--no-headers # do not print headers (default: false)
--token # show column with cluster tokens (default: false)
-o, --output # format the output (format: 'json|yaml')
completion [bash | zsh | fish | (psh | powershell)] # generate completion scripts for common shells
config
init # write a default k3d config (as a starting point)
-f, --force # force overwrite target file (default: false)
-o, --output # file to write to (string, default "k3d-default.yaml")
help [COMMAND] # show help text for any command
image
import [IMAGE | ARCHIVE [IMAGE | ARCHIVE ...]] # Load one or more images from the local runtime environment or tar-archives into k3d clusters
-c, --cluster # clusters to load the image into (string, use flag multiple times, default: k3s-default)
-k, --keep-tarball # do not delete the image tarball from the shared volume after completion (default: false)
kubeconfig
get (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and write it to stdout
-a, --all # get kubeconfigs from all clusters (default: false)
merge | write (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and merge it/them into a (kubeconfig-)file
-a, --all # get kubeconfigs from all clusters (default: false)
-s, --kubeconfig-switch-context # switch current-context in kubeconfig to the new context (default: true)
-d, --kubeconfig-merge-default # update the default kubeconfig (usually $KUBECONFIG or $HOME/.kube/config)
-o, --output # specify the output file where the kubeconfig should be written to (string)
--overwrite # [Careful!] forcefully overwrite the output file, ignoring existing contents (default: false)
-u, --update # update conflicting fields in existing kubeconfig (default: true)
node
create NODENAME # Create new nodes (and add them to existing clusters)
-c, --cluster # specify the cluster that the node shall connect to
-i, --image # specify which k3s image should be used for the node(s)
--replicas # specify how many replicas you want to create with this spec
--role # specify the node role
--wait # wait for the node to be up and running before returning
--timeout # specify a timeout duration, after which the node creation will be interrupted, if not done yet
-c, --cluster # specify the cluster that the node shall connect to (string, default: k3s-default)
-i, --image # specify which k3s image should be used for the node(s) (string, default: 'docker.io/rancher/k3s:v1.20.0-k3s2', tag changes per build)
--replicas # specify how many replicas you want to create with this spec (integer, default: 1)
--role # specify the node role (string, format: 'agent|server', default: agent)
--timeout # specify a timeout duration, after which the node creation will be interrupted, if not done yet (duration, e.g. '10s')
--wait # wait for the node to be up and running before returning (default: true)
start NODENAME # start a (stopped) node
stop NODENAME # stop a node
delete NODENAME # delete an existing node
-a, --all # delete all existing nodes
-a, --all # delete all existing nodes (default: false)
list NODENAME
--no-headers # do not print headers
kubeconfig
get (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and write it to stdout
-a, --all # get kubeconfigs from all clusters
merge | write (CLUSTERNAME [CLUSTERNAME ...] | --all) # get kubeconfig from cluster(s) and merge it/them into a file in $HOME/.k3d (or whatever you specify via the flags)
-a, --all # get kubeconfigs from all clusters
--output # specify the output file where the kubeconfig should be written to
--overwrite # [Careful!] forcefully overwrite the output file, ignoring existing contents
-s, --switch-context # switch current-context in kubeconfig to the new context
-u, --update # update conflicting fields in existing kubeconfig (default: true)
-d, --merge-default-kubeconfig # update the default kubeconfig (usually $KUBECONFIG or $HOME/.kube/config)
image
import [IMAGE | ARCHIVE [IMAGE | ARCHIVE ...]] # Load one or more images from the local runtime environment or tar-archives into k3d clusters
-c, --cluster # clusters to load the image into
-k, --keep-tarball # do not delete the image tarball from the shared volume after completion
--no-headers # do not print headers (default: false)
registry
create REGISTRYNAME
-i, --image # specify image used for the registry (string, default: "docker.io/library/registry:2")
-p, --port # select host port to map to (format: '[HOST:]HOSTPORT', default: 'random')
delete REGISTRYNAME
-a, --all # delete all existing registries (default: false)
list [NAME [NAME...]]
--no-headers # disable table headers (default: false)
version # show k3d and k3s version
```

@ -1,9 +1,11 @@
# Running CUDA workloads
If you want to run CUDA workloads on the K3S container you need to customize the container.
CUDA workloads require the NVIDIA Container Runtime, so containerd needs to be configured to use this runtime.
The K3S container itself also needs to run with this runtime. If you are using Docker you can install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html).
## Building a customized K3S image
To get the NVIDIA container runtime in the K3S image you need to build your own K3S image. The native K3S image is based on Alpine but the NVIDIA container runtime is not supported on Alpine yet. To get around this we need to build the image with a supported base image.
### Adapt the Dockerfile
@ -48,13 +50,16 @@ ENV PATH="$PATH:/bin/aux"
ENTRYPOINT ["/bin/k3s"]
CMD ["agent"]
```
This [Dockerfile](cuda/Dockerfile) is based on the [K3S Dockerfile](https://github.com/rancher/k3s/blob/master/package/Dockerfile).
The following changes are applied:
1. Change the base images to Ubuntu 18.04 so the NVIDIA Container Runtime can be installed
2. Add a custom containerd `config.toml` template to add the NVIDIA Container Runtime. This replaces the default `runc` runtime
3. Add a manifest for the NVIDIA driver plugin for Kubernetes
### Configure containerd
We need to configure containerd to use the NVIDIA Container Runtime. We need to customize the config.toml that is used at startup. K3S provides a way to do this using a [config.toml.tmpl](cuda/config.toml.tmpl) file. More information can be found on the [K3S site](https://rancher.com/docs/k3s/latest/en/advanced/#configuring-containerd).
```go
@ -116,7 +121,9 @@ We need to configure containerd to use the NVIDIA Container Runtime. We need to
```
### The NVIDIA device plugin
To enable NVIDIA GPU support on Kubernetes you also need to install the [NVIDIA device plugin](https://github.com/NVIDIA/k8s-device-plugin). The device plugin is a daemonset and allows you to automatically:
To enable NVIDIA GPU support on Kubernetes you also need to install the [NVIDIA device plugin](https://github.com/NVIDIA/k8s-device-plugin). The device plugin is a daemonset and allows you to automatically:
* Expose the number of GPUs on each nodes of your cluster
* Keep track of the health of your GPUs
* Run GPU enabled containers in your Kubernetes cluster.
@ -166,23 +173,27 @@ spec:
```
### Build the K3S image
To build the custom image we need to build K3S because we need the generated output.
Put the following files in a directory:
* [Dockerfile](cuda/Dockerfile)
* [config.toml.tmpl](cuda/config.toml.tmpl)
* [gpu.yaml](cuda/gpu.yaml)
* [build.sh](cuda/build.sh)
* [cuda-vector-add.yaml](cuda/cuda-vector-add.yaml)
The `build.sh` file takes the K3S git tag as argument; it defaults to `v1.18.10+k3s1`. The script performs the following steps:
* pulls K3S
The `build.sh` file takes the K3S git tag as argument; it defaults to `v1.18.10+k3s1`. The script performs the following steps:
* pulls K3S
* builds K3S
* build the custom K3S Docker image
The resulting image is tagged as k3s-gpu:&lt;version tag&gt;. The version tag is the git tag but the '+' sign is replaced with a '-'.
[build.sh](cuda/build.sh):
```bash
#!/bin/bash
set -e
@ -202,35 +213,44 @@ docker build -t k3s-gpu:$IMAGE_TAG .
```
## Run and test the custom image with Docker
You can run a container based on the new image with Docker:
```
```bash
docker run --name k3s-gpu -d --privileged --gpus all k3s-gpu:v1.18.10-k3s1
```
Deploy a [test pod](cuda/cuda-vector-add.yaml):
```
```bash
docker cp cuda-vector-add.yaml k3s-gpu:/cuda-vector-add.yaml
docker exec k3s-gpu kubectl apply -f /cuda-vector-add.yaml
docker exec k3s-gpu kubectl logs cuda-vector-add
```
## Run and test the custom image with k3d
You can use the image with k3d:
```
```bash
k3d cluster create --no-lb --image k3s-gpu:v1.18.10-k3s1 --gpus all
```
Deploy a [test pod](cuda/cuda-vector-add.yaml):
```
```bash
kubectl apply -f cuda-vector-add.yaml
kubectl logs cuda-vector-add
```
## Known issues
* This approach does not work on WSL2 yet. The NVIDIA driver plugin and container runtime rely on the NVIDIA Management Library (NVML) which is not yet supported. See the [CUDA on WSL User Guide](https://docs.nvidia.com/cuda/wsl-user-guide/index.html#known-limitations).
## Acknowledgements:
## Acknowledgements
Most of the information in this article was obtained from various sources:
* [Add NVIDIA GPU support to k3s with containerd](https://dev.to/mweibel/add-nvidia-gpu-support-to-k3s-with-containerd-4j17)
* [microk8s](https://github.com/ubuntu/microk8s)
* [K3S](https://github.com/rancher/k3s)

@ -1,7 +1,7 @@
#!/bin/bash
set -e
cd $(dirname $0)
K3S_TAG="${1:-v1.18.10+k3s1}"
IMAGE_TAG="${K3S_TAG/+/-}"

@ -1,6 +1,6 @@
# Exposing Services
## 1. via Ingress
## 1. via Ingress (recommended)
In this example, we will deploy a simple nginx webserver deployment and make it accessible via ingress.
Therefore, we have to create the cluster in a way, that the internal port 80 (where the `traefik` ingress controller is listening on) is exposed on the host system.
@ -16,7 +16,7 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh
- the `loadbalancer` nodefilter matches only the `serverlb` that's deployed in front of a cluster's server nodes
- all ports exposed on the `serverlb` will be proxied to the same ports on all server nodes in the cluster
2. Get the kubeconfig file
2. Get the kubeconfig file (redundant, as `k3d cluster create` already merges it into your default kubeconfig file)
`#!bash export KUBECONFIG="$(k3d kubeconfig write k3s-default)"`
@ -65,6 +65,7 @@ Therefore, we have to create the cluster in a way, that the internal port 80 (wh
- **Note**: Kubernetes' default NodePort range is [`30000-32767`](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport)
- **Note**: You may as well expose the whole NodePort range from the very beginning, e.g. via `k3d cluster create mycluster --agents 3 -p "30000-32767:30000-32767@server[0]"` (See [this video from @portainer](https://www.youtube.com/watch?v=5HaU6338lAk))
- **Warning**: Docker creates iptable entries and a new proxy process per port-mapping, so this may take a very long time or even freeze your system!
... (Steps 2 and 3 like above) ...

@ -65,12 +65,27 @@ Finally, we can create the cluster, mounting the CA file in the path we specifie
## Using a local registry
### Using the k3d registry
### Using k3d-managed registries
!!! info "Not ported yet"
The k3d-managed registry has not yet been ported from v1.x to v3.x
!!! info "Just ported!"
The k3d-managed registry is available again as of k3d v4.0.0 (January 2021)
### Using your own local registry
#### Create a dedicated registry together with your cluster
1. `#!bash k3d cluster create mycluster --registry-create`: This creates your cluster `mycluster` together with a registry container called `k3d-mycluster-registry`
- k3d sets everything up in the cluster for containerd to be able to pull images from that registry (using the `registries.yaml` file)
- the port, which the registry is listening on will be mapped to a random port on your host system
2. Check the k3d command output or `#!bash docker ps -f name=k3d-mycluster-registry` to find the exposed port (let's use `12345` here)
3. Pull some image (optional) `#!bash docker pull alpine:latest`, re-tag it to reference your newly created registry `#!bash docker tag alpine:latest k3d-mycluster-registry:12345/testimage:local` and push it `#!bash docker push k3d-mycluster-registry:12345/testimage:local`
4. Use kubectl to create a new pod in your cluster using that image to see, if the cluster can pull from the new registry: `#!bash kubectl run --image k3d-mycluster-registry:12345/testimage:local testimage --command -- tail -f /dev/null` (creates a container that will not do anything but keep on running)
#### Create a customized k3d-managed registry
1. `#!bash k3d registry create myregistry.localhost --port 5111` creates a new registry called `myregistry.localhost` (could be used with automatic resolution of `*.localhost`, see next section)
2. `#!bash k3d cluster create newcluster --registry-use k3d-myregistry.localhost:5111` (make sure you use the `k3d-` prefix here) creates a new cluster set up to use that registry
3. continue with step 3 and 4 from the last section for testing
### Using your own (not k3d-managed) local registry
You can start your own local registry it with some `docker` commands, like:
@ -103,14 +118,14 @@ Once again, this will only work with k3s >= v0.10.0 (see the some sections below
You should test that you can
* push to your registry from your local development machine.
* use images from that registry in `Deployments` in your k3d cluster.
- push to your registry from your local development machine.
- use images from that registry in `Deployments` in your k3d cluster.
We will verify these two things for a local registry (located at `registry.localhost:5000`) running in your development machine. Things would be basically the same for checking an external registry, but some additional configuration could be necessary in your local machine when using an authenticated or secure registry (please refer to Docker's documentation for this).
First, we can download some image (like `nginx`) and push it to our local registry with:
```shell script
```bash
docker pull nginx:latest
docker tag nginx:latest registry.localhost:5000/nginx:latest
docker push registry.localhost:5000/nginx:latest
@ -118,7 +133,7 @@ docker push registry.localhost:5000/nginx:latest
Then we can deploy a pod referencing this image to your cluster:
```shell script
```bash
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment

@ -1,7 +1,7 @@
# Handling Kubeconfigs
By default, k3d won't touch your kubeconfig without you telling it to do so.
To get a kubeconfig set up for you to connect to a k3d cluster, you can go different ways.
By default, k3d will update your default kubeconfig with your new cluster's details and set the current-context to it (can be disabled).
To get a kubeconfig set up for you to connect to a k3d cluster without this automatism, you can go different ways.
??? question "What is the default kubeconfig?"
We determine the path of the used or default kubeconfig in two ways:
@ -16,12 +16,12 @@ To get a kubeconfig set up for you to connect to a k3d cluster, you can go diffe
- *Note:* this will create (or update) the file `$HOME/.k3d/kubeconfig-mycluster.yaml`
- *Tip:* Use it: `#!bash export KUBECONFIG=$(k3d kubeconfig write mycluster)`
- *Note 2*: alternatively you can use `#!bash k3d kubeconfig get mycluster > some-file.yaml`
2. Update your default kubeconfig **upon** cluster creation
- `#!bash k3d cluster create mycluster --update-kubeconfig`
- *Note:* this won't switch the current-context (append `--switch-context` to do so)
2. Update your default kubeconfig **upon** cluster creation (DEFAULT)
- `#!bash k3d cluster create mycluster --kubeconfig-update-default`
- *Note:* this won't switch the current-context (append `--kubeconfig-switch-context` to do so)
3. Update your default kubeconfig **after** cluster creation
- `#!bash k3d kubeconfig merge mycluster --merge-default-kubeconfig`
- *Note:* this won't switch the current-context (append `--switch-context` to do so)
- `#!bash k3d kubeconfig merge mycluster --kubeconfig-merge-default`
- *Note:* this won't switch the current-context (append `--kubeconfig-switch-context` to do so)
4. Update a different kubeconfig **after** cluster creation
- `#!bash k3d kubeconfig merge mycluster --output some/other/file.yaml`
- *Note:* this won't switch the current-context
@ -30,7 +30,7 @@ To get a kubeconfig set up for you to connect to a k3d cluster, you can go diffe
!!! info "Switching the current context"
None of the above options switch the current-context by default.
This is intended to be least intrusive, since the current-context has a global effect.
You can switch the current-context directly with the `kubeconfig merge` command by adding the `--switch-context` flag.
You can switch the current-context directly with the `kubeconfig merge` command by adding the `--kubeconfig-switch-context` flag.
## Removing cluster details from the kubeconfig
@ -40,6 +40,6 @@ It will also delete the respective kubeconfig file in `$HOME/.k3d/` if it exists
## Handling multiple clusters
`k3d kubeconfig merge` let's you specify one or more clusters via arguments _or_ all via `--all`.
All kubeconfigs will then be merged into a single file if `--merge-default-kubeconfig` or `--output` is specified.
All kubeconfigs will then be merged into a single file if `--kubeconfig-merge-default` or `--output` is specified.
If none of those two flags was specified, a new file will be created per cluster and the merged path (e.g. `$HOME/.k3d/kubeconfig-cluster1.yaml:$HOME/.k3d/cluster2.yaml`) will be returned.
Note, that with multiple cluster specified, the `--switch-context` flag will change the current context to the cluster which was last in the list.
Note, that with multiple cluster specified, the `--kubeconfig-switch-context` flag will change the current context to the cluster which was last in the list.

@ -1,46 +1,49 @@
module github.com/rancher/k3d/v3
module github.com/rancher/k3d/v4
go 1.14
go 1.15
require (
github.com/Microsoft/hcsshim v0.8.9 // indirect
github.com/Microsoft/hcsshim/test v0.0.0-20201030212021-6e6b6ce98037 // indirect
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102 // indirect
github.com/Microsoft/go-winio v0.4.15 // indirect
github.com/Microsoft/hcsshim v0.8.10 // indirect
github.com/Microsoft/hcsshim/test v0.0.0-20201202232227-2010d9a3eeb0 // indirect
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 // indirect
github.com/containerd/containerd v1.4.1
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 // indirect
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c // indirect
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8 // indirect
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd // indirect
github.com/containerd/continuity v0.0.0-20201204194424-b0f312dbb49a // indirect
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c // indirect
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0 // indirect
github.com/containerd/ttrpc v1.0.2 // indirect
github.com/containerd/typeurl v1.0.1 // indirect
github.com/docker/cli v20.10.0-beta1.0.20201103165149-c20be83d6b34+incompatible
github.com/docker/distribution v0.0.0-20201029003056-f5cdc24dd3d8 // indirect
github.com/docker/docker v17.12.0-ce-rc1.0.20200528204242-89382f2f2074+incompatible
github.com/docker/docker v20.10.0+incompatible
github.com/docker/go-connections v0.4.0
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-test/deep v1.0.4
github.com/gogo/googleapis v1.3.0 // indirect
github.com/golang/protobuf v1.4.3 // indirect
github.com/gogo/googleapis v1.4.0 // indirect
github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5
github.com/imdario/mergo v0.3.9
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
github.com/mitchellh/go-homedir v1.1.0
github.com/moby/sys/mount v0.1.0 // indirect
github.com/moby/term v0.0.0-20200507201656-73f35e472e8f // indirect
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
github.com/mitchellh/mapstructure v1.3.3 // indirect
github.com/moby/sys/mount v0.2.0 // indirect
github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v0.1.1 // indirect
github.com/opencontainers/selinux v1.6.0 // indirect
github.com/pelletier/go-toml v1.8.0 // indirect
github.com/sirupsen/logrus v1.7.0
github.com/spf13/afero v1.3.4 // indirect
github.com/spf13/cast v1.3.1 // indirect
github.com/spf13/cobra v1.1.0
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 // indirect
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/viper v1.7.1
github.com/stretchr/testify v1.6.1 // indirect
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1 // indirect
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect
google.golang.org/genproto v0.0.0-20201103154000-415bd0cd5df6 // indirect
google.golang.org/grpc v1.33.1 // indirect
google.golang.org/protobuf v1.25.0 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
google.golang.org/grpc v1.34.0 // indirect
gopkg.in/ini.v1 v1.58.0 // indirect
gopkg.in/yaml.v2 v2.3.0
gotest.tools/v3 v3.0.2 // indirect
gotest.tools v2.2.0+incompatible
gotest.tools/v3 v3.0.3 // indirect
k8s.io/client-go v0.17.0
k8s.io/utils v0.0.0-20200109141947-94aeca20bf09 // indirect
)

230
go.sum

@ -1,3 +1,4 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@ -27,11 +28,14 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15-0.20200908182639-5b44b70ab3ab/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.15 h1:qkLXKzb1QoVatRyd/YlXZ/Kg0m5K3SPuoD82jjSOaBc=
github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/Microsoft/hcsshim/test v0.0.0-20201030212021-6e6b6ce98037 h1:NEalIFiDFGrOYkshaQ2hLQhIMP4wDa+GnRbUu9RUTxU=
github.com/Microsoft/hcsshim/test v0.0.0-20201030212021-6e6b6ce98037/go.mod h1:30A5igQ91GEmhYJF8TaRP79pMBOYynRsyOByfVV0dU4=
github.com/Microsoft/hcsshim v0.8.10 h1:k5wTrpnVU2/xv8ZuzGkbXVd3js5zJ8RnumPo5RxiIxU=
github.com/Microsoft/hcsshim v0.8.10/go.mod h1:g5uw8EV2mAlzqe94tfNBNdr89fnbD/n3HV0OhsddkmM=
github.com/Microsoft/hcsshim/test v0.0.0-20201202232227-2010d9a3eeb0 h1:/J0Cy6jvSLlBt+zWkTAjC3x5UOhUFTDvzNjRaxbBShY=
github.com/Microsoft/hcsshim/test v0.0.0-20201202232227-2010d9a3eeb0/go.mod h1:IrOsC3sbIiki4idDR4z1Plxm8vAUM5dKN/HEVkRU0GI=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o=
@ -45,11 +49,9 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
@ -61,33 +63,39 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0Bsq
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775 h1:cHzBGGVew0ezFsq2grfy2RsB8hO/eNyBgOLHBCqfR1U=
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
github.com/cilium/ebpf v0.2.0 h1:Fv93L3KKckEcEHR3oApXVzyBTDA8WAm6VXhPE00N3f8=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102 h1:Qf4HiqfvmB7zS6scsmNgTLmByHbq8n9RTF39v+TzP7A=
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/console v1.0.1 h1:u7SFAJyRqWcG6ogaMAx3KjSTy1e3hT9QxqX7Jco7dRc=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY=
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw=
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20201204194424-b0f312dbb49a h1:SsffDYRSRlPV6Jcm8QpjPnRmiQ1265amULS37h+bwyg=
github.com/containerd/continuity v0.0.0-20201204194424-b0f312dbb49a/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk=
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c h1:1c6xmkNiu6Jnr6AKGM91GGNsfU+nPNFvw9BZFSo0E+c=
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3 h1:esQOJREg8nw8aXj6uCN5dfW5cKUBiEJ/+nni1Q/D/sw=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0 h1:e+50zk22gvHLJKe8+d+xSMyA88PPQk/XfWuUw1BdnPA=
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8 h1:jYCTS/16RWXXtVHNHo1KWNegd1kKQ7lHd7BStj/0hKw=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd h1:bRLyitWw3PT/2YuVaCKTPg0cA5dOFKFwKtkfcP2dLsA=
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
github.com/containerd/typeurl v1.0.1 h1:PvuK4E3D5S5q6IqsPDCy928FhP0LUIGcmZ/Yhgp5Djw=
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@ -95,13 +103,14 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -112,35 +121,36 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/cli v20.10.0-beta1.0.20201103165149-c20be83d6b34+incompatible h1:45GdER4Rb9sE5HfzyvJAKbQHLB4PcR/+WR+5HQQjH1I=
github.com/docker/cli v20.10.0-beta1.0.20201103165149-c20be83d6b34+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20171011171712-7484e51bf6af h1:ujR+JcSHkOZMctuIgvi+a/VHpTn0nSy0W7eV5p34xjg=
github.com/docker/distribution v0.0.0-20171011171712-7484e51bf6af/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c h1:6L6qod4JzOm9KEqmfSyO6ZhsnN9dlcISRt+xdoyZeGE=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v0.0.0-20201029003056-f5cdc24dd3d8 h1:WhVldn4LROIhMl7x0YV7HHlLSgucXkeKBU2Wqr9yj9E=
github.com/docker/distribution v0.0.0-20201029003056-f5cdc24dd3d8/go.mod h1:WT7YsGUvF6+xhY8u7G9zLnX1g5tNDLvXCOEGXmbIzLw=
github.com/docker/docker v17.12.0-ce-rc1.0.20200528204242-89382f2f2074+incompatible h1:oQeenT4rlzuBqBKczNk1n1aHdBxYVmv/uWZySvk3Boo=
github.com/docker/docker v17.12.0-ce-rc1.0.20200528204242-89382f2f2074+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.0+incompatible h1:4g8Xjho+7quMwzsTrhtrWpdQU9UTc2rX57A3iALaBmE=
github.com/docker/docker v20.10.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@ -157,7 +167,6 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho=
github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
@ -180,13 +189,12 @@ github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6
github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.3.0 h1:M695OaDJ5ipWvDPcoAg/YL9c3uORAegkEfBqTQF/fTQ=
github.com/gogo/googleapis v1.3.0/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU=
github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE=
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
@ -198,21 +206,18 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
@ -232,7 +237,6 @@ github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunE
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@ -241,6 +245,8 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -248,6 +254,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -255,6 +262,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@ -293,7 +301,6 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5 h1:6ZR6HQ+P9ZUwHlYq+bU7e9wqAImxKUguq8fp2gZSgCo=
github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5/go.mod h1:Yho0S7KhsnHQRCC5lDraYF1SsLMeWtf/tKdufKu3TJA=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
@ -303,14 +310,13 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@ -321,10 +327,11 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@ -335,6 +342,7 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
@ -353,15 +361,16 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/sys/mount v0.1.0 h1:Ytx78EatgFKtrqZ0BvJ0UtJE472ZvawVmil6pIfuCCU=
github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
github.com/moby/sys/mountinfo v0.1.0 h1:r8vMRbMAFEAfiNptYVokP+nfxPJzvRuia5e2vzXtENo=
github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
github.com/moby/term v0.0.0-20200507201656-73f35e472e8f h1:FQQ9Wo/j3IZrVSv8RkGZoeYMuec0xAoSNijF1UqEgB4=
github.com/moby/term v0.0.0-20200507201656-73f35e472e8f/go.mod h1:uF4OSdW39LLr+K/v/iL6dOm257SGdQJGiyMU1QlNd6s=
github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
github.com/moby/sys/mountinfo v0.4.0 h1:1KInV3Huv18akCu58V7lzNlt+jFmqlu1EaErnEHE/VM=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf h1:Un6PNx5oMK6CCwO3QTUyPiK2mtZnPrpDl5UnZ64eCkw=
github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -369,8 +378,8 @@ github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lN
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@ -405,14 +414,16 @@ github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw=
github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@ -420,8 +431,6 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -429,14 +438,10 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
@ -456,55 +461,62 @@ github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOms
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.3.4 h1:8q6vk3hthlpb2SouZcnBVKboxWQWMDNF38bwholZrJc=
github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.1.0 h1:aq3wCKjTPmzcNWLVGnsFVN4rflK7Uzn10F8/aw8MhdQ=
github.com/spf13/cobra v1.1.0/go.mod h1:yk5b0mALVusDL5fMM6Rd1wgnoO5jUPhwsQ6LQAJTidQ=
github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
@ -523,6 +535,7 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@ -535,14 +548,12 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d h1:9FCpayM9Egr1baVnV1SX0H87m+XB0B8S0hAMi99X/3U=
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -573,24 +584,19 @@ golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -607,7 +613,6 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -618,28 +623,32 @@ golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1 h1:a/mKvvZr9Jcc8oKfcmgzyp7OwF73JPWsQLvH1z2Kxck=
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -677,83 +686,77 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7 h1:ZUjXAXmrAyrmmCPHgCA/vChHcpsX27MZ3yBonD/z1KE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20201103154000-415bd0cd5df6 h1:rMoZiLTOobSD3eg30lPMcFkBFNSyKUQQIQlw/hsAXME=
google.golang.org/genproto v0.0.0-20201103154000-415bd0cd5df6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1 h1:DGeFlSan2f+WEtCERJ4J9GJWk15TxUi8QGagfI87Xyc=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.58.0 h1:VdDvTzv/005R8vEFyQ56bpEnOKTNPbpJhL0VCohxlQw=
gopkg.in/ini.v1 v1.58.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@ -773,9 +776,8 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200109141947-94aeca20bf09 h1:sz6xjn8QP74104YNmJpzLbJ+a3ZtHt0tkD0g8vpdWNw=
k8s.io/utils v0.0.0-20200109141947-94aeca20bf09/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY=

@ -21,7 +21,7 @@ THE SOFTWARE.
*/
package main
import "github.com/rancher/k3d/v3/cmd"
import "github.com/rancher/k3d/v4/cmd"
func main() {
cmd.Execute()

@ -0,0 +1,39 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package actions
import (
"context"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
type WriteFileAction struct {
Runtime runtimes.Runtime
Content []byte
Dest string
}
func (act WriteFileAction) Run(ctx context.Context, node *k3d.Node) error {
return act.Runtime.WriteToNode(ctx, act.Content, act.Dest, node)
}

@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cluster
package client
import (
"bytes"
@ -33,53 +33,199 @@ import (
gort "runtime"
"github.com/docker/go-connections/nat"
"github.com/imdario/mergo"
k3drt "github.com/rancher/k3d/v3/pkg/runtimes"
"github.com/rancher/k3d/v3/pkg/runtimes/docker"
"github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/pkg/util"
"github.com/rancher/k3d/v3/version"
"github.com/rancher/k3d/v4/pkg/actions"
config "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
k3drt "github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors"
"github.com/rancher/k3d/v4/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"gopkg.in/yaml.v2"
)
// ClusterCreate creates a new cluster consisting of
// - some containerized k3s nodes
// - a docker network
func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
clusterCreateCtx := ctx
// ClusterRun orchestrates the steps of cluster creation, configuration and starting
func ClusterRun(ctx context.Context, runtime k3drt.Runtime, clusterConfig *config.ClusterConfig) error {
/*
* Step 0: (Infrastructure) Preparation
*/
if err := ClusterPrep(ctx, runtime, clusterConfig); err != nil {
return fmt.Errorf("Failed Cluster Preparation: %+v", err)
}
/*
* Step 1: Create Containers
*/
if err := ClusterCreate(ctx, runtime, &clusterConfig.Cluster, &clusterConfig.ClusterCreateOpts); err != nil {
return fmt.Errorf("Failed Cluster Creation: %+v", err)
}
/*
* Step 2: Pre-Start Configuration
*/
// TODO: ClusterRun: add cluster configuration step here
/*
* Step 3: Start Containers
*/
if err := ClusterStart(ctx, runtime, &clusterConfig.Cluster, k3d.ClusterStartOpts{
WaitForServer: clusterConfig.ClusterCreateOpts.WaitForServer,
Timeout: clusterConfig.ClusterCreateOpts.Timeout, // TODO: here we should consider the time used so far
NodeHooks: clusterConfig.ClusterCreateOpts.NodeHooks,
}); err != nil {
return fmt.Errorf("Failed Cluster Start: %+v", err)
}
/*
* Post-Start Configuration
*/
/**********************************
* Additional Cluster Preparation *
**********************************/
/*
* Networking Magic
*/
// add /etc/hosts and CoreDNS entry for host.k3d.internal, referring to the host system
if !clusterConfig.ClusterCreateOpts.PrepDisableHostIPInjection {
prepInjectHostIP(ctx, runtime, &clusterConfig.Cluster)
}
// create the registry hosting configmap
if err := prepCreateLocalRegistryHostingConfigMap(ctx, runtime, &clusterConfig.Cluster); err != nil {
log.Warnf("Failed to create LocalRegistryHosting ConfigMap: %+v", err)
}
return nil
}
// ClusterPrep takes care of the steps required before creating/starting the cluster containers
func ClusterPrep(ctx context.Context, runtime k3drt.Runtime, clusterConfig *config.ClusterConfig) error {
/*
* Set up contexts
* Used for (early) termination (across API boundaries)
*/
clusterPrepCtx := ctx
if cluster.CreateClusterOpts.Timeout > 0*time.Second {
var cancelClusterCreateCtx context.CancelFunc
if clusterConfig.ClusterCreateOpts.Timeout > 0*time.Second {
var cancelClusterPrepCtx context.CancelFunc
clusterCreateCtx, cancelClusterCreateCtx = context.WithTimeout(ctx, cluster.CreateClusterOpts.Timeout)
clusterPrepCtx, cancelClusterPrepCtx = context.WithTimeout(ctx, cluster.CreateClusterOpts.Timeout)
defer cancelClusterCreateCtx()
clusterPrepCtx, cancelClusterPrepCtx = context.WithTimeout(ctx, clusterConfig.ClusterCreateOpts.Timeout)
defer cancelClusterPrepCtx()
}
/*
* Network
* Step 0: Pre-Pull Images
*/
// TODO: ClusterPrep: add image pre-pulling step
if cluster.ExposeAPI.Host == k3d.DefaultAPIHost && runtime == k3drt.Docker {
if gort.GOOS == "windows" || gort.GOOS == "darwin" {
log.Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
machineIP, err := runtime.(docker.Docker).GetDockerMachineIP()
/*
* Step 1: Network
*/
if err := ClusterPrepNetwork(clusterPrepCtx, runtime, &clusterConfig.Cluster, &clusterConfig.ClusterCreateOpts); err != nil {
return fmt.Errorf("Failed Network Preparation: %+v", err)
}
/*
* Step 2: Volume(s)
*/
if !clusterConfig.ClusterCreateOpts.DisableImageVolume {
if err := ClusterPrepImageVolume(ctx, runtime, &clusterConfig.Cluster, &clusterConfig.ClusterCreateOpts); err != nil {
return fmt.Errorf("Failed Image Volume Preparation: %+v", err)
}
}
/*
* Step 3: Registries
*/
// Ensure referenced registries
for _, reg := range clusterConfig.ClusterCreateOpts.Registries.Use {
log.Debugf("Trying to find registry %s", reg.Host)
regNode, err := runtime.GetNode(ctx, &k3d.Node{Name: reg.Host})
if err != nil {
return fmt.Errorf("Failed to find registry node '%s': %+v", reg.Host, err)
}
regFromNode, err := RegistryFromNode(regNode)
if err != nil {
return err
}
*reg = *regFromNode
}
// Create managed registry bound to this cluster
if clusterConfig.ClusterCreateOpts.Registries.Create != nil {
registryNode, err := RegistryCreate(ctx, runtime, clusterConfig.ClusterCreateOpts.Registries.Create)
if err != nil {
return fmt.Errorf("Failed to create registry: %+v", err)
}
clusterConfig.Cluster.Nodes = append(clusterConfig.Cluster.Nodes, registryNode)
clusterConfig.ClusterCreateOpts.Registries.Use = append(clusterConfig.ClusterCreateOpts.Registries.Use, clusterConfig.ClusterCreateOpts.Registries.Create)
}
// Use existing registries (including the new one, if created)
log.Tracef("Using Registries: %+v", clusterConfig.ClusterCreateOpts.Registries.Use)
if len(clusterConfig.ClusterCreateOpts.Registries.Use) > 0 {
// ensure that all selected registries exist and connect them to the cluster network
for _, externalReg := range clusterConfig.ClusterCreateOpts.Registries.Use {
regNode, err := runtime.GetNode(ctx, &k3d.Node{Name: externalReg.Host})
if err != nil {
log.Warnf("Using docker-machine, but failed to get it's IP: %+v", err)
} else if machineIP != "" {
log.Infof("Using the docker-machine IP %s to connect to the Kubernetes API", machineIP)
cluster.ExposeAPI.Host = machineIP
cluster.ExposeAPI.HostIP = machineIP
} else {
log.Traceln("Not using docker-machine")
return fmt.Errorf("Failed to find registry node '%s': %+v", externalReg.Host, err)
}
if err := RegistryConnectNetworks(ctx, runtime, regNode, []string{clusterConfig.Cluster.Network.Name}); err != nil {
return fmt.Errorf("Failed to connect registry node '%s' to cluster network: %+v", regNode.Name, err)
}
}
// generate the registries.yaml
regConf, err := RegistryGenerateK3sConfig(ctx, clusterConfig.ClusterCreateOpts.Registries.Use)
if err != nil {
return fmt.Errorf("Failed to generate registry config file for k3s: %+v", err)
}
regConfBytes, err := yaml.Marshal(&regConf)
if err != nil {
return fmt.Errorf("Failed to marshal registry configuration: %+v", err)
}
clusterConfig.ClusterCreateOpts.NodeHooks = append(clusterConfig.ClusterCreateOpts.NodeHooks, k3d.NodeHook{
Stage: k3d.LifecycleStagePreStart,
Action: actions.WriteFileAction{
Runtime: runtime,
Content: regConfBytes,
Dest: k3d.DefaultRegistriesFilePath,
},
})
// generate the LocalRegistryHosting configmap
regCm, err := RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx, clusterConfig.ClusterCreateOpts.Registries.Use)
if err != nil {
return fmt.Errorf("Failed to generate LocalRegistryHosting configmap: %+v", err)
}
log.Tracef("Writing LocalRegistryHosting YAML:\n%s", string(regCm))
clusterConfig.ClusterCreateOpts.NodeHooks = append(clusterConfig.ClusterCreateOpts.NodeHooks, k3d.NodeHook{
Stage: k3d.LifecycleStagePreStart,
Action: actions.WriteFileAction{
Runtime: runtime,
Content: regCm,
Dest: "/tmp/reg.yaml",
},
})
}
return nil
}
// ClusterPrepNetwork creates a new cluster network, if needed or sets everything up to re-use an existing network
func ClusterPrepNetwork(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterCreateOpts *k3d.ClusterCreateOpts) error {
log.Infoln("Prep: Network")
// error out if external cluster network should be used but no name was set
if cluster.Network.Name == "" && cluster.Network.External {
return fmt.Errorf("Failed to use external network because no name was specified")
@ -91,83 +237,137 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
}
// handle hostnetwork
useHostNet := false
if cluster.Network.Name == "host" {
useHostNet = true
if len(cluster.Nodes) > 1 {
return fmt.Errorf("Only one server node supported when using host network")
}
}
// create cluster network or use an existing one
networkID, networkExists, err := runtime.CreateNetworkIfNotPresent(clusterCreateCtx, cluster.Network.Name)
networkID, networkExists, err := runtime.CreateNetworkIfNotPresent(ctx, cluster.Network.Name)
if err != nil {
log.Errorln("Failed to create cluster network")
return err
}
cluster.Network.Name = networkID
extraLabels := map[string]string{
k3d.LabelNetwork: networkID,
k3d.LabelNetworkExternal: strconv.FormatBool(cluster.Network.External),
}
clusterCreateOpts.GlobalLabels[k3d.LabelNetwork] = networkID
clusterCreateOpts.GlobalLabels[k3d.LabelNetworkExternal] = strconv.FormatBool(cluster.Network.External)
if networkExists {
extraLabels[k3d.LabelNetworkExternal] = "true" // if the network wasn't created, we say that it's managed externally (important for cluster deletion)
clusterCreateOpts.GlobalLabels[k3d.LabelNetworkExternal] = "true" // if the network wasn't created, we say that it's managed externally (important for cluster deletion)
}
return nil
}
func ClusterPrepImageVolume(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterCreateOpts *k3d.ClusterCreateOpts) error {
/*
* Cluster Token
* Cluster-Wide volumes
* - image volume (for importing images)
*/
imageVolumeName := fmt.Sprintf("%s-%s-images", k3d.DefaultObjectNamePrefix, cluster.Name)
if err := runtime.CreateVolume(ctx, imageVolumeName, map[string]string{k3d.LabelClusterName: cluster.Name}); err != nil {
log.Errorf("Failed to create image volume '%s' for cluster '%s'", imageVolumeName, cluster.Name)
return err
}
if cluster.Token == "" {
cluster.Token = GenerateClusterToken()
clusterCreateOpts.GlobalLabels[k3d.LabelImageVolume] = imageVolumeName
// attach volume to nodes
for _, node := range cluster.Nodes {
node.Volumes = append(node.Volumes, fmt.Sprintf("%s:%s", imageVolumeName, k3d.DefaultImageVolumeMountPath))
}
return nil
}
// ClusterCreate creates a new cluster consisting of
// - some containerized k3s nodes
// - a docker network
func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster, clusterCreateOpts *k3d.ClusterCreateOpts) error {
log.Tracef(`
===== Creating Cluster =====
Runtime:
%+v
Cluster:
%+v
ClusterCreatOpts:
%+v
============================
`, runtime, cluster, clusterCreateOpts)
/*
* Cluster-Wide volumes
* - image volume (for importing images)
* Set up contexts
* Used for (early) termination (across API boundaries)
*/
if !cluster.CreateClusterOpts.DisableImageVolume {
imageVolumeName := fmt.Sprintf("%s-%s-images", k3d.DefaultObjectNamePrefix, cluster.Name)
if err := runtime.CreateVolume(clusterCreateCtx, imageVolumeName, map[string]string{k3d.LabelClusterName: cluster.Name}); err != nil {
log.Errorf("Failed to create image volume '%s' for cluster '%s'", imageVolumeName, cluster.Name)
return err
clusterCreateCtx := ctx
if clusterCreateOpts.Timeout > 0*time.Second {
var cancelClusterCreateCtx context.CancelFunc
clusterCreateCtx, cancelClusterCreateCtx = context.WithTimeout(ctx, clusterCreateOpts.Timeout)
defer cancelClusterCreateCtx()
}
/*
* Docker Machine Special Configuration
*/
if cluster.KubeAPI.Host == k3d.DefaultAPIHost && runtime == k3drt.Docker {
if gort.GOOS == "windows" || gort.GOOS == "darwin" {
log.Tracef("Running on %s: checking if it's using docker-machine", gort.GOOS)
machineIP, err := runtime.(docker.Docker).GetDockerMachineIP()
if err != nil {
log.Warnf("Using docker-machine, but failed to get it's IP: %+v", err)
} else if machineIP != "" {
log.Infof("Using the docker-machine IP %s to connect to the Kubernetes API", machineIP)
cluster.KubeAPI.Host = machineIP
cluster.KubeAPI.Binding.HostIP = machineIP
} else {
log.Traceln("Not using docker-machine")
}
}
}
extraLabels[k3d.LabelImageVolume] = imageVolumeName
/*
* Cluster Token
*/
// attach volume to nodes
for _, node := range cluster.Nodes {
node.Volumes = append(node.Volumes, fmt.Sprintf("%s:%s", imageVolumeName, k3d.DefaultImageVolumeMountPath))
}
if cluster.Token == "" {
cluster.Token = GenerateClusterToken()
}
clusterCreateOpts.GlobalLabels[k3d.LabelClusterToken] = cluster.Token
/*
* Nodes
*/
clusterCreateOpts.GlobalLabels[k3d.LabelClusterName] = cluster.Name
// agent defaults (per cluster)
// connection url is always the name of the first server node (index 0)
connectionURL := fmt.Sprintf("https://%s:%s", generateNodeName(cluster.Name, k3d.ServerRole, 0), k3d.DefaultAPIPort)
clusterCreateOpts.GlobalLabels[k3d.LabelClusterURL] = connectionURL
clusterCreateOpts.GlobalEnv = append(clusterCreateOpts.GlobalEnv, fmt.Sprintf("K3S_TOKEN=%s", cluster.Token))
nodeSetup := func(node *k3d.Node, suffix int) error {
// cluster specific settings
if node.Labels == nil {
node.Labels = make(map[string]string) // TODO: maybe create an init function?
}
node.Labels[k3d.LabelClusterName] = cluster.Name
node.Env = append(node.Env, fmt.Sprintf("K3S_TOKEN=%s", cluster.Token))
node.Labels[k3d.LabelClusterToken] = cluster.Token
node.Labels[k3d.LabelClusterURL] = connectionURL
// append extra labels
for k, v := range extraLabels {
// ensure global labels
for k, v := range clusterCreateOpts.GlobalLabels {
node.Labels[k] = v
}
// ensure global env
node.Env = append(node.Env, clusterCreateOpts.GlobalEnv...)
// node role specific settings
if node.Role == k3d.ServerRole {
node.ServerOpts.ExposeAPI = cluster.ExposeAPI
node.ServerOpts.KubeAPI = cluster.KubeAPI
// the cluster has an init server node, but its not this one, so connect it to the init node
if cluster.InitNode != nil && !node.ServerOpts.IsInit {
@ -181,7 +381,7 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
node.Name = generateNodeName(cluster.Name, node.Role, suffix)
node.Network = cluster.Network.Name
node.Restart = true
node.GPURequest = cluster.CreateClusterOpts.GPURequest
node.GPURequest = clusterCreateOpts.GPURequest
// create node
log.Infof("Creating node '%s'", node.Name)
@ -191,7 +391,9 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
}
log.Debugf("Created node '%s'", node.Name)
return err
// start node
//return NodeStart(clusterCreateCtx, runtime, node, k3d.NodeStartOpts{PreStartActions: clusterCreateOpts.NodeHookActions})
return nil
}
// used for node suffices
@ -205,8 +407,8 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
cluster.InitNode.Args = append(cluster.InitNode.Args, "--cluster-init")
// in case the LoadBalancer was disabled, expose the API Port on the initializing server node
if cluster.CreateClusterOpts.DisableLoadBalancer {
cluster.InitNode.Ports = append(cluster.InitNode.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort))
if clusterCreateOpts.DisableLoadBalancer {
cluster.InitNode.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}
}
if err := nodeSetup(cluster.InitNode, serverCount); err != nil {
@ -214,41 +416,8 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
}
serverCount++
// wait for the initnode to come up before doing anything else
for {
select {
case <-clusterCreateCtx.Done():
log.Errorln("Failed to bring up initializing server node in time")
return fmt.Errorf(">>> %w", clusterCreateCtx.Err())
default:
}
log.Debugln("Waiting for initializing server node...")
logreader, err := runtime.GetNodeLogs(clusterCreateCtx, cluster.InitNode, time.Time{})
if err != nil {
if logreader != nil {
logreader.Close()
}
log.Errorln(err)
log.Errorln("Failed to get logs from the initializing server node.. waiting for 3 seconds instead")
time.Sleep(3 * time.Second)
break
}
defer logreader.Close()
buf := new(bytes.Buffer)
nRead, _ := buf.ReadFrom(logreader)
logreader.Close()
if nRead > 0 && strings.Contains(buf.String(), k3d.ReadyLogMessageByRole[k3d.ServerRole]) {
log.Debugln("Initializing server node is up... continuing")
break
}
time.Sleep(time.Second)
}
}
// vars to support waiting for server nodes to be ready
waitForServerWaitgroup, clusterCreateCtx := errgroup.WithContext(clusterCreateCtx)
// create all other nodes, but skip the init node
for _, node := range cluster.Nodes {
if node.Role == k3d.ServerRole {
@ -256,9 +425,9 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
// skip the init node here
if node == cluster.InitNode {
continue
} else if serverCount == 0 && cluster.CreateClusterOpts.DisableLoadBalancer {
} else if serverCount == 0 && clusterCreateOpts.DisableLoadBalancer {
// if this is the first server node and the server loadbalancer is disabled, expose the API Port on this server node
node.Ports = append(node.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort))
node.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}
}
time.Sleep(1 * time.Second) // FIXME: arbitrary wait for one second to avoid race conditions of servers registering
@ -277,22 +446,14 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
return err
}
}
// asynchronously wait for this server node to be ready (by checking the logs for a specific log message)
if node.Role == k3d.ServerRole && cluster.CreateClusterOpts.WaitForServer {
log.Debugf("Waiting for server node '%s' to get ready", node.Name)
if err := NodeWaitForLogMessage(clusterCreateCtx, runtime, node, k3d.ReadyLogMessageByRole[k3d.ServerRole], time.Time{}); err != nil {
return fmt.Errorf("Server node '%s' failed to get ready: %+v", node.Name, err)
}
}
}
/*
* Auxiliary Containers
*/
// *** ServerLoadBalancer ***
if !cluster.CreateClusterOpts.DisableLoadBalancer {
if !useHostNet { // serverlb not supported in hostnetwork mode due to port collisions with server node
if !clusterCreateOpts.DisableLoadBalancer {
if cluster.Network.Name != "host" { // serverlb not supported in hostnetwork mode due to port collisions with server node
// Generate a comma-separated list of server/server names to pass to the LB container
servers := ""
for _, node := range cluster.Nodes {
@ -307,41 +468,27 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
// generate comma-separated list of extra ports to forward
ports := k3d.DefaultAPIPort
for _, portString := range cluster.ServerLoadBalancer.Ports {
split := strings.Split(portString, ":")
port := split[len(split)-1]
if strings.Contains(port, "-") {
split := strings.Split(port, "-")
start, err := strconv.Atoi(split[0])
if err != nil {
log.Errorf("Failed to parse port mapping for loadbalancer '%s'", port)
return err
}
end, err := strconv.Atoi(split[1])
if err != nil {
log.Errorf("Failed to parse port mapping for loadbalancer '%s'", port)
return err
}
for i := start; i <= end; i++ {
ports += "," + strconv.Itoa(i)
}
} else {
ports += "," + port
}
for exposedPort := range cluster.ServerLoadBalancer.Ports {
ports += "," + exposedPort.Port()
}
if cluster.ServerLoadBalancer.Ports == nil {
cluster.ServerLoadBalancer.Ports = nat.PortMap{}
}
cluster.ServerLoadBalancer.Ports[k3d.DefaultAPIPort] = []nat.PortBinding{cluster.KubeAPI.Binding}
// Create LB as a modified node with loadbalancerRole
lbNode := &k3d.Node{
Name: fmt.Sprintf("%s-%s-serverlb", k3d.DefaultObjectNamePrefix, cluster.Name),
Image: fmt.Sprintf("%s:%s", k3d.DefaultLBImageRepo, version.GetHelperImageVersion()),
Ports: append(cluster.ServerLoadBalancer.Ports, fmt.Sprintf("%s:%s:%s/tcp", cluster.ExposeAPI.Host, cluster.ExposeAPI.Port, k3d.DefaultAPIPort)),
Ports: cluster.ServerLoadBalancer.Ports,
Env: []string{
fmt.Sprintf("SERVERS=%s", servers),
fmt.Sprintf("PORTS=%s", ports),
fmt.Sprintf("WORKER_PROCESSES=%d", len(strings.Split(ports, ","))),
},
Role: k3d.LoadBalancerRole,
Labels: k3d.DefaultObjectLabels, // TODO: createLoadBalancer: add more expressive labels
Labels: clusterCreateOpts.GlobalLabels, // TODO: createLoadBalancer: add more expressive labels
Network: cluster.Network.Name,
Restart: true,
}
@ -351,38 +498,12 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
log.Errorln("Failed to create loadbalancer")
return err
}
if cluster.CreateClusterOpts.WaitForServer {
waitForServerWaitgroup.Go(func() error {
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for loadbalancer node '%s'", lbNode.Name)
return NodeWaitForLogMessage(clusterCreateCtx, runtime, lbNode, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], time.Time{})
})
}
log.Debugf("Created loadbalancer '%s'", lbNode.Name)
} else {
log.Infoln("Hostnetwork selected -> Skipping creation of server LoadBalancer")
}
}
if err := waitForServerWaitgroup.Wait(); err != nil {
log.Errorln("Failed to bring up all server nodes (and loadbalancer) in time. Check the logs:")
log.Errorf(">>> %+v", err)
return fmt.Errorf("Failed to bring up cluster")
}
/**********************************
* Additional Cluster Preparation *
**********************************/
/*
* Networking Magic
*/
// add /etc/hosts and CoreDNS entry for host.k3d.internal, referring to the host system
if !cluster.CreateClusterOpts.PrepDisableHostIPInjection {
prepInjectHostIP(clusterPrepCtx, runtime, cluster)
}
return nil
}
@ -390,6 +511,10 @@ func ClusterCreate(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
log.Infof("Deleting cluster '%s'", cluster.Name)
cluster, err := ClusterGet(ctx, runtime, cluster)
if err != nil {
return err
}
log.Debugf("Cluster Details: %+v", cluster)
failed := 0
@ -406,8 +531,32 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
if !cluster.Network.External {
log.Infof("Deleting cluster network '%s'", cluster.Network.Name)
if err := runtime.DeleteNetwork(ctx, cluster.Network.Name); err != nil {
if strings.HasSuffix(err.Error(), "active endpoints") {
log.Warningf("Failed to delete cluster network '%s' because it's still in use: is there another cluster using it?", cluster.Network.Name)
if errors.Is(err, runtimeErr.ErrRuntimeNetworkNotEmpty) { // there are still containers connected to that network
connectedNodes, err := runtime.GetNodesInNetwork(ctx, cluster.Network.Name) // check, if there are any k3d nodes connected to the cluster
if err != nil {
log.Warningf("Failed to check cluster network for connected nodes: %+v", err)
}
if len(connectedNodes) > 0 { // there are still k3d-managed containers (aka nodes) connected to the network
connectedRegistryNodes := util.FilterNodesByRole(connectedNodes, k3d.RegistryRole)
if len(connectedRegistryNodes) == len(connectedNodes) { // only registry node(s) left in the network
for _, node := range connectedRegistryNodes {
log.Debugf("Disconnecting registry node %s from the network...", node.Name)
if err := runtime.DisconnectNodeFromNetwork(ctx, node, cluster.Network.Name); err != nil {
log.Warnf("Failed to disconnect registry %s from network %s", node.Name, cluster.Network.Name)
} else {
if err := runtime.DeleteNetwork(ctx, cluster.Network.Name); err != nil {
log.Warningf("Failed to delete cluster network, even after disconnecting registry node(s): %+v", err)
}
}
}
} else { // besides the registry node(s), there are still other nodes... maybe they still need a registry
log.Debugf("There are some non-registry nodes left in the network")
}
} else {
log.Warningf("Failed to delete cluster network '%s' because it's still in use: is there another cluster using it?", cluster.Network.Name)
}
} else {
log.Warningf("Failed to delete cluster network '%s': '%+v'", cluster.Network.Name, err)
}
@ -434,12 +583,29 @@ func ClusterDelete(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clus
// ClusterList returns a list of all existing clusters
func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, error) {
log.Traceln("Listing Clusters...")
nodes, err := runtime.GetNodesByLabel(ctx, k3d.DefaultObjectLabels)
if err != nil {
log.Errorln("Failed to get clusters")
return nil, err
}
log.Debugf("Found %d nodes", len(nodes))
if log.GetLevel() == log.TraceLevel {
for _, node := range nodes {
log.Tracef("Found node %s of role %s", node.Name, node.Role)
}
}
nodes = NodeFilterByRoles(nodes, k3d.ClusterInternalNodeRoles, k3d.ClusterExternalNodeRoles)
log.Tracef("Found %d cluster-internal nodes", len(nodes))
if log.GetLevel() == log.TraceLevel {
for _, node := range nodes {
log.Tracef("Found cluster-internal node %s of role %s belonging to cluster %s", node.Name, node.Role, node.Labels[k3d.LabelClusterName])
}
}
clusters := []*k3d.Cluster{}
// for each node, check, if we can add it to a cluster or add the cluster if it doesn't exist yet
for _, node := range nodes {
@ -467,6 +633,7 @@ func ClusterList(ctx context.Context, runtime k3drt.Runtime) ([]*k3d.Cluster, er
log.Warnln(err)
}
}
log.Debugf("Found %d clusters", len(clusters))
return clusters, nil
}
@ -575,9 +742,52 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
defer cancel()
}
// vars to support waiting for server nodes to be ready
waitForServerWaitgroup, ctx := errgroup.WithContext(ctx)
/*
* Init Node
*/
for _, n := range cluster.Nodes {
if n.Role == k3d.ServerRole && n.ServerOpts.IsInit {
if err := NodeStart(ctx, runtime, n, k3d.NodeStartOpts{
NodeHooks: startClusterOpts.NodeHooks,
}); err != nil {
return fmt.Errorf("Failed to start initializing server node: %+v", err)
}
// wait for the initnode to come up before doing anything else
for {
select {
case <-ctx.Done():
log.Errorln("Failed to bring up initializing server node in time")
return fmt.Errorf(">>> %w", ctx.Err())
default:
}
log.Debugln("Waiting for initializing server node...")
logreader, err := runtime.GetNodeLogs(ctx, cluster.InitNode, time.Time{})
if err != nil {
if logreader != nil {
logreader.Close()
}
log.Errorln(err)
log.Errorln("Failed to get logs from the initializig server node.. waiting for 3 seconds instead")
time.Sleep(3 * time.Second)
break
}
defer logreader.Close()
buf := new(bytes.Buffer)
nRead, _ := buf.ReadFrom(logreader)
logreader.Close()
if nRead > 0 && strings.Contains(buf.String(), k3d.ReadyLogMessageByRole[k3d.ServerRole]) {
log.Debugln("Initializing server node is up... continuing")
break
}
time.Sleep(time.Second)
}
break
}
}
/*
* Other Nodes
*/
failed := 0
var serverlb *k3d.Node
for _, node := range cluster.Nodes {
@ -592,22 +802,22 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
if !node.State.Running {
// start node
if err := runtime.StartNode(ctx, node); err != nil {
if err := NodeStart(ctx, runtime, node, k3d.NodeStartOpts{
NodeHooks: startClusterOpts.NodeHooks,
}); err != nil {
log.Warningf("Failed to start node '%s': Try to start it manually", node.Name)
failed++
continue
}
// asynchronously wait for this server node to be ready (by checking the logs for a specific log message)
// wait for this server node to be ready (by checking the logs for a specific log message)
if node.Role == k3d.ServerRole && startClusterOpts.WaitForServer {
serverNode := node
waitForServerWaitgroup.Go(func() error {
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for server node '%s'", serverNode.Name)
return NodeWaitForLogMessage(ctx, runtime, serverNode, k3d.ReadyLogMessageByRole[k3d.ServerRole], start)
})
log.Debugf("Waiting for server node '%s' to get ready", node.Name)
if err := NodeWaitForLogMessage(ctx, runtime, node, k3d.ReadyLogMessageByRole[k3d.ServerRole], start); err != nil {
return fmt.Errorf("Server node '%s' failed to get ready: %+v", node.Name, err)
}
}
} else {
log.Infof("Node '%s' already running", node.Name)
}
@ -621,23 +831,17 @@ func ClusterStart(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Clust
log.Warningf("Failed to start serverlb '%s': Try to start it manually", serverlb.Name)
failed++
}
waitForServerWaitgroup.Go(func() error {
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for loadbalancer node '%s'", serverlb.Name)
return NodeWaitForLogMessage(ctx, runtime, serverlb, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], start)
})
// TODO: avoid `level=fatal msg="starting kubernetes: preparing server: post join: a configuration change is already in progress (5)"`
// ... by scanning for this line in logs and restarting the container in case it appears
log.Debugf("Starting to wait for loadbalancer node '%s'", serverlb.Name)
if err := NodeWaitForLogMessage(ctx, runtime, serverlb, k3d.ReadyLogMessageByRole[k3d.LoadBalancerRole], start); err != nil {
return fmt.Errorf("Loadbalancer '%s' failed to get ready: %+v", serverlb.Name, err)
}
} else {
log.Infof("Serverlb '%s' already running", serverlb.Name)
}
}
if err := waitForServerWaitgroup.Wait(); err != nil {
log.Errorln("Failed to bring up all nodes in time. Check the logs:")
log.Errorln(">>> ", err)
return fmt.Errorf("Failed to bring up cluster")
}
if failed > 0 {
return fmt.Errorf("Failed to start %d nodes: Try to start them manually", failed)
}
@ -706,3 +910,22 @@ func prepInjectHostIP(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.C
}
}
func prepCreateLocalRegistryHostingConfigMap(ctx context.Context, runtime k3drt.Runtime, cluster *k3d.Cluster) error {
success := false
for _, node := range cluster.Nodes {
if node.Role == k3d.AgentRole || node.Role == k3d.ServerRole {
err := runtime.ExecInNode(ctx, node, []string{"sh", "-c", "kubectl apply -f /tmp/reg.yaml"})
if err == nil {
success = true
break
} else {
log.Debugf("Failed to create LocalRegistryHosting ConfigMap in node %s: %+v", node.Name, err)
}
}
}
if success == false {
log.Warnf("Failed to create LocalRegistryHosting ConfigMap")
}
return nil
}

@ -19,12 +19,12 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cluster
package client
import (
"fmt"
"github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/pkg/types"
)
// CheckName ensures that a cluster name is also a valid host name according to RFC 1123.

@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cluster
package client
import (
"bufio"
@ -29,9 +29,9 @@ import (
"regexp"
"runtime"
rt "github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/pkg/util"
rt "github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
log "github.com/sirupsen/logrus"
)

@ -19,7 +19,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cluster
package client
import (
"bytes"
@ -30,8 +30,8 @@ import (
"path"
"time"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

@ -19,15 +19,15 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cluster
package client
import (
"context"
"fmt"
"strings"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)

@ -20,18 +20,19 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package cluster
package client
import (
"bytes"
"context"
"fmt"
"reflect"
"strings"
"time"
"github.com/imdario/mergo"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
@ -128,7 +129,7 @@ func NodeAddToCluster(ctx context.Context, runtime runtimes.Runtime, node *k3d.N
}
}
if err := NodeCreate(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
if err := NodeRun(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
return err
}
@ -205,6 +206,40 @@ func NodeCreateMulti(ctx context.Context, runtime runtimes.Runtime, nodes []*k3d
}
// NodeRun creates and starts a node
func NodeRun(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeCreateOpts k3d.NodeCreateOpts) error {
if err := NodeCreate(ctx, runtime, node, nodeCreateOpts); err != nil {
return err
}
if err := NodeStart(ctx, runtime, node, k3d.NodeStartOpts{
Wait: nodeCreateOpts.Wait,
Timeout: nodeCreateOpts.Timeout,
}); err != nil {
return err
}
return nil
}
// NodeStart starts an existing node
func NodeStart(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, nodeStartOpts k3d.NodeStartOpts) error {
for _, hook := range nodeStartOpts.NodeHooks {
if hook.Stage == k3d.LifecycleStagePreStart {
log.Tracef("Executing preStartAction '%s'", reflect.TypeOf(hook))
if err := hook.Action.Run(ctx, node); err != nil {
log.Errorf("Failed executing preStartAction '%+v': %+v", hook, err)
}
}
}
log.Tracef("Starting node '%s'", node.Name)
if err := runtime.StartNode(ctx, node); err != nil {
log.Errorf("Failed to start node *'%s'", node.Name)
return err
}
return nil
}
// NodeCreate creates a new containerized k3s node
func NodeCreate(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, createNodeOpts k3d.NodeCreateOpts) error {
log.Tracef("Creating node from spec\n%+v", node)
@ -258,18 +293,20 @@ func NodeDelete(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) e
log.Error(err)
}
cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.Labels[k3d.LabelClusterName]})
if err != nil {
log.Errorf("Failed to update loadbalancer: Failed to find cluster for node '%s'", node.Name)
return err
}
// if it's a server node, then update the loadbalancer configuration
if node.Role == k3d.ServerRole {
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
log.Errorln("Failed to update cluster loadbalancer")
if node.Role == k3d.ServerRole || node.Role == k3d.AgentRole {
cluster, err := ClusterGet(ctx, runtime, &k3d.Cluster{Name: node.Labels[k3d.LabelClusterName]})
if err != nil {
log.Errorf("Failed to find cluster for node '%s'", node.Name)
return err
}
// if it's a server node, then update the loadbalancer configuration
if node.Role == k3d.ServerRole {
if err := UpdateLoadbalancerConfig(ctx, runtime, cluster); err != nil {
log.Errorln("Failed to update cluster loadbalancer")
return err
}
}
}
return nil
@ -293,11 +330,11 @@ func patchServerSpec(node *k3d.Node) error {
// Add labels and TLS SAN for the exposed API
// FIXME: For now, the labels concerning the API on the server nodes are only being used for configuring the kubeconfig
node.Labels[k3d.LabelServerAPIHostIP] = node.ServerOpts.ExposeAPI.HostIP // TODO: maybe get docker machine IP here
node.Labels[k3d.LabelServerAPIHost] = node.ServerOpts.ExposeAPI.Host
node.Labels[k3d.LabelServerAPIPort] = node.ServerOpts.ExposeAPI.Port
node.Labels[k3d.LabelServerAPIHostIP] = node.ServerOpts.KubeAPI.Binding.HostIP // TODO: maybe get docker machine IP here
node.Labels[k3d.LabelServerAPIHost] = node.ServerOpts.KubeAPI.Host
node.Labels[k3d.LabelServerAPIPort] = node.ServerOpts.KubeAPI.Binding.HostPort
node.Args = append(node.Args, "--tls-san", node.ServerOpts.ExposeAPI.Host) // add TLS SAN for non default host name
node.Args = append(node.Args, "--tls-san", node.ServerOpts.KubeAPI.Host) // add TLS SAN for non default host name
return nil
}
@ -319,6 +356,7 @@ func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3
node, err := runtime.GetNode(ctx, node)
if err != nil {
log.Errorf("Failed to get node '%s'", node.Name)
return nil, err
}
return node, nil
@ -326,6 +364,7 @@ func NodeGet(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node) (*k3
//NodeWaitForLogMessage follows the logs of a node container and returns if it finds a specific line in there (or timeout is reached)
func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *k3d.Node, message string, since time.Time) error {
log.Tracef("NodeWaitForLogMessage: Node '%s' waiting for log message '%s' since '%+v'", node.Name, message, since)
for {
select {
case <-ctx.Done():
@ -356,8 +395,8 @@ func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *
// check if the container is restarting
running, status, _ := runtime.GetNodeStatus(ctx, node)
if running && status == k3d.NodeStatusRestarting {
return fmt.Errorf("Node %s is restarting, early exit to avoid crash loop", node.Name)
if running && status == k3d.NodeStatusRestarting && time.Now().Sub(since) > k3d.NodeWaitForLogMessageRestartWarnTime {
log.Warnf("Node '%s' is restarting for more than a minute now. Possibly it will recover soon (e.g. when it's waiting to join). Consider using a creation timeout to avoid waiting forever in a Restart Loop.", node.Name)
}
time.Sleep(500 * time.Millisecond) // wait for half a second to avoid overloading docker (error `socket: too many open files`)
@ -365,3 +404,36 @@ func NodeWaitForLogMessage(ctx context.Context, runtime runtimes.Runtime, node *
log.Debugf("Finished waiting for log message '%s' from node '%s'", message, node.Name)
return nil
}
// NodeFilterByRoles filters a list of nodes by their roles
func NodeFilterByRoles(nodes []*k3d.Node, includeRoles, excludeRoles []k3d.Role) []*k3d.Node {
// check for conflicting filters
for _, includeRole := range includeRoles {
for _, excludeRole := range excludeRoles {
if includeRole == excludeRole {
log.Warnf("You've specified the same role ('%s') for inclusion and exclusion. Exclusion precedes inclusion.", includeRole)
}
}
}
resultList := []*k3d.Node{}
nodeLoop:
for _, node := range nodes {
// exclude > include
for _, excludeRole := range excludeRoles {
if node.Role == excludeRole {
continue nodeLoop
}
}
// include < exclude
for _, includeRole := range includeRoles {
if node.Role == includeRole {
resultList = append(resultList, node)
continue nodeLoop
}
}
}
return resultList
}

@ -0,0 +1,306 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package client
import (
"context"
"fmt"
"github.com/docker/go-connections/nat"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/types/k3s"
"github.com/rancher/k3d/v4/pkg/types/k8s"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
func RegistryRun(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Registry) (*k3d.Node, error) {
regNode, err := RegistryCreate(ctx, runtime, reg)
if err != nil {
return nil, fmt.Errorf("Failed to create registry: %+v", err)
}
if err := NodeStart(ctx, runtime, regNode, k3d.NodeStartOpts{}); err != nil {
return nil, fmt.Errorf("Failed to start registry: %+v", err)
}
return regNode, err
}
// RegistryCreate creates a registry node
func RegistryCreate(ctx context.Context, runtime runtimes.Runtime, reg *k3d.Registry) (*k3d.Node, error) {
// registry name
if len(reg.Host) == 0 {
reg.Host = k3d.DefaultRegistryName
}
// if err := ValidateHostname(reg.Host); err != nil {
// log.Errorln("Invalid name for registry")
// log.Fatalln(err)
// }
registryNode := &k3d.Node{
Name: reg.Host,
Image: reg.Image,
Role: k3d.RegistryRole,
Network: "bridge", // Default network: TODO: change to const from types
}
// error out if that registry exists already
existingNode, err := runtime.GetNode(ctx, registryNode)
if err == nil && existingNode != nil {
return nil, fmt.Errorf("A registry node with that name already exists")
}
// setup the node labels
registryNode.Labels = map[string]string{
k3d.LabelRole: string(k3d.RegistryRole),
k3d.LabelRegistryHost: reg.ExposureOpts.Host, // TODO: docker machine host?
k3d.LabelRegistryHostIP: reg.ExposureOpts.Binding.HostIP,
k3d.LabelRegistryPortExternal: reg.ExposureOpts.Binding.HostPort,
k3d.LabelRegistryPortInternal: reg.ExposureOpts.Port.Port(),
}
for k, v := range k3d.DefaultObjectLabels {
registryNode.Labels[k] = v
}
// port
registryNode.Ports = nat.PortMap{}
registryNode.Ports[reg.ExposureOpts.Port] = []nat.PortBinding{reg.ExposureOpts.Binding}
// create the registry node
log.Infof("Creating node '%s'", registryNode.Name)
if err := NodeCreate(ctx, runtime, registryNode, k3d.NodeCreateOpts{}); err != nil {
log.Errorln("Failed to create registry node")
return nil, err
}
log.Infof("Successfully created registry '%s'", registryNode.Name)
return registryNode, nil
}
// RegistryConnectClusters connects an existing registry to one or more clusters
func RegistryConnectClusters(ctx context.Context, runtime runtimes.Runtime, registryNode *k3d.Node, clusters []*k3d.Cluster) error {
// find registry node
registryNode, err := NodeGet(ctx, runtime, registryNode)
if err != nil {
log.Errorf("Failed to find registry node '%s'", registryNode.Name)
return err
}
// get cluster details and connect
failed := 0
for _, c := range clusters {
cluster, err := ClusterGet(ctx, runtime, c)
if err != nil {
log.Warnf("Failed to connect to cluster '%s': Cluster not found", c.Name)
failed++
continue
}
if err := runtime.ConnectNodeToNetwork(ctx, registryNode, cluster.Network.Name); err != nil {
log.Warnf("Failed to connect to cluster '%s': Connection failed", cluster.Name)
log.Warnln(err)
failed++
}
}
if failed > 0 {
return fmt.Errorf("Failed to connect to one or more clusters")
}
return nil
}
// RegistryConnectNetworks connects an existing registry to one or more networks
func RegistryConnectNetworks(ctx context.Context, runtime runtimes.Runtime, registryNode *k3d.Node, networks []string) error {
// find registry node
registryNode, err := NodeGet(ctx, runtime, registryNode)
if err != nil {
log.Errorf("Failed to find registry node '%s'", registryNode.Name)
return err
}
// get cluster details and connect
failed := 0
for _, net := range networks {
if err := runtime.ConnectNodeToNetwork(ctx, registryNode, net); err != nil {
log.Warnf("Failed to connect to network '%s': Connection failed", net)
log.Warnln(err)
failed++
}
}
if failed > 0 {
return fmt.Errorf("Failed to connect to one or more networks")
}
return nil
}
// RegistryGenerateK3sConfig generates the k3s specific registries.yaml configuration for multiple registries
func RegistryGenerateK3sConfig(ctx context.Context, registries []*k3d.Registry) (*k3s.Registry, error) {
regConf := &k3s.Registry{}
for _, reg := range registries {
internalAddress := fmt.Sprintf("%s:%s", reg.Host, reg.ExposureOpts.Port.Port())
externalAddress := fmt.Sprintf("%s:%s", reg.Host, reg.ExposureOpts.Binding.HostPort)
// init mirrors if nil
if regConf.Mirrors == nil {
regConf.Mirrors = make(map[string]k3s.Mirror)
}
regConf.Mirrors[externalAddress] = k3s.Mirror{
Endpoints: []string{
fmt.Sprintf("http://%s", internalAddress),
},
}
if reg.Options.Proxy.RemoteURL != "" {
regConf.Mirrors[reg.Options.Proxy.RemoteURL] = k3s.Mirror{
Endpoints: []string{fmt.Sprintf("http://%s", internalAddress)},
}
}
}
return regConf, nil
}
// RegistryGet gets a registry node by name and returns it as a registry object
func RegistryGet(ctx context.Context, runtime runtimes.Runtime, name string) (*k3d.Registry, error) {
regNode, err := runtime.GetNode(ctx, &k3d.Node{
Name: name,
Role: k3d.RegistryRole,
})
if err != nil {
return nil, fmt.Errorf("Failed to find registry '%s': %+v", name, err)
}
registry := &k3d.Registry{
Host: regNode.Name,
}
// TODO: finish RegistryGet
return registry, nil
}
// RegistryFromNode transforms a node spec to a registry spec
func RegistryFromNode(node *k3d.Node) (*k3d.Registry, error) {
registry := &k3d.Registry{
Host: node.Name,
Image: node.Image,
}
// we expect exactly one portmap
if len(node.Ports) != 1 {
return nil, fmt.Errorf("Failed to parse registry spec from node %+v: 0 or multiple ports defined, where one is expected", node)
}
for port, bindings := range node.Ports {
registry.ExposureOpts.Port = port
// we expect 0 or 1 binding for that port
if len(bindings) > 1 {
return nil, fmt.Errorf("Failed to parse registry spec from node %+v: Multiple bindings '%+v' specified for port '%s' where one is expected", node, bindings, port)
}
for _, binding := range bindings {
registry.ExposureOpts.Binding = binding
}
}
log.Tracef("Got registry %+v from node %+v", registry, node)
return registry, nil
}
// RegistryGenerateLocalRegistryHostingConfigMapYAML generates a ConfigMap used to advertise the registries in the cluster
func RegistryGenerateLocalRegistryHostingConfigMapYAML(ctx context.Context, registries []*k3d.Registry) ([]byte, error) {
type cmMetadata struct {
Name string `yaml:"name"`
Namespace string `yaml:"namespace"`
}
type cmData struct {
RegHostV1 string `yaml:"localRegistryHosting.v1"`
}
type configmap struct {
APIVersion string `yaml:"apiVersion"`
Kind string `yaml:"kind"`
Metadata cmMetadata `yaml:"metadata"`
Data cmData `yaml:"data"`
}
if len(registries) > 1 {
log.Warnf("More than one registry specified, but the LocalRegistryHostingV1 spec only supports one -> Selecting the first one: %s", registries[0].Host)
}
if len(registries) < 1 {
log.Debugln("No registry specified, not generating local registry hosting configmap")
return nil, nil
}
host := registries[0].ExposureOpts.Host
if host == "" {
host = registries[0].ExposureOpts.Binding.HostIP
}
dat, err := yaml.Marshal(
k8s.LocalRegistryHostingV1{
Host: fmt.Sprintf("%s:%s", host, registries[0].ExposureOpts.Binding.HostPort),
HostFromContainerRuntime: fmt.Sprintf("%s:%s", registries[0].Host, registries[0].ExposureOpts.Port.Port()),
Help: "https://k3d.io/usage/guides/registries/#using-a-local-registry",
},
)
if err != nil {
return nil, err
}
cm := configmap{
APIVersion: "v1",
Kind: "ConfigMap",
Metadata: cmMetadata{
Name: "local-registry-hosting",
Namespace: "kube-public",
},
Data: cmData{
RegHostV1: string(dat),
},
}
cmYaml, err := yaml.Marshal(cm)
if err != nil {
return nil, err
}
log.Tracef("LocalRegistryHostingConfigMapYaml: %s", string(cmYaml))
return cmYaml, nil
}

@ -0,0 +1,66 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package client
import (
"context"
"strings"
"testing"
"github.com/docker/go-connections/nat"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
func TestRegistryGenerateLocalRegistryHostingConfigMapYAML(t *testing.T) {
var err error
expectedYAMLString := `apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: test-host:5432
hostFromContainerRuntime: test-host:1234
help: https://k3d.io/usage/guides/registries/#using-a-local-registry
`
reg := &k3d.Registry{
Host: "test-host",
}
reg.ExposureOpts.Host = "test-host"
reg.ExposureOpts.Port = nat.Port("1234/tcp")
reg.ExposureOpts.Binding.HostPort = "5432"
regs := []*k3d.Registry{reg}
cm, err := RegistryGenerateLocalRegistryHostingConfigMapYAML(context.Background(), regs)
if err != nil {
t.Error(err)
}
if !(strings.TrimSpace(string(cm)) == strings.TrimSpace(expectedYAMLString)) {
t.Errorf("Computed configmap\n-> Actual: %s\n does not match expected YAML\n-> Expected: %s", strings.TrimSpace(string(cm)), strings.TrimSpace(expectedYAMLString))
}
}

@ -0,0 +1 @@
{"host":"test-host:5432","hostFromContainerRuntime":"test-host:1234","help":"https://k3d.io/usage/guides/registries/#using-a-local-registry"}

@ -0,0 +1,21 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-test-registry
labels:
app: nginx-test-registry
spec:
replicas: 1
selector:
matchLabels:
app: nginx-test-registry
template:
metadata:
labels:
app: nginx-test-registry
spec:
containers:
- name: nginx-test-registry
image: k3d-newreg/alpine:test
ports:
- containerPort: 80

@ -0,0 +1,83 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
"fmt"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
func ReadConfig(file string) (conf.Config, error) {
cfgViper := viper.New()
cfgViper.SetConfigFile(file)
cfgViper.SetConfigType("yaml")
cfgViper.SetEnvPrefix(k3d.DefaultObjectNamePrefix)
cfgViper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
cfgViper.AutomaticEnv()
// try to read config into memory (viper map structure)
if err := cfgViper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
log.Errorln("No config file found!")
return nil, err
}
// config file found but some other error happened
log.Debugf("Failed to read config file: %+v", cfgViper.ConfigFileUsed())
return nil, err
}
var cfg conf.Config
// determine config kind
switch strings.ToLower(cfgViper.GetString("kind")) {
case "simple":
cfg = conf.SimpleConfig{}
case "cluster":
cfg = conf.ClusterConfig{}
case "clusterlist":
cfg = conf.ClusterListConfig{}
case "":
return nil, fmt.Errorf("Missing `kind` in config file")
default:
return nil, fmt.Errorf("Unknown `kind` '%s' in config file", cfgViper.GetString("kind"))
}
if err := cfgViper.Unmarshal(&cfg); err != nil {
log.Errorln("Failed to unmarshal File config")
return nil, err
}
log.Infof("Using Config: %s", cfgViper.ConfigFileUsed())
return cfg, nil
}

@ -0,0 +1,201 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
"testing"
"time"
"github.com/go-test/deep"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
func TestReadSimpleConfig(t *testing.T) {
exposedAPI := conf.SimpleExposureOpts{}
exposedAPI.HostIP = "0.0.0.0"
exposedAPI.HostPort = "6443"
expectedConfig := conf.SimpleConfig{
TypeMeta: conf.TypeMeta{
APIVersion: "k3d.io/v1alpha1",
Kind: "Simple",
},
Name: "test",
Servers: 1,
Agents: 2,
ExposeAPI: exposedAPI,
Image: "rancher/k3s:latest",
Volumes: []conf.VolumeWithNodeFilters{
{
Volume: "/my/path:/some/path",
NodeFilters: []string{"all"},
},
},
Ports: []conf.PortWithNodeFilters{
{
Port: "80:80",
NodeFilters: []string{"loadbalancer"},
}, {
Port: "0.0.0.0:443:443",
NodeFilters: []string{"loadbalancer"},
},
},
Labels: []conf.LabelWithNodeFilters{
{
Label: "foo=bar",
NodeFilters: []string{"server[0]", "loadbalancer"},
},
},
Env: []conf.EnvVarWithNodeFilters{
{
EnvVar: "bar=baz",
NodeFilters: []string{"all"},
},
},
Options: conf.SimpleConfigOptions{
K3dOptions: conf.SimpleConfigOptionsK3d{
Wait: true,
Timeout: 60 * time.Second,
DisableLoadbalancer: false,
DisableImageVolume: false,
},
K3sOptions: conf.SimpleConfigOptionsK3s{
ExtraServerArgs: []string{"--tls-san=127.0.0.1"},
ExtraAgentArgs: []string{},
},
KubeconfigOptions: conf.SimpleConfigOptionsKubeconfig{
UpdateDefaultKubeconfig: true,
SwitchCurrentContext: true,
},
},
}
cfgFile := "./test_assets/config_test_simple.yaml"
cfg, err := ReadConfig(cfgFile)
if err != nil {
t.Error(err)
}
simpleCfg, ok := cfg.(conf.SimpleConfig)
if !ok {
t.Error("Config is not of type SimpleConfig")
}
t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", simpleCfg)
if diff := deep.Equal(simpleCfg, expectedConfig); diff != nil {
t.Errorf("Actual representation\n%+v\ndoes not match expected representation\n%+v\nDiff:\n%+v", simpleCfg, expectedConfig, diff)
}
}
func TestReadClusterConfig(t *testing.T) {
expectedConfig := conf.ClusterConfig{
TypeMeta: conf.TypeMeta{
APIVersion: "k3d.io/v1alpha1",
Kind: "Cluster",
},
Cluster: k3d.Cluster{
Name: "foo",
Nodes: []*k3d.Node{
{
Name: "foo-node-0",
Role: k3d.ServerRole,
},
},
},
}
cfgFile := "./test_assets/config_test_cluster.yaml"
readConfig, err := ReadConfig(cfgFile)
if err != nil {
t.Fatal(err)
}
t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", readConfig)
if diff := deep.Equal(readConfig, expectedConfig); diff != nil {
t.Errorf("Actual representation\n%+v\ndoes not match expected representation\n%+v\nDiff:\n%+v", readConfig, expectedConfig, diff)
}
}
func TestReadClusterListConfig(t *testing.T) {
expectedConfig := conf.ClusterListConfig{
TypeMeta: conf.TypeMeta{
APIVersion: "k3d.io/v1alpha1",
Kind: "ClusterList",
},
Clusters: []k3d.Cluster{
{
Name: "foo",
Nodes: []*k3d.Node{
{
Name: "foo-node-0",
Role: k3d.ServerRole,
},
},
},
{
Name: "bar",
Nodes: []*k3d.Node{
{
Name: "bar-node-0",
Role: k3d.ServerRole,
},
},
},
},
}
cfgFile := "./test_assets/config_test_cluster_list.yaml"
readConfig, err := ReadConfig(cfgFile)
if err != nil {
t.Fatal(err)
}
t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", readConfig)
if diff := deep.Equal(readConfig, expectedConfig); diff != nil {
t.Errorf("Actual representation\n%+v\ndoes not match expected representation\n%+v\nDiff:\n%+v", readConfig, expectedConfig, diff)
}
}
func TestReadUnknownConfig(t *testing.T) {
cfgFile := "./test_assets/config_test_unknown.yaml"
_, err := ReadConfig(cfgFile)
if err == nil {
t.Fail()
}
}

@ -0,0 +1,42 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
"github.com/imdario/mergo"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
log "github.com/sirupsen/logrus"
)
// MergeSimple merges two simple configuration files with the values of the destination one having priority
func MergeSimple(dest, src conf.SimpleConfig) (*conf.SimpleConfig, error) {
log.Debugf("Merging %+v into %+v", src, dest)
if err := mergo.Merge(&dest, src); err != nil {
log.Errorln("Failed to merge config")
return nil, err
}
return &dest, nil
}

@ -0,0 +1,59 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
"testing"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
"gotest.tools/assert"
)
func TestMergeSimpleConfig(t *testing.T) {
srcConfig := "./test_assets/config_test_simple.yaml"
destConfig := "./test_assets/config_test_simple_2.yaml"
var src, dest conf.Config
var err error
if src, err = ReadConfig(srcConfig); err != nil {
t.Fatal(err)
}
if dest, err = ReadConfig(destConfig); err != nil {
t.Fatal(err)
}
mergedConfig, err := MergeSimple(dest.(conf.SimpleConfig), src.(conf.SimpleConfig))
if err != nil {
t.Fatal(err)
}
// ensure that we get the two filled fields of destConfig
assert.Equal(t, mergedConfig.Name, dest.(conf.SimpleConfig).Name)
assert.Equal(t, mergedConfig.Agents, dest.(conf.SimpleConfig).Agents)
// ensure that we get the other fields from the srcConfig (only checking two of them here)
assert.Equal(t, mergedConfig.Servers, src.(conf.SimpleConfig).Servers)
assert.Equal(t, mergedConfig.Image, src.(conf.SimpleConfig).Image)
}

@ -0,0 +1,6 @@
apiVersion: k3d.io/v1alpha1
kind: Cluster
name: foo
nodes:
- name: foo-node-0
role: server

@ -0,0 +1,12 @@
---
apiVersion: k3d.io/v1alpha1
kind: ClusterList
clusters:
- name: foo
nodes:
- name: foo-node-0
role: server
- name: bar
nodes:
- name: bar-node-0
role: server

@ -0,0 +1,43 @@
# Test fixture: a fully-populated "Simple" config exercising every section
# (API exposure, volumes, ports, env, labels, options) used by the
# read/merge/transform tests.
apiVersion: k3d.io/v1alpha1
kind: Simple
name: test
servers: 1
agents: 2
kubeAPI:
  hostIP: "0.0.0.0"
  hostPort: "6443"
image: rancher/k3s:latest
volumes:
  # mounted on every node ("all" filter)
  - volume: /my/path:/some/path
    nodeFilters:
      - all
ports:
  # both port mappings target only the loadbalancer node
  - port: 80:80
    nodeFilters:
      - loadbalancer
  - port: 0.0.0.0:443:443
    nodeFilters:
      - loadbalancer
env:
  - envVar: bar=baz
    nodeFilters:
      - all
labels:
  # label applied to the first server and the loadbalancer only
  - label: foo=bar
    nodeFilters:
      - server[0]
      - loadbalancer
options:
  k3d:
    wait: true
    timeout: "60s"
    disableLoadbalancer: false
    disableImageVolume: false
  k3s:
    extraServerArgs:
      - --tls-san=127.0.0.1
    extraAgentArgs: []
  kubeconfig:
    updateDefaultKubeconfig: true
    switchCurrentContext: true

@ -0,0 +1,4 @@
# Test fixture: a sparse "Simple" config (only name and agents set) used as the
# destination side of the merge test — its two fields must survive the merge.
apiVersion: k3d.io/v1alpha1
kind: Simple
name: supertest
agents: 8

@ -0,0 +1,3 @@
# Test fixture: a config with an unrecognized kind, used to test the
# error path of the config reader.
apiVersion: k3d.io/v1alpha1
kind: Unknown
foo: bar

@ -0,0 +1,271 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
"context"
"fmt"
"github.com/docker/go-connections/nat"
cliutil "github.com/rancher/k3d/v4/cmd/util" // TODO: move parseapiport to pkg
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
)
// TransformSimpleToClusterConfig transforms a simple configuration to a full-fledged cluster configuration:
// it applies defaults (name, image, API exposure), materializes the server/agent/loadbalancer node objects
// and distributes volumes, ports, labels and env vars onto them via node filters.
// NOTE(review): ctx and runtime are currently unused inside this function.
func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtime, simpleConfig conf.SimpleConfig) (*conf.ClusterConfig, error) {

	// set default cluster name
	if simpleConfig.Name == "" {
		simpleConfig.Name = k3d.DefaultClusterName
	}

	// fetch latest image: the literal "latest" is resolved to a concrete k3s version
	if simpleConfig.Image == "latest" {
		simpleConfig.Image = version.GetK3sVersion(true)
	}

	// a user-provided network is treated as pre-existing ("external")
	clusterNetwork := k3d.ClusterNetwork{}
	if simpleConfig.Network != "" {
		clusterNetwork.Name = simpleConfig.Network
		clusterNetwork.External = true
	}

	// -> API: fill empty host/hostIP with the default API host
	if simpleConfig.ExposeAPI.Host == "" {
		simpleConfig.ExposeAPI.Host = k3d.DefaultAPIHost
	}
	if simpleConfig.ExposeAPI.HostIP == "" {
		simpleConfig.ExposeAPI.HostIP = k3d.DefaultAPIHost
	}

	kubeAPIExposureOpts := &k3d.ExposureOpts{
		Host: simpleConfig.ExposeAPI.Host,
	}
	// the (container-side) API port is always the default; only the host binding varies
	kubeAPIExposureOpts.Port = k3d.DefaultAPIPort
	kubeAPIExposureOpts.Binding = nat.PortBinding{
		HostIP:   simpleConfig.ExposeAPI.HostIP,
		HostPort: simpleConfig.ExposeAPI.HostPort,
	}

	// FILL CLUSTER CONFIG
	newCluster := k3d.Cluster{
		Name:    simpleConfig.Name,
		Network: clusterNetwork,
		Token:   simpleConfig.ClusterToken,
		KubeAPI: kubeAPIExposureOpts,
	}

	// -> NODES
	newCluster.Nodes = []*k3d.Node{}

	// the server loadbalancer node exists unless explicitly disabled
	if !simpleConfig.Options.K3dOptions.DisableLoadbalancer {
		newCluster.ServerLoadBalancer = &k3d.Node{
			Role: k3d.LoadBalancerRole,
		}
	}

	/*************
	 * Add Nodes *
	 *************/

	for i := 0; i < simpleConfig.Servers; i++ {
		serverNode := k3d.Node{
			Role:       k3d.ServerRole,
			Image:      simpleConfig.Image,
			Args:       simpleConfig.Options.K3sOptions.ExtraServerArgs,
			ServerOpts: k3d.ServerOpts{},
		}

		// first server node will be init node if we have more than one server specified but no external datastore
		if i == 0 && simpleConfig.Servers > 1 {
			serverNode.ServerOpts.IsInit = true
			newCluster.InitNode = &serverNode
		}

		newCluster.Nodes = append(newCluster.Nodes, &serverNode)
	}

	for i := 0; i < simpleConfig.Agents; i++ {
		agentNode := k3d.Node{
			Role:  k3d.AgentRole,
			Image: simpleConfig.Image,
			Args:  simpleConfig.Options.K3sOptions.ExtraAgentArgs,
		}
		newCluster.Nodes = append(newCluster.Nodes, &agentNode)
	}

	/****************************
	 * Extra Node Configuration *
	 ****************************/

	// -> VOLUMES
	// nodeCount/nodeList include the loadbalancer (if enabled) so that port/label/env
	// filters can target it; volumes are filtered against newCluster.Nodes only.
	nodeCount := simpleConfig.Servers + simpleConfig.Agents
	nodeList := newCluster.Nodes
	if !simpleConfig.Options.K3dOptions.DisableLoadbalancer {
		nodeCount++
		nodeList = append(nodeList, newCluster.ServerLoadBalancer)
	}
	for _, volumeWithNodeFilters := range simpleConfig.Volumes {
		nodes, err := util.FilterNodes(newCluster.Nodes, volumeWithNodeFilters.NodeFilters)
		if err != nil {
			return nil, err
		}

		for _, node := range nodes {
			node.Volumes = append(node.Volumes, volumeWithNodeFilters.Volume)
		}
	}

	// -> PORTS
	for _, portWithNodeFilters := range simpleConfig.Ports {
		// a node filter is mandatory as soon as the mapping would otherwise be ambiguous
		if len(portWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 {
			return nil, fmt.Errorf("Portmapping '%s' lacks a node filter, but there's more than one node", portWithNodeFilters.Port)
		}

		nodes, err := util.FilterNodes(nodeList, portWithNodeFilters.NodeFilters)
		if err != nil {
			return nil, err
		}

		for _, node := range nodes {
			portmappings, err := nat.ParsePortSpec(portWithNodeFilters.Port)
			if err != nil {
				return nil, fmt.Errorf("Failed to parse port spec '%s': %+v", portWithNodeFilters.Port, err)
			}

			if node.Ports == nil {
				node.Ports = nat.PortMap{}
			}
			// one port spec can expand to multiple mappings; accumulate bindings per port
			for _, pm := range portmappings {
				if _, exists := node.Ports[pm.Port]; exists {
					node.Ports[pm.Port] = append(node.Ports[pm.Port], pm.Binding)
				} else {
					node.Ports[pm.Port] = []nat.PortBinding{pm.Binding}
				}
			}
		}
	}

	// -> LABELS
	for _, labelWithNodeFilters := range simpleConfig.Labels {
		if len(labelWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 {
			return nil, fmt.Errorf("Labelmapping '%s' lacks a node filter, but there's more than one node", labelWithNodeFilters.Label)
		}

		nodes, err := util.FilterNodes(nodeList, labelWithNodeFilters.NodeFilters)
		if err != nil {
			return nil, err
		}

		for _, node := range nodes {
			if node.Labels == nil {
				node.Labels = make(map[string]string) // ensure that the map is initialized
			}
			k, v := util.SplitLabelKeyValue(labelWithNodeFilters.Label)
			node.Labels[k] = v
		}
	}

	// -> ENV
	for _, envVarWithNodeFilters := range simpleConfig.Env {
		if len(envVarWithNodeFilters.NodeFilters) == 0 && nodeCount > 1 {
			return nil, fmt.Errorf("EnvVarMapping '%s' lacks a node filter, but there's more than one node", envVarWithNodeFilters.EnvVar)
		}

		nodes, err := util.FilterNodes(nodeList, envVarWithNodeFilters.NodeFilters)
		if err != nil {
			return nil, err
		}

		for _, node := range nodes {
			node.Env = append(node.Env, envVarWithNodeFilters.EnvVar)
		}
	}

	/**************************
	 * Cluster Create Options *
	 **************************/

	clusterCreateOpts := k3d.ClusterCreateOpts{
		DisableImageVolume:  simpleConfig.Options.K3dOptions.DisableImageVolume,
		WaitForServer:       simpleConfig.Options.K3dOptions.Wait,
		Timeout:             simpleConfig.Options.K3dOptions.Timeout,
		DisableLoadBalancer: simpleConfig.Options.K3dOptions.DisableLoadbalancer,
		K3sServerArgs:       simpleConfig.Options.K3sOptions.ExtraServerArgs,
		K3sAgentArgs:        simpleConfig.Options.K3sOptions.ExtraAgentArgs,
		GlobalLabels:        map[string]string{}, // empty init
		GlobalEnv:           []string{},          // empty init
	}

	// ensure, that we have the default object labels
	for k, v := range k3d.DefaultObjectLabels {
		clusterCreateOpts.GlobalLabels[k] = v
	}

	/*
	 * Registries
	 */
	if simpleConfig.Registries.Create {
		// expose the registry on a random host port
		regPort, err := cliutil.ParsePortExposureSpec("random", k3d.DefaultRegistryPort)
		if err != nil {
			return nil, fmt.Errorf("Failed to get port for registry: %+v", err)
		}
		clusterCreateOpts.Registries.Create = &k3d.Registry{
			Host:         fmt.Sprintf("%s-%s-registry", k3d.DefaultObjectNamePrefix, newCluster.Name),
			Image:        fmt.Sprintf("%s:%s", k3d.DefaultRegistryImageRepo, k3d.DefaultRegistryImageTag),
			ExposureOpts: *regPort,
		}
	}

	// parse each referenced pre-existing registry; any parse failure aborts the transform
	for _, usereg := range simpleConfig.Registries.Use {
		reg, err := util.ParseRegistryRef(usereg)
		if err != nil {
			return nil, fmt.Errorf("Failed to parse use-registry string '%s': %+v", usereg, err)
		}
		log.Tracef("Parsed registry reference: %+v", reg)
		clusterCreateOpts.Registries.Use = append(clusterCreateOpts.Registries.Use, reg)
	}

	/**********************
	 * Kubeconfig Options *
	 **********************/

	// Currently, the kubeconfig options for the cluster config are the same as for the simple config

	/******************************
	 * Create Full Cluster Config *
	 ******************************/

	clusterConfig := &conf.ClusterConfig{
		Cluster:           newCluster,
		ClusterCreateOpts: clusterCreateOpts,
		KubeconfigOpts:    simpleConfig.Options.KubeconfigOptions,
	}

	return clusterConfig, nil
}

@ -0,0 +1,55 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
"context"
"testing"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
"github.com/rancher/k3d/v4/pkg/runtimes"
)
// TestTransformSimpleConfigToClusterConfig reads the simple-config fixture and
// ensures it transforms into a full cluster config without errors.
func TestTransformSimpleConfigToClusterConfig(t *testing.T) {
	cfgFile := "./test_assets/config_test_simple.yaml"

	cfg, err := ReadConfig(cfgFile)
	if err != nil {
		// t.Fatal (not t.Error): without a config, the type assertion and
		// transform below would only produce misleading follow-up failures.
		t.Fatal(err)
	}

	simpleCfg, ok := cfg.(conf.SimpleConfig)
	if !ok {
		// stop here — transforming a zero-valued SimpleConfig is meaningless
		t.Fatal("Config is not of type SimpleConfig")
	}

	t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", simpleCfg)

	clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, simpleCfg)
	if err != nil {
		t.Error(err)
	}
	t.Logf("\n===== Resulting Cluster Config =====\n%+v\n===============\n", clusterCfg)
}

@ -0,0 +1,167 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package v1alpha1
import (
"fmt"
"time"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/version"
)
// DefaultConfigTpl for printing: a minimal "Simple" config with two
// placeholders — the cluster name and the node image.
const DefaultConfigTpl = `---
apiVersion: k3d.io/v1alpha1
kind: Simple
name: %s
servers: 1
agents: 0
image: %s
`

// DefaultConfig templated DefaultConfigTpl, filled with the default cluster
// name and the default k3s image (repo + version from the version package).
var DefaultConfig = fmt.Sprintf(
	DefaultConfigTpl,
	k3d.DefaultClusterName,
	fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)),
)
// TypeMeta, basically copied from https://github.com/kubernetes/apimachinery/blob/a3b564b22db316a41e94fdcffcf9995424fe924c/pkg/apis/meta/v1/types.go#L36-L56
type TypeMeta struct {
	Kind       string `mapstructure:"kind,omitempty" yaml:"kind,omitempty" json:"kind,omitempty"`
	APIVersion string `mapstructure:"apiVersion,omitempty" yaml:"apiVersion,omitempty" json:"apiVersion,omitempty"`
}

// Config interface: implemented by every config kind in this package
// (SimpleConfig, ClusterConfig, ClusterListConfig).
type Config interface {
	GetKind() string
}

// VolumeWithNodeFilters pairs a volume mount with the node filters selecting its target nodes.
type VolumeWithNodeFilters struct {
	Volume      string   `mapstructure:"volume" yaml:"volume" json:"volume,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"`
}

// PortWithNodeFilters pairs a port mapping with the node filters selecting its target nodes.
type PortWithNodeFilters struct {
	Port        string   `mapstructure:"port" yaml:"port" json:"port,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"`
}

// LabelWithNodeFilters pairs a label with the node filters selecting its target nodes.
type LabelWithNodeFilters struct {
	Label       string   `mapstructure:"label" yaml:"label" json:"label,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"`
}

// EnvVarWithNodeFilters pairs an environment variable with the node filters selecting its target nodes.
type EnvVarWithNodeFilters struct {
	EnvVar      string   `mapstructure:"envVar" yaml:"envVar" json:"envVar,omitempty"`
	NodeFilters []string `mapstructure:"nodeFilters" yaml:"nodeFilters" json:"nodeFilters,omitempty"`
}
// SimpleConfigOptionsKubeconfig describes the set of options referring to the kubeconfig during cluster creation.
type SimpleConfigOptionsKubeconfig struct {
	UpdateDefaultKubeconfig bool `mapstructure:"updateDefaultKubeconfig" yaml:"updateDefaultKubeconfig" json:"updateDefaultKubeconfig,omitempty"` // default: true
	SwitchCurrentContext    bool `mapstructure:"switchCurrentContext" yaml:"switchCurrentContext" json:"switchCurrentContext,omitempty"`          //nolint:lll // default: true
}

// SimpleConfigOptions groups all option sections of the simple config file.
type SimpleConfigOptions struct {
	K3dOptions        SimpleConfigOptionsK3d        `mapstructure:"k3d" yaml:"k3d"`
	K3sOptions        SimpleConfigOptionsK3s        `mapstructure:"k3s" yaml:"k3s"`
	KubeconfigOptions SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig"`
	Runtime           SimpleConfigOptionsRuntime    `mapstructure:"runtime" yaml:"runtime"`
}

// SimpleConfigOptionsRuntime holds runtime-specific options.
type SimpleConfigOptionsRuntime struct {
	GPURequest string `mapstructure:"gpuRequest" yaml:"gpuRequest"`
}

// SimpleConfigOptionsK3d holds k3d-specific cluster creation options.
type SimpleConfigOptionsK3d struct {
	Wait                       bool                 `mapstructure:"wait" yaml:"wait"`
	Timeout                    time.Duration        `mapstructure:"timeout" yaml:"timeout"`
	DisableLoadbalancer        bool                 `mapstructure:"disableLoadbalancer" yaml:"disableLoadbalancer"`
	DisableImageVolume         bool                 `mapstructure:"disableImageVolume" yaml:"disableImageVolume"`
	NoRollback                 bool                 `mapstructure:"noRollback" yaml:"noRollback"`
	PrepDisableHostIPInjection bool                 `mapstructure:"prepDisableHostIPInjection" yaml:"prepDisableHostIPInjection"`
	NodeHookActions            []k3d.NodeHookAction `mapstructure:"nodeHookActions" yaml:"nodeHookActions,omitempty"`
}

// SimpleConfigOptionsK3s holds extra arguments passed through to k3s server/agent processes.
type SimpleConfigOptionsK3s struct {
	ExtraServerArgs []string `mapstructure:"extraServerArgs" yaml:"extraServerArgs"`
	ExtraAgentArgs  []string `mapstructure:"extraAgentArgs" yaml:"extraAgentArgs"`
}
// SimpleConfig describes the toplevel k3d configuration file.
type SimpleConfig struct {
	TypeMeta     `mapstructure:",squash" yaml:",inline"`
	Name         string                  `mapstructure:"name" yaml:"name" json:"name,omitempty"`
	Servers      int                     `mapstructure:"servers" yaml:"servers" json:"servers,omitempty"` //nolint:lll // default 1
	Agents       int                     `mapstructure:"agents" yaml:"agents" json:"agents,omitempty"`    //nolint:lll // default 0
	ExposeAPI    SimpleExposureOpts      `mapstructure:"kubeAPI" yaml:"kubeAPI" json:"kubeAPI,omitempty"`
	Image        string                  `mapstructure:"image" yaml:"image" json:"image,omitempty"`
	Network      string                  `mapstructure:"network" yaml:"network" json:"network,omitempty"`
	ClusterToken string                  `mapstructure:"clusterToken" yaml:"clusterToken" json:"clusterToken,omitempty"` // default: auto-generated
	Volumes      []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes" json:"volumes,omitempty"`
	Ports        []PortWithNodeFilters   `mapstructure:"ports" yaml:"ports" json:"ports,omitempty"`
	Labels       []LabelWithNodeFilters  `mapstructure:"labels" yaml:"labels" json:"labels,omitempty"`
	Options      SimpleConfigOptions     `mapstructure:"options" yaml:"options" json:"options,omitempty"`
	Env          []EnvVarWithNodeFilters `mapstructure:"env" yaml:"env" json:"env,omitempty"`
	// Registries: either create a new registry alongside the cluster and/or
	// connect the cluster to pre-existing registries.
	Registries struct {
		Use    []string `mapstructure:"use" yaml:"use,omitempty" json:"use,omitempty"`
		Create bool     `mapstructure:"create" yaml:"create,omitempty" json:"create,omitempty"`
	} `mapstructure:"registries" yaml:"registries,omitempty" json:"registries,omitempty"`
}

// SimpleExposureOpts provides a simplified syntax compared to the original k3d.ExposureOpts
type SimpleExposureOpts struct {
	Host     string `mapstructure:"host" yaml:"host,omitempty" json:"host,omitempty"`
	HostIP   string `mapstructure:"hostIP" yaml:"hostIP,omitempty" json:"hostIP,omitempty"`
	HostPort string `mapstructure:"hostPort" yaml:"hostPort,omitempty" json:"hostPort,omitempty"`
}

// GetKind implements Config.GetKind
// NOTE(review): returns "Cluster" although the YAML fixtures use 'kind: Simple' —
// confirm whether any caller dispatches on this value before changing it.
func (c SimpleConfig) GetKind() string {
	return "Cluster"
}
// ClusterConfig describes a single cluster config
type ClusterConfig struct {
	TypeMeta          `mapstructure:",squash" yaml:",inline"`
	Cluster           k3d.Cluster                   `mapstructure:",squash" yaml:",inline"`
	ClusterCreateOpts k3d.ClusterCreateOpts         `mapstructure:"options" yaml:"options"`
	KubeconfigOpts    SimpleConfigOptionsKubeconfig `mapstructure:"kubeconfig" yaml:"kubeconfig"`
}

// GetKind implements Config.GetKind
func (c ClusterConfig) GetKind() string {
	return "Cluster"
}

// ClusterListConfig describes a list of clusters
type ClusterListConfig struct {
	TypeMeta `mapstructure:",squash" yaml:",inline"`
	Clusters []k3d.Cluster `mapstructure:"clusters" yaml:"clusters"`
}

// GetKind implements Config.GetKind
func (c ClusterListConfig) GetKind() string {
	return "ClusterList"
}

@ -0,0 +1,84 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
"context"
"time"
k3dc "github.com/rancher/k3d/v4/pkg/client"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
"fmt"
log "github.com/sirupsen/logrus"
)
// ValidateClusterConfig checks a given cluster config for basic errors
// (invalid name, host-network restrictions, negative timeout, bad volume mounts).
func ValidateClusterConfig(ctx context.Context, runtime runtimes.Runtime, config conf.ClusterConfig) error {
	// the cluster name is used as a hostname component, so it must satisfy the
	// same requirements as a valid host name
	if err := k3dc.CheckName(config.Cluster.Name); err != nil {
		log.Errorf("Provided cluster name '%s' does not match requirements", config.Cluster.Name)
		return err
	}

	usesHostNetwork := config.Cluster.Network.Name == "host"

	// edge case: hostnetwork is only permitted for single-node clusters,
	// since multiple nodes would collide on host ports
	if usesHostNetwork && len(config.Cluster.Nodes) > 1 {
		return fmt.Errorf("Can only use hostnetwork mode with a single node (port collisions, etc.)")
	}

	// a negative timeout makes no sense
	if config.ClusterCreateOpts.Timeout < 0*time.Second {
		return fmt.Errorf("Timeout may not be negative (is '%s')", config.ClusterCreateOpts.Timeout)
	}

	// in hostNetwork mode, we're not going to map a hostport. Here it should always use 6443.
	// Note that hostNetwork mode is super inflexible and since we don't change the backend port (on the container), it will only be one hostmode cluster allowed.
	if usesHostNetwork && config.Cluster.KubeAPI.Port.Port() != k3d.DefaultAPIPort {
		return fmt.Errorf("The API Port can not be changed when using 'host' network")
	}

	// per-node validation
	for _, clusterNode := range config.Cluster.Nodes {
		// node names have to be valid hostnames // TODO: validate hostnames once we generate them before this step
		/*if err := k3dc.CheckName(node.Name); err != nil {
			return err
		}*/

		// every volume mount must reference an existing host path or a named runtime volume
		for _, volumeMount := range clusterNode.Volumes {
			if err := util.ValidateVolumeMount(runtime, volumeMount); err != nil {
				return err
			}
		}
	}

	return nil
}

@ -0,0 +1,44 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package config
import (
"context"
"testing"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
"github.com/rancher/k3d/v4/pkg/runtimes"
)
// TestValidateClusterConfig reads the cluster-config fixture and ensures it
// passes basic validation.
func TestValidateClusterConfig(t *testing.T) {
	cfgFile := "./test_assets/config_test_cluster.yaml"

	cfg, err := ReadConfig(cfgFile)
	if err != nil {
		// t.Fatal (not t.Error): on a read failure cfg is invalid/nil and the
		// unchecked type assertion below would panic the whole test binary.
		t.Fatal(err)
	}

	if err := ValidateClusterConfig(context.Background(), runtimes.Docker, cfg.(conf.ClusterConfig)); err != nil {
		t.Error(err)
	}
}

@ -26,7 +26,7 @@ import (
"context"
"io"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
// GetKubeconfig grabs the kubeconfig from inside a k3d node

@ -21,7 +21,11 @@ THE SOFTWARE.
*/
package containerd
import "context"
import (
"context"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
// CreateNetworkIfNotPresent creates a new docker network
func (d Containerd) CreateNetworkIfNotPresent(ctx context.Context, name string) (string, bool, error) {
@ -32,3 +36,13 @@ func (d Containerd) CreateNetworkIfNotPresent(ctx context.Context, name string)
func (d Containerd) DeleteNetwork(ctx context.Context, ID string) error {
return nil
}
// ConnectNodeToNetwork connects a node to a network
// (no-op stub for the containerd runtime: always returns nil)
func (d Containerd) ConnectNodeToNetwork(ctx context.Context, node *k3d.Node, network string) error {
	return nil
}

// DisconnectNodeFromNetwork disconnects a node from a network (u don't say :O)
// (no-op stub for the containerd runtime: always returns nil)
func (d Containerd) DisconnectNodeFromNetwork(ctx context.Context, node *k3d.Node, network string) error {
	return nil
}

@ -30,7 +30,7 @@ import (
"github.com/containerd/containerd"
"github.com/containerd/containerd/containers"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
@ -137,3 +137,8 @@ func (d Containerd) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string
func (d Containerd) ExecInNodeGetLogs(ctx context.Context, node *k3d.Node, cmd []string) (*bufio.Reader, error) {
return nil, nil
}
// GetNodesInNetwork returns all the nodes connected to a given network
// (no-op stub for the containerd runtime: always returns (nil, nil))
func (d Containerd) GetNodesInNetwork(ctx context.Context, network string) ([]*k3d.Node, error) {
	return nil, nil
}

@ -24,10 +24,15 @@ package containerd
import (
"context"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
// CopyToNode copies a file from the local FS to the selected node
// (no-op stub for the containerd runtime: always returns nil)
func (d Containerd) CopyToNode(ctx context.Context, src string, dest string, node *k3d.Node) error {
	return nil
}

// WriteToNode writes a byte array to the selected node
// (no-op stub for the containerd runtime: always returns nil)
func (d Containerd) WriteToNode(ctx context.Context, content []byte, dest string, node *k3d.Node) error {
	return nil
}

@ -33,12 +33,12 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
// createContainer creates a new docker container from translated specs
func createContainer(ctx context.Context, dockerNode *NodeInDocker, name string) error {
func createContainer(ctx context.Context, dockerNode *NodeInDocker, name string) (string, error) {
log.Tracef("Creating docker container with translated config\n%+v\n", dockerNode)
@ -46,7 +46,7 @@ func createContainer(ctx context.Context, dockerNode *NodeInDocker, name string)
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
return err
return "", err
}
defer docker.Close()
@ -58,24 +58,30 @@ func createContainer(ctx context.Context, dockerNode *NodeInDocker, name string)
if client.IsErrNotFound(err) {
if err := pullImage(ctx, docker, dockerNode.ContainerConfig.Image); err != nil {
log.Errorf("Failed to create container '%s'", name)
return err
return "", err
}
continue
}
log.Errorf("Failed to create container '%s'", name)
return err
return "", err
}
log.Debugf("Created container %s (ID: %s)", name, resp.ID)
break
}
// start container
if err := docker.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
log.Errorln("Failed to start container")
return resp.ID, nil
}
func startContainer(ctx context.Context, ID string) error {
// initialize docker client
docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Errorln("Failed to create docker client")
return err
}
defer docker.Close()
return nil
return docker.ContainerStart(ctx, ID, types.ContainerStartOptions{})
}
// removeContainer deletes a running container (like docker rm -f)

@ -27,7 +27,7 @@ import (
"io"
"github.com/docker/docker/client"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)

@ -25,12 +25,15 @@ import (
"context"
"fmt"
"net"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
k3d "github.com/rancher/k3d/v3/pkg/types"
runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
@ -93,7 +96,13 @@ func (d Docker) DeleteNetwork(ctx context.Context, ID string) error {
defer docker.Close()
// (3) delete network
return docker.NetworkRemove(ctx, ID)
if err := docker.NetworkRemove(ctx, ID); err != nil {
if strings.HasSuffix(err.Error(), "active endpoints") {
return runtimeErr.ErrRuntimeNetworkNotEmpty
}
return err
}
return nil
}
// GetNetwork gets information about a network by its ID
@ -119,3 +128,56 @@ func GetGatewayIP(ctx context.Context, network string) (net.IP, error) {
return gatewayIP, nil
}
// ConnectNodeToNetwork connects a node to a network
func (d Docker) ConnectNodeToNetwork(ctx context.Context, node *k3d.Node, networkName string) error {
	// resolve the node to its backing docker container
	container, err := getNodeContainer(ctx, node)
	if err != nil {
		return err
	}

	// get docker client
	docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Errorln("Failed to create docker client")
		return err
	}
	defer docker.Close()

	// resolve the network name to its resource (for the ID)
	networkResource, err := GetNetwork(ctx, networkName)
	if err != nil {
		log.Errorf("Failed to get network '%s'", networkName)
		return err
	}

	// connect container to network with default endpoint settings
	return docker.NetworkConnect(ctx, networkResource.ID, container.ID, &network.EndpointSettings{})
}
// DisconnectNodeFromNetwork disconnects a node from a network (u don't say :O)
func (d Docker) DisconnectNodeFromNetwork(ctx context.Context, node *k3d.Node, networkName string) error {
	// resolve the node to its backing docker container
	container, err := getNodeContainer(ctx, node)
	if err != nil {
		return err
	}

	// get docker client
	docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Errorln("Failed to create docker client")
		return err
	}
	defer docker.Close()

	// resolve the network name to its resource (for the ID)
	networkResource, err := GetNetwork(ctx, networkName)
	if err != nil {
		log.Errorf("Failed to get network '%s'", networkName)
		return err
	}

	// last argument 'true' forces the disconnect
	return docker.NetworkDisconnect(ctx, networkResource.ID, container.ID, true)
}

@ -25,6 +25,7 @@ package docker
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
@ -33,7 +34,8 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
k3d "github.com/rancher/k3d/v3/pkg/types"
runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
@ -48,7 +50,8 @@ func (d Docker) CreateNode(ctx context.Context, node *k3d.Node) error {
}
// create node
if err := createContainer(ctx, dockerNode, node.Name); err != nil {
_, err = createContainer(ctx, dockerNode, node.Name)
if err != nil {
log.Errorf("Failed to create node '%s'", node.Name)
return err
}
@ -58,6 +61,7 @@ func (d Docker) CreateNode(ctx context.Context, node *k3d.Node) error {
// DeleteNode deletes a node
func (d Docker) DeleteNode(ctx context.Context, nodeSpec *k3d.Node) error {
log.Debugf("Deleting node %s ...", nodeSpec.Name)
return removeContainer(ctx, nodeSpec.Name)
}
@ -217,7 +221,7 @@ func (d Docker) GetNode(ctx context.Context, node *k3d.Node) (*k3d.Node, error)
node, err = TranslateContainerDetailsToNode(containerDetails)
if err != nil {
log.Errorf("Failed to translate container details for node '%s' to node object", node.Name)
log.Errorf("Failed to translate container '%s' to node object", containerDetails.Name)
return node, err
}
@ -283,7 +287,7 @@ func (d Docker) GetNodeLogs(ctx context.Context, node *k3d.Node, since time.Time
containerInspectResponse, err := docker.ContainerInspect(ctx, container.ID)
if err != nil {
log.Errorf("Failed to inspect container '%s'", container.ID)
log.Errorf("Failed to inspect node '%s'(ID %s)", node.Name, container.ID)
return nil, err
}
@ -320,12 +324,14 @@ func (d Docker) ExecInNodeGetLogs(ctx context.Context, node *k3d.Node, cmd []str
func (d Docker) ExecInNode(ctx context.Context, node *k3d.Node, cmd []string) error {
execConnection, err := executeInNode(ctx, node, cmd)
if err != nil {
logs, err := ioutil.ReadAll(execConnection.Reader)
if err != nil {
log.Errorf("Failed to get logs from errored exec process in node '%s'", node.Name)
return err
if execConnection != nil && execConnection.Reader != nil {
logs, err := ioutil.ReadAll(execConnection.Reader)
if err != nil {
log.Errorf("Failed to get logs from errored exec process in node '%s'", node.Name)
return err
}
err = fmt.Errorf("%w: Logs from failed access process:\n%s", err, string(logs))
}
err = fmt.Errorf("%w: Logs from failed access process:\n%s", err, string(logs))
}
return err
}
@ -393,10 +399,44 @@ func executeInNode(ctx context.Context, node *k3d.Node, cmd []string) (*types.Hi
if execInfo.ExitCode == 0 { // success
log.Debugf("Exec process in node '%s' exited with '0'", node.Name)
return &execConnection, nil
} else { // failed
return &execConnection, fmt.Errorf("Exec process in node '%s' failed with exit code '%d'", node.Name, execInfo.ExitCode)
}
return &execConnection, fmt.Errorf("Exec process in node '%s' failed with exit code '%d'", node.Name, execInfo.ExitCode)
}
}
// GetNodesInNetwork returns all k3d-managed nodes connected to a given (docker) network.
// Containers in the network that are not k3d-managed (i.e. missing the default k3d labels,
// signalled by ErrRuntimeContainerUnknown) are silently skipped, so unrelated containers
// sharing the network do not cause an error.
func (d Docker) GetNodesInNetwork(ctx context.Context, network string) ([]*k3d.Node, error) {
	// NOTE: the original implementation also created (and immediately defer-closed) a docker
	// client here, but never used it: GetNetwork and getContainerDetails manage their own
	// clients, so that dead acquisition has been dropped.
	net, err := GetNetwork(ctx, network)
	if err != nil {
		return nil, err
	}

	connectedNodes := []*k3d.Node{}

	// loop over the list of containers connected to this network and translate them into nodes internally
	for cID := range net.Containers {
		containerDetails, err := getContainerDetails(ctx, cID)
		if err != nil {
			return nil, err
		}
		node, err := TranslateContainerDetailsToNode(containerDetails)
		if err != nil {
			if errors.Is(err, runtimeErr.ErrRuntimeContainerUnknown) {
				// not managed by k3d: ignore, but leave a trace for debugging
				log.Tracef("GetNodesInNetwork: inspected non-k3d-managed container %s", containerDetails.Name)
				continue
			}
			return nil, err
		}
		connectedNodes = append(connectedNodes, node)
	}

	return connectedNodes, nil
}

@ -31,7 +31,8 @@ import (
docker "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
k3d "github.com/rancher/k3d/v3/pkg/types"
runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
dockercliopts "github.com/docker/cli/opts"
@ -93,13 +94,14 @@ func TranslateNodeToContainer(node *k3d.Node) (*NodeInDocker, error) {
// containerConfig.Volumes = map[string]struct{}{} // TODO: do we need this? We only used binds before
/* Ports */
exposedPorts, portBindings, err := nat.ParsePortSpecs(node.Ports)
if err != nil {
log.Errorf("Failed to parse port specs '%v'", node.Ports)
return nil, err
exposedPorts := nat.PortSet{}
for ep := range node.Ports {
if _, exists := exposedPorts[ep]; !exists {
exposedPorts[ep] = struct{}{}
}
}
containerConfig.ExposedPorts = exposedPorts
hostConfig.PortBindings = portBindings
hostConfig.PortBindings = node.Ports
/* Network */
networkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{
node.Network: {},
@ -133,11 +135,20 @@ func TranslateContainerToNode(cont *types.Container) (*k3d.Node, error) {
// TranslateContainerDetailsToNode translates a docker containerJSON object into a k3d node representation
func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d.Node, error) {
// translate portMap to string representation
ports := []string{}
for containerPort, portBindingList := range containerDetails.HostConfig.PortBindings {
for _, hostInfo := range portBindingList {
ports = append(ports, fmt.Sprintf("%s:%s:%s", hostInfo.HostIP, hostInfo.HostPort, containerPort))
// first, make sure, that it's actually a k3d managed container by checking if it has all the default labels
for k, v := range k3d.DefaultObjectLabels {
log.Tracef("TranslateContainerDetailsToNode: Checking for default object label %s=%s", k, v)
found := false
for lk, lv := range containerDetails.Config.Labels {
if lk == k && lv == v {
found = true
break
}
}
if !found {
log.Debugf("Container %s is missing default label %s=%s in label set %+v", containerDetails.Name, k, v, containerDetails.Config.Labels)
return nil, runtimeErr.ErrRuntimeContainerUnknown
}
}
@ -157,13 +168,14 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d
// serverOpts
serverOpts := k3d.ServerOpts{IsInit: false}
serverOpts.KubeAPI = &k3d.ExposureOpts{}
for k, v := range containerDetails.Config.Labels {
if k == k3d.LabelServerAPIHostIP {
serverOpts.ExposeAPI.HostIP = v
serverOpts.KubeAPI.Binding.HostIP = v
} else if k == k3d.LabelServerAPIHost {
serverOpts.ExposeAPI.Host = v
serverOpts.KubeAPI.Host = v
} else if k == k3d.LabelServerAPIPort {
serverOpts.ExposeAPI.Port = v
serverOpts.KubeAPI.Binding.HostPort = v
}
}
@ -197,7 +209,7 @@ func TranslateContainerDetailsToNode(containerDetails types.ContainerJSON) (*k3d
Env: env,
Cmd: containerDetails.Config.Cmd,
Args: []string{}, // empty, since Cmd already contains flags
Ports: ports,
Ports: containerDetails.HostConfig.PortBindings,
Restart: restart,
Created: containerDetails.Created,
Labels: labels,

@ -30,7 +30,7 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
func TestTranslateNodeToContainer(t *testing.T) {
@ -43,7 +43,14 @@ func TestTranslateNodeToContainer(t *testing.T) {
Env: []string{"TEST_KEY_1=TEST_VAL_1"},
Cmd: []string{"server", "--https-listen-port=6443"},
Args: []string{"--some-boolflag"},
Ports: []string{"0.0.0.0:6443:6443/tcp"},
Ports: nat.PortMap{
"6443/tcp": []nat.PortBinding{
{
HostIP: "0.0.0.0",
HostPort: "6443",
},
},
},
Restart: true,
Labels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"},
}
@ -58,7 +65,7 @@ func TestTranslateNodeToContainer(t *testing.T) {
Cmd: []string{"server", "--https-listen-port=6443", "--some-boolflag"},
Labels: map[string]string{k3d.LabelRole: string(k3d.ServerRole), "test_key_1": "test_val_1"},
ExposedPorts: nat.PortSet{
"6443/tcp": {},
"6443/tcp": struct{}{},
},
},
HostConfig: container.HostConfig{

@ -22,6 +22,8 @@ THE SOFTWARE.
package docker
import (
"archive/tar"
"bytes"
"context"
"fmt"
@ -29,7 +31,7 @@ import (
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
@ -88,3 +90,48 @@ func (d Docker) CopyToNode(ctx context.Context, src string, dest string, node *k
return docker.CopyToContainer(ctx, container.ID, destDir, preparedArchive, types.CopyToContainerOptions{AllowOverwriteDirWithFile: false})
}
// WriteToNode writes a byte array to a file at the given destination path inside the
// node's container. The content is wrapped into a single-file, in-memory tar archive
// (mode 0644), as required by the docker CopyToContainer API, and extracted relative
// to the container root "/".
func (d Docker) WriteToNode(ctx context.Context, content []byte, dest string, node *k3d.Node) error {

	nodeContainer, err := getNodeContainer(ctx, node)
	if err != nil {
		return fmt.Errorf("Failed to find container for node '%s': %+v", node.Name, err)
	}

	// create docker client
	docker, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Errorln("Failed to create docker client")
		return err
	}
	defer docker.Close()

	// build the single-file tar archive in memory
	buf := new(bytes.Buffer)
	tarWriter := tar.NewWriter(buf)
	defer tarWriter.Close() // covers the early-return paths; no-op after the explicit Close below

	tarHeader := &tar.Header{
		Name: dest, // destination path inside the container
		Mode: 0644,
		Size: int64(len(content)),
	}

	if err := tarWriter.WriteHeader(tarHeader); err != nil {
		return fmt.Errorf("Failed to write tar header: %+v", err)
	}

	if _, err := tarWriter.Write(content); err != nil {
		return fmt.Errorf("Failed to write tar content: %+v", err)
	}

	// Close flushes any buffered data and writes the archive footer: a failure here means
	// the archive is incomplete, so treat it as fatal instead of merely logging it at
	// debug level and shipping a corrupt archive into the container.
	if err := tarWriter.Close(); err != nil {
		return fmt.Errorf("Failed to close tar writer: %+v", err)
	}

	tarBytes := bytes.NewReader(buf.Bytes())
	if err := docker.CopyToContainer(ctx, nodeContainer.ID, "/", tarBytes, types.CopyToContainerOptions{AllowOverwriteDirWithFile: true}); err != nil {
		return fmt.Errorf("Failed to copy content to container '%s': %+v", nodeContainer.ID, err)
	}

	return nil
}

@ -28,7 +28,7 @@ import (
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
k3d "github.com/rancher/k3d/v3/pkg/types"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
@ -45,11 +45,12 @@ func (d Docker) CreateVolume(ctx context.Context, name string, labels map[string
// (1) create volume
volumeCreateOptions := volume.VolumeCreateBody{
Name: name,
Labels: k3d.DefaultObjectLabels,
Labels: labels,
Driver: "local", // TODO: allow setting driver + opts
DriverOpts: map[string]string{},
}
for k, v := range labels {
for k, v := range k3d.DefaultObjectLabels {
volumeCreateOptions.Labels[k] = v
}

@ -0,0 +1,30 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package runtimes
import "errors"
// ErrRuntimeNetworkNotEmpty describes an error that occurs because a network still has containers connected to it (e.g. cannot be deleted)
var ErrRuntimeNetworkNotEmpty = errors.New("network not empty")
// ErrRuntimeContainerUnknown describes the situation, where we're inspecting a container that's not obviously managed by k3d
var ErrRuntimeContainerUnknown = errors.New("container not managed by k3d: missing default label(s)")

@ -29,9 +29,9 @@ import (
"net"
"time"
"github.com/rancher/k3d/v3/pkg/runtimes/containerd"
"github.com/rancher/k3d/v3/pkg/runtimes/docker"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v4/pkg/runtimes/containerd"
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
// SelectedRuntime is a runtime (pun intended) variable determining the selected runtime
@ -51,15 +51,16 @@ var Runtimes = map[string]Runtime{
// Runtime defines an interface that can be implemented for various container runtime environments (docker, containerd, etc.)
type Runtime interface {
CreateNode(context.Context, *k3d.Node) error
CreateNode(context.Context, *k3d.Node) error // Creates a node container, but does not start it
DeleteNode(context.Context, *k3d.Node) error
GetNodesByLabel(context.Context, map[string]string) ([]*k3d.Node, error)
GetNode(context.Context, *k3d.Node) (*k3d.Node, error)
GetNodeStatus(context.Context, *k3d.Node) (bool, string, error)
GetNodesInNetwork(context.Context, string) ([]*k3d.Node, error)
CreateNetworkIfNotPresent(context.Context, string) (string, bool, error) // @return NETWORK_NAME, EXISTS, ERROR
GetKubeconfig(context.Context, *k3d.Node) (io.ReadCloser, error)
DeleteNetwork(context.Context, string) error
StartNode(context.Context, *k3d.Node) error
StartNode(context.Context, *k3d.Node) error // starts an existing container
StopNode(context.Context, *k3d.Node) error
CreateVolume(context.Context, string, map[string]string) error
DeleteVolume(context.Context, string) error
@ -69,8 +70,11 @@ type Runtime interface {
ExecInNodeGetLogs(context.Context, *k3d.Node, []string) (*bufio.Reader, error)
GetNodeLogs(context.Context, *k3d.Node, time.Time) (io.ReadCloser, error)
GetImages(context.Context) ([]string, error)
CopyToNode(context.Context, string, string, *k3d.Node) error
CopyToNode(context.Context, string, string, *k3d.Node) error // @param context, source, destination, node
WriteToNode(context.Context, []byte, string, *k3d.Node) error // @param context, content, destination, node
GetHostIP(context.Context, string) (net.IP, error)
ConnectNodeToNetwork(context.Context, *k3d.Node, string) error // @param context, node, network name
DisconnectNodeFromNetwork(context.Context, *k3d.Node, string) error // @param context, node, network name
}
// GetRuntime checks, if a given name is represented by an implemented k3d runtime and returns it

@ -30,10 +30,10 @@ import (
"sync"
"time"
k3dc "github.com/rancher/k3d/v3/pkg/cluster"
"github.com/rancher/k3d/v3/pkg/runtimes"
k3d "github.com/rancher/k3d/v3/pkg/types"
"github.com/rancher/k3d/v3/version"
k3dc "github.com/rancher/k3d/v4/pkg/client"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/version"
log "github.com/sirupsen/logrus"
)
@ -212,7 +212,7 @@ func startToolsNode(ctx context.Context, runtime runtimes.Runtime, cluster *k3d.
Labels: k3d.DefaultObjectLabels,
}
node.Labels[k3d.LabelClusterName] = cluster.Name
if err := runtime.CreateNode(ctx, node); err != nil {
if err := k3dc.NodeRun(ctx, runtime, node, k3d.NodeCreateOpts{}); err != nil {
log.Errorf("Failed to create tools container for cluster '%s'", cluster.Name)
return node, err
}

@ -0,0 +1,81 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package k3s
/*
* Copied from https://github.com/k3s-io/k3s/blob/cf8c101b705c7af20e2ed11df43beb4951e6d9dc/pkg/agent/templates/registry.go
* .. to avoid pulling in k3s as a dependency
*/
// Mirror contains the config related to the registry mirror
type Mirror struct {
// Endpoints are endpoints for a namespace. CRI plugin will try the endpoints
// one by one until a working one is found. The endpoint must be a valid url
// with host specified.
// The scheme, host and path from the endpoint URL will be used.
Endpoints []string `toml:"endpoint" yaml:"endpoint"`
}
// AuthConfig contains the config related to authentication to a specific registry
type AuthConfig struct {
// Username is the username to login the registry.
Username string `toml:"username" yaml:"username"`
// Password is the password to login the registry.
Password string `toml:"password" yaml:"password"`
// Auth is a base64 encoded string from the concatenation of the username,
// a colon, and the password.
Auth string `toml:"auth" yaml:"auth"`
// IdentityToken is used to authenticate the user and get
// an access token for the registry.
IdentityToken string `toml:"identitytoken" yaml:"identity_token"`
}
// TLSConfig contains the CA/Cert/Key used for a registry
type TLSConfig struct {
CAFile string `toml:"ca_file" yaml:"ca_file"`
CertFile string `toml:"cert_file" yaml:"cert_file"`
KeyFile string `toml:"key_file" yaml:"key_file"`
InsecureSkipVerify bool `toml:"insecure_skip_verify" yaml:"insecure_skip_verify"`
}
// Registry is registry settings configured
type Registry struct {
// Mirrors are namespace to mirror mapping for all namespaces.
Mirrors map[string]Mirror `toml:"mirrors" yaml:"mirrors"`
// Configs are configs for each registry.
// The key is the FDQN or IP of the registry.
Configs map[string]RegistryConfig `toml:"configs" yaml:"configs"`
// Auths are registry endpoint to auth config mapping. The registry endpoint must
// be a valid url with host specified.
// DEPRECATED: Use Configs instead. Remove in containerd 1.4.
Auths map[string]AuthConfig `toml:"auths" yaml:"auths"`
}
// RegistryConfig contains configuration used to communicate with the registry.
type RegistryConfig struct {
// Auth contains information to authenticate to the registry.
Auth *AuthConfig `toml:"auth" yaml:"auth"`
// TLS is a pair of CA/Cert/Key which then are used when creating the transport
// that communicates with the registry.
TLS *TLSConfig `toml:"tls" yaml:"tls"`
}

@ -0,0 +1,94 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package k8s
/*
* Source: https://github.com/kubernetes/enhancements/blob/0d69f7cea6fbe73a7d70fab569c6898f5ccb7be0/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry/README.md#specification-for-localregistryhosting-v1
* Copied over: 07.01.2020
* Original License
* > Copyright 2020 The Kubernetes Authors
* >
* > Licensed under the Apache License, Version 2.0 (the "License");
* > you may not use this file except in compliance with the License.
* > You may obtain a copy of the License at
* >
* > http://www.apache.org/licenses/LICENSE-2.0
* >
* > Unless required by applicable law or agreed to in writing, software
* > distributed under the License is distributed on an "AS IS" BASIS,
* > WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* > See the License for the specific language governing permissions and
* > limitations under the License.
*/
// LocalRegistryHostingV1 describes a local registry that developer tools can
// connect to. A local registry allows clients to load images into the local
// cluster by pushing to this registry.
type LocalRegistryHostingV1 struct {
	// Host documents the host (hostname and port) of the registry, as seen from
	// outside the cluster.
	//
	// This is the registry host that tools outside the cluster should push images
	// to.
	Host string `yaml:"host,omitempty" json:"host,omitempty"`

	// HostFromClusterNetwork documents the host (hostname and port) of the
	// registry, as seen from networking inside the container pods.
	//
	// This is the registry host that tools running on pods inside the cluster
	// should push images to. If not set, then tools inside the cluster should
	// assume the local registry is not available to them.
	HostFromClusterNetwork string `yaml:"hostFromClusterNetwork,omitempty" json:"hostFromClusterNetwork,omitempty"`

	// HostFromContainerRuntime documents the host (hostname and port) of the
	// registry, as seen from the cluster's container runtime.
	//
	// When tools apply Kubernetes objects to the cluster, this host should be
	// used for image name fields. If not set, users of this field should use the
	// value of Host instead.
	//
	// Note that it doesn't make sense semantically to define this field, but not
	// define Host or HostFromClusterNetwork. That would imply a way to pull
	// images without a way to push images.
	HostFromContainerRuntime string `yaml:"hostFromContainerRuntime,omitempty" json:"hostFromContainerRuntime,omitempty"`

	// Help contains a URL pointing to documentation for users on how to set
	// up and configure a local registry.
	//
	// Tools can use this to nudge users to enable the registry. When possible,
	// the writer should use as permanent a URL as possible to prevent drift
	// (e.g., a version control SHA).
	//
	// When image pushes to a registry host specified in one of the other fields
	// fail, the tool should display this help URL to the user. The help URL
	// should contain instructions on how to diagnose broken or misconfigured
	// registries.
	Help string `yaml:"help,omitempty" json:"help,omitempty"`
}

// LocalRegistryHosting defaults
// NOTE(review): per the spec linked in the header comment above, these are
// presumably the in-cluster coordinates (namespace / name / data key) under
// which the LocalRegistryHostingV1 document is published — confirm against the
// referenced KEP before relying on them elsewhere.
const (
	LocalRegistryHostingNamespace = "kube-public"
	LocalRegistryHostingName      = "local-registry-hosting"
	LocalRegistryHostingData      = "localRegistryHosting.v1"
)

@ -22,8 +22,11 @@ THE SOFTWARE.
package types
import (
"context"
"fmt"
"time"
"github.com/docker/go-connections/nat"
)
// DefaultClusterName specifies the default name used for newly created clusters
@ -44,6 +47,12 @@ const DefaultLBImageRepo = "docker.io/rancher/k3d-proxy"
// DefaultToolsImageRepo defines the default image used for the tools container
const DefaultToolsImageRepo = "docker.io/rancher/k3d-tools"
// DefaultRegistryImageRepo defines the default image used for the k3d-managed registry
const DefaultRegistryImageRepo = "docker.io/library/registry"
// DefaultRegistryImageTag defines the default image tag used for the k3d-managed registry
const DefaultRegistryImageTag = "2"
// DefaultObjectNamePrefix defines the name prefix for every object created by k3d
const DefaultObjectNamePrefix = "k3d"
@ -52,8 +61,12 @@ var ReadyLogMessageByRole = map[Role]string{
ServerRole: "k3s is up and running",
AgentRole: "Successfully registered node",
LoadBalancerRole: "start worker processes",
RegistryRole: "listening on",
}
// NodeWaitForLogMessageRestartWarnTime is the time after which to warn about a restarting container
const NodeWaitForLogMessageRestartWarnTime = 2 * time.Minute
// NodeStatusRestarting defines the status string that signals the node container is restarting
const NodeStatusRestarting = "restarting"
@ -66,6 +79,7 @@ const (
AgentRole Role = "agent"
NoRole Role = "noRole"
LoadBalancerRole Role = "loadbalancer"
RegistryRole Role = "registry"
)
// NodeRoles defines the roles available for nodes
@ -73,6 +87,19 @@ var NodeRoles = map[string]Role{
string(ServerRole): ServerRole,
string(AgentRole): AgentRole,
string(LoadBalancerRole): LoadBalancerRole,
string(RegistryRole): RegistryRole,
}
// ClusterInternalNodeRoles is a list of roles for nodes that belong to a cluster
var ClusterInternalNodeRoles = []Role{
ServerRole,
AgentRole,
LoadBalancerRole,
}
// ClusterExternalNodeRoles is a list of roles for nodes that do not belong to a specific cluster
var ClusterExternalNodeRoles = []Role{
RegistryRole,
}
// DefaultObjectLabels specifies a set of labels that will be attached to k3d objects by default
@ -82,16 +109,20 @@ var DefaultObjectLabels = map[string]string{
// List of k3d technical label name
const (
LabelClusterName string = "k3d.cluster"
LabelClusterURL string = "k3d.cluster.url"
LabelClusterToken string = "k3d.cluster.token"
LabelImageVolume string = "k3d.cluster.imageVolume"
LabelNetworkExternal string = "k3d.cluster.network.external"
LabelNetwork string = "k3d.cluster.network"
LabelRole string = "k3d.role"
LabelServerAPIPort string = "k3d.server.api.port"
LabelServerAPIHost string = "k3d.server.api.host"
LabelServerAPIHostIP string = "k3d.server.api.hostIP"
LabelClusterName string = "k3d.cluster"
LabelClusterURL string = "k3d.cluster.url"
LabelClusterToken string = "k3d.cluster.token"
LabelImageVolume string = "k3d.cluster.imageVolume"
LabelNetworkExternal string = "k3d.cluster.network.external"
LabelNetwork string = "k3d.cluster.network"
LabelRole string = "k3d.role"
LabelServerAPIPort string = "k3d.server.api.port"
LabelServerAPIHost string = "k3d.server.api.host"
LabelServerAPIHostIP string = "k3d.server.api.hostIP"
LabelRegistryHost string = "k3d.registry.host"
LabelRegistryHostIP string = "k3d.registry.hostIP"
LabelRegistryPortExternal string = "k3s.registry.port.external"
LabelRegistryPortInternal string = "k3s.registry.port.internal"
)
// DefaultRoleCmds maps the node roles to their respective default commands
@ -136,32 +167,62 @@ var DoNotCopyServerFlags = []string{
// ClusterCreateOpts describe a set of options one can set when creating a cluster
type ClusterCreateOpts struct {
PrepDisableHostIPInjection bool
DisableImageVolume bool
WaitForServer bool
Timeout time.Duration
DisableLoadBalancer bool
K3sServerArgs []string
K3sAgentArgs []string
GPURequest string
PrepDisableHostIPInjection bool `yaml:"prepDisableHostIPInjection" json:"prepDisableHostIPInjection,omitempty"`
DisableImageVolume bool `yaml:"disableImageVolume" json:"disableImageVolume,omitempty"`
WaitForServer bool `yaml:"waitForServer" json:"waitForServer,omitempty"`
Timeout time.Duration `yaml:"timeout" json:"timeout,omitempty"`
DisableLoadBalancer bool `yaml:"disableLoadbalancer" json:"disableLoadbalancer,omitempty"`
K3sServerArgs []string `yaml:"k3sServerArgs" json:"k3sServerArgs,omitempty"`
K3sAgentArgs []string `yaml:"k3sAgentArgs" json:"k3sAgentArgs,omitempty"`
GPURequest string `yaml:"gpuRequest" json:"gpuRequest,omitempty"`
NodeHooks []NodeHook `yaml:"nodeHooks,omitempty" json:"nodeHooks,omitempty"`
GlobalLabels map[string]string `yaml:"globalLabels,omitempty" json:"globalLabels,omitempty"`
GlobalEnv []string `yaml:"globalEnv,omitempty" json:"globalEnv,omitempty"`
Registries struct {
Create *Registry `yaml:"create,omitempty" json:"create,omitempty"`
Use []*Registry `yaml:"use,omitempty" json:"use,omitempty"`
} `yaml:"registries,omitempty" json:"registries,omitempty"`
}
// NodeHook is an action that is bound to a specifc stage of a node lifecycle
type NodeHook struct {
Stage LifecycleStage `yaml:"stage,omitempty" json:"stage,omitempty"`
Action NodeHookAction `yaml:"action,omitempty" json:"action,omitempty"`
}
// LifecycleStage defines descriptors for specific stages in the lifecycle of a node or cluster object
type LifecycleStage string
// all defined lifecyclestages
const (
LifecycleStagePreStart LifecycleStage = "preStart"
LifecycleStagePostStart LifecycleStage = "postStart"
)
// ClusterStartOpts describe a set of options one can set when (re-)starting a cluster
type ClusterStartOpts struct {
WaitForServer bool
Timeout time.Duration
NodeHooks []NodeHook `yaml:"nodeHooks,omitempty" json:"nodeHooks,omitempty"`
}
// NodeCreateOpts describes a set of options one can set when creating a new node
type NodeCreateOpts struct {
Wait bool
Timeout time.Duration
Wait bool
Timeout time.Duration
NodeHooks []NodeHook `yaml:"nodeHooks,omitempty" json:"nodeHooks,omitempty"`
}
// NodeStartOpts describes a set of options one can set when (re-)starting a node
type NodeStartOpts struct {
Wait bool
Timeout time.Duration
Wait bool
Timeout time.Duration
NodeHooks []NodeHook `yaml:"nodeHooks,omitempty" json:"nodeHooks,omitempty"`
}
// NodeHookAction is an interface to implement actions that should trigger at specific points of the node lifecycle
type NodeHookAction interface {
Run(ctx context.Context, node *Node) error
}
// ImageImportOpts describes a set of options one can set for loading image(s) into cluster(s)
@ -179,14 +240,13 @@ type ClusterNetwork struct {
type Cluster struct {
Name string `yaml:"name" json:"name,omitempty"`
Network ClusterNetwork `yaml:"network" json:"network,omitempty"`
Token string `yaml:"cluster_token" json:"clusterToken,omitempty"`
Token string `yaml:"clusterToken" json:"clusterToken,omitempty"`
Nodes []*Node `yaml:"nodes" json:"nodes,omitempty"`
InitNode *Node // init server node
ExternalDatastore *ExternalDatastore `yaml:"external_datastore,omitempty" json:"externalDatastore,omitempty"`
CreateClusterOpts *ClusterCreateOpts `yaml:"options,omitempty" json:"options,omitempty"`
ExposeAPI ExposeAPI `yaml:"expose_api" json:"exposeAPI,omitempty"`
ServerLoadBalancer *Node `yaml:"server_loadbalancer,omitempty" json:"serverLoadBalancer,omitempty"`
ImageVolume string `yaml:"image_volume" json:"imageVolume,omitempty"`
ExternalDatastore *ExternalDatastore `yaml:"externalDatastore,omitempty" json:"externalDatastore,omitempty"`
KubeAPI *ExposureOpts `yaml:"kubeAPI" json:"kubeAPI,omitempty"`
ServerLoadBalancer *Node `yaml:"serverLoadbalancer,omitempty" json:"serverLoadBalancer,omitempty"`
ImageVolume string `yaml:"imageVolume" json:"imageVolume,omitempty"`
}
// ServerCountRunning returns the number of server nodes running in the cluster and the total number
@ -237,41 +297,40 @@ type Node struct {
Volumes []string `yaml:"volumes" json:"volumes,omitempty"`
Env []string `yaml:"env" json:"env,omitempty"`
Cmd []string // filled automatically based on role
Args []string `yaml:"extra_args" json:"extraArgs,omitempty"`
Ports []string `yaml:"port_mappings" json:"portMappings,omitempty"`
Args []string `yaml:"extraArgs" json:"extraArgs,omitempty"`
Ports nat.PortMap `yaml:"portMappings" json:"portMappings,omitempty"`
Restart bool `yaml:"restart" json:"restart,omitempty"`
Created string `yaml:"created" json:"created,omitempty"`
Labels map[string]string // filled automatically
Network string // filled automatically
ExtraHosts []string // filled automatically
ServerOpts ServerOpts `yaml:"server_opts" json:"serverOpts,omitempty"`
AgentOpts AgentOpts `yaml:"agent_opts" json:"agentOpts,omitempty"`
ServerOpts ServerOpts `yaml:"serverOpts" json:"serverOpts,omitempty"`
AgentOpts AgentOpts `yaml:"agentOpts" json:"agentOpts,omitempty"`
GPURequest string // filled automatically
State NodeState // filled automatically
}
// ServerOpts describes some additional server role specific opts
type ServerOpts struct {
IsInit bool `yaml:"is_initializing_server" json:"isInitializingServer,omitempty"`
ExposeAPI ExposeAPI // filled automatically
IsInit bool `yaml:"isInitializingServer" json:"isInitializingServer,omitempty"`
KubeAPI *ExposureOpts `yaml:"kubeAPI" json:"kubeAPI"`
}
// ExposureOpts describes settings that the user can set for accessing the Kubernetes API
type ExposureOpts struct {
nat.PortMapping // filled automatically (reference to normal portmapping)
Host string `yaml:"host,omitempty" json:"host,omitempty"`
}
// ExternalDatastore describes an external datastore used for HA/multi-server clusters
type ExternalDatastore struct {
Endpoint string `yaml:"endpoint" json:"endpoint,omitempty"`
CAFile string `yaml:"ca_file" json:"caFile,omitempty"`
CertFile string `yaml:"cert_file" json:"certFile,omitempty"`
KeyFile string `yaml:"key_file" json:"keyFile,omitempty"`
CAFile string `yaml:"caFile" json:"caFile,omitempty"`
CertFile string `yaml:"certFile" json:"certFile,omitempty"`
KeyFile string `yaml:"keyFile" json:"keyFile,omitempty"`
Network string `yaml:"network" json:"network,omitempty"`
}
// ExposeAPI describes specs needed to expose the API-Server
type ExposeAPI struct {
Host string `yaml:"host" json:"host,omitempty"`
HostIP string `yaml:"host_ip" json:"hostIP,omitempty"`
Port string `yaml:"port" json:"port"`
}
// AgentOpts describes some additional agent role specific opts
type AgentOpts struct{}
@ -285,3 +344,42 @@ type NodeState struct {
Running bool
Status string
}
/*
* Registry
*/
// Registry Defaults
const (
DefaultRegistryPort = "5000"
DefaultRegistryName = DefaultObjectNamePrefix + "-registry"
DefaultRegistriesFilePath = "/etc/rancher/k3s/registries.yaml"
DefaultRegistryMountPath = "/var/lib/registry"
DefaultDockerHubAddress = "registry-1.docker.io"
)
// Registry describes a k3d-managed registry
type Registry struct {
	ClusterRef string `` // filled automatically -> if created with a cluster
	// Protocol used to reach the registry; default: http
	Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"`
	// Host is the registry's host name
	Host string `yaml:"host" json:"host"`
	// Image is the container image to run the registry from
	Image string `yaml:"image,omitempty" json:"image,omitempty"`
	// ExposureOpts controls how the registry port is published on the host
	ExposureOpts ExposureOpts `yaml:"expose" json:"expose"`
	// Options holds additional registry configuration
	Options struct {
		// ConfigFile is a path to a registry config file — presumably mounted into the registry container; TODO confirm
		ConfigFile string `yaml:"configFile,omitempty" json:"configFile,omitempty"`
		// Proxy points the registry at a remote upstream — presumably a pull-through mirror setup; TODO confirm
		Proxy struct {
			RemoteURL string `yaml:"remoteURL" json:"remoteURL"`
			Username  string `yaml:"username,omitempty" json:"username,omitempty"`
			Password  string `yaml:"password,omitempty" json:"password,omitempty"`
		} `yaml:"proxy,omitempty" json:"proxy,omitempty"`
	} `yaml:"options,omitempty" json:"options,omitempty"`
}
// RegistryExternal describes a minimal spec for an "external" registry
// "external" meaning, that it's unrelated to the current cluster
// e.g. used for the --registry-use flag registry reference
type RegistryExternal struct {
	// Protocol used to reach the registry; default: http
	Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"`
	// Host is the registry's host name
	Host string `yaml:"host" json:"host"`
	// Port the registry is reachable on
	Port string `yaml:"port" json:"port"`
}

@ -0,0 +1,190 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package util
import (
"fmt"
"regexp"
"strconv"
"strings"
k3d "github.com/rancher/k3d/v4/pkg/types"
log "github.com/sirupsen/logrus"
)
// Regexp pattern to match node filters
// Accepted form: <group>[<subset>], where group is one of server|agent|loadbalancer|all
// and the optional subset is an index list (e.g. [0,1]), a range (e.g. [1:3]) or the wildcard [*]
var filterRegexp = regexp.MustCompile(`^(?P<group>server|agent|loadbalancer|all)(?P<subsetSpec>\[(?P<subset>(?P<subsetList>(\d+,?)+)|(?P<subsetRange>\d*:\d*)|(?P<subsetWildcard>\*))\])?$`)
// FilterNodes takes a string filter to return a filtered list of nodes.
// Filters have the form `group[subset]` (see filterRegexp): group is one of
// server|agent|loadbalancer|all and the optional subset is an index list
// (e.g. [0,1]), a range (e.g. [1:3]) or the wildcard [*].
// With no filters given, all nodes are returned unchanged.
func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
	if len(filters) == 0 || len(filters[0]) == 0 {
		log.Warnln("No node filter specified")
		return nodes, nil
	}

	// map roles to subsets
	serverNodes := []*k3d.Node{}
	agentNodes := []*k3d.Node{}
	var serverlb *k3d.Node
	for _, node := range nodes {
		if node.Role == k3d.ServerRole {
			serverNodes = append(serverNodes, node)
		} else if node.Role == k3d.AgentRole {
			agentNodes = append(agentNodes, node)
		} else if node.Role == k3d.LoadBalancerRole {
			serverlb = node
		}
	}

	filteredNodes := []*k3d.Node{}
	set := make(map[*k3d.Node]struct{}) // deduplicates nodes matched by more than one filter

	// range over all instances of group[subset] specs
	for _, filter := range filters {

		// match regex with capturing groups
		match := filterRegexp.FindStringSubmatch(filter)
		if len(match) == 0 {
			return nil, fmt.Errorf("Failed to parse node filters: invalid format or empty subset in '%s'", filter)
		}

		// map capturing group names to submatches
		submatches := MapSubexpNames(filterRegexp.SubexpNames(), match)

		// if one of the filters is 'all', we only return this and drop all others
		if submatches["group"] == "all" {
			// TODO: filterNodes: only log if really more than one is specified
			log.Warnf("Node filter 'all' set, but more were specified in '%+v'", filters)
			return nodes, nil
		}

		// Choose the group of nodes to operate on
		groupNodes := []*k3d.Node{}
		if submatches["group"] == string(k3d.ServerRole) {
			groupNodes = serverNodes
		} else if submatches["group"] == string(k3d.AgentRole) {
			groupNodes = agentNodes
		} else if submatches["group"] == string(k3d.LoadBalancerRole) {
			if serverlb == nil {
				return nil, fmt.Errorf("Node filter '%s' targets a node that does not exist (disabled?)", filter)
			}
			filteredNodes = append(filteredNodes, serverlb)
			return filteredNodes, nil // early exit if filtered group is the loadbalancer
		}

		/* Option 1) subset defined by list */
		if submatches["subsetList"] != "" {
			for _, index := range strings.Split(submatches["subsetList"], ",") {
				if index != "" {
					num, err := strconv.Atoi(index)
					if err != nil {
						return nil, fmt.Errorf("Failed to convert subset number to integer in '%s'", filter)
					}
					if num < 0 || num >= len(groupNodes) {
						return nil, fmt.Errorf("Index out of range: index '%d' < 0 or > number of available nodes in filter '%s'", num, filter)
					}
					if _, exists := set[groupNodes[num]]; !exists {
						filteredNodes = append(filteredNodes, groupNodes[num])
						set[groupNodes[num]] = struct{}{}
					}
				}
			}

			/* Option 2) subset defined by range */
		} else if submatches["subsetRange"] != "" {

			/*
			 * subset specified by a range 'START:END', where each side is optional
			 */

			split := strings.Split(submatches["subsetRange"], ":")
			if len(split) != 2 {
				return nil, fmt.Errorf("Failed to parse subset range in '%s'", filter)
			}

			start := 0
			end := len(groupNodes) - 1

			var err error

			if split[0] != "" {
				start, err = strconv.Atoi(split[0])
				if err != nil {
					return nil, fmt.Errorf("Failed to convert subset range start to integer in '%s'", filter)
				}
				if start < 0 || start >= len(groupNodes) {
					return nil, fmt.Errorf("Invalid subset range: start < 0 or > number of available nodes in '%s'", filter)
				}
			}

			if split[1] != "" {
				end, err = strconv.Atoi(split[1])
				if err != nil {
					// FIX: this message previously said "start" although this branch parses the range end
					return nil, fmt.Errorf("Failed to convert subset range end to integer in '%s'", filter)
				}
				if end < start || end >= len(groupNodes) {
					return nil, fmt.Errorf("Invalid subset range: end < start or > number of available nodes in '%s'", filter)
				}
			}

			for i := start; i <= end; i++ {
				if _, exists := set[groupNodes[i]]; !exists {
					filteredNodes = append(filteredNodes, groupNodes[i])
					set[groupNodes[i]] = struct{}{}
				}
			}

			/* Option 3) subset defined by wildcard */
		} else if submatches["subsetWildcard"] == "*" {
			/*
			 * '*' = all nodes
			 */
			for _, node := range groupNodes {
				if _, exists := set[node]; !exists {
					filteredNodes = append(filteredNodes, node)
					set[node] = struct{}{}
				}
			}

			/* Option X) invalid/unknown subset */
		} else {
			return nil, fmt.Errorf("Failed to parse node specifiers: unknown subset in '%s'", filter)
		}
	}

	return filteredNodes, nil
}
// FilterNodesByRole returns a stripped list of nodes which do match the given role
func FilterNodesByRole(nodes []*k3d.Node, role k3d.Role) []*k3d.Node {
	matching := make([]*k3d.Node, 0, len(nodes))
	for _, candidate := range nodes {
		if candidate.Role != role {
			continue
		}
		matching = append(matching, candidate)
	}
	return matching
}

@ -0,0 +1,39 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package util
import (
"strings"
)
// SplitLabelKeyValue separates the label key from the label value (if any)
func SplitLabelKeyValue(label string) (string, string) {
	// Mirror `docker run` semantics: only the first '=' delimits key and value.
	if sep := strings.Index(label, "="); sep >= 0 {
		return label[:sep], label[sep+1:]
	}
	// No '=' present: the whole string is the key, the value defaults to empty.
	return label, ""
}

@ -0,0 +1,47 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package util
import (
"net"
log "github.com/sirupsen/logrus"
)
// GetFreePort tries to fetch an open port from the OS-Kernel:
// it binds a TCP listener to localhost:0 (kernel-assigned port),
// reads back the chosen port and releases the listener again.
func GetFreePort() (int, error) {
	addr, resolveErr := net.ResolveTCPAddr("tcp", "localhost:0")
	if resolveErr != nil {
		log.Errorln("Failed to resolve address")
		return 0, resolveErr
	}

	listener, listenErr := net.ListenTCP("tcp", addr)
	if listenErr != nil {
		log.Errorln("Failed to create TCP Listener")
		return 0, listenErr
	}
	defer listener.Close()

	return listener.Addr().(*net.TCPAddr).Port, nil
}

@ -0,0 +1,60 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package util
import (
"fmt"
"regexp"
"github.com/docker/go-connections/nat"
k3d "github.com/rancher/k3d/v4/pkg/types"
)
// registryRefRegexp matches a simplified registry reference of the form
// [http://|https://]host-or-ip[:internalport[:externalport]]
// Named capture groups: protocol, hostref (hostip|hostname), internalport, externalport.
var registryRefRegexp = regexp.MustCompile(`^(?P<protocol>http:\/\/|https:\/\/)?(?P<hostref>(?P<hostip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|(?P<hostname>[a-zA-Z\-\.0-9]+)){1}?((:)(?P<internalport>\d{1,5}))?((:)(?P<externalport>\d{1,5}))?$`)
// ParseRegistryRef returns a registry struct parsed from a simplified definition string
// of the form [proto://]host[:internalport[:externalport]].
func ParseRegistryRef(registryRef string) (*k3d.Registry, error) {
	match := registryRefRegexp.FindStringSubmatch(registryRef)
	if len(match) == 0 {
		return nil, fmt.Errorf("Failed to parse registry reference %s: Must be [proto://]host[:port]", registryRef)
	}
	submatches := MapSubexpNames(registryRefRegexp.SubexpNames(), match)

	registry := &k3d.Registry{
		Host:         submatches["hostref"],
		Protocol:     submatches["protocol"],
		ExposureOpts: k3d.ExposureOpts{},
	}
	registry.ExposureOpts.Host = submatches["hostref"]

	// FIX: the regex defines capture groups 'internalport'/'externalport', not 'port';
	// checking the non-existent 'port' group meant the port mapping was never populated.
	if submatches["internalport"] != "" {
		registry.ExposureOpts.PortMapping = nat.PortMapping{
			Port: nat.Port(fmt.Sprintf("%s/tcp", submatches["internalport"])),
			Binding: nat.PortBinding{
				HostPort: submatches["externalport"],
			},
		}
	}

	return registry, nil
}

@ -0,0 +1,92 @@
/*
Copyright © 2020 The k3d Author(s)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package util
import (
"fmt"
"os"
"strings"
log "github.com/sirupsen/logrus"
"github.com/rancher/k3d/v4/pkg/runtimes"
)
// ValidateVolumeMount checks, if the source of volume mounts exists and if the destination is an absolute path
// - SRC: source directory/file or named volume -> tests: must exist (missing host paths only warn)
// - DEST: destination path inside the node -> tests: must be an absolute path
func ValidateVolumeMount(runtime runtimes.Runtime, volumeMount string) error {
	src := ""
	dest := ""

	// FIX: strings.Split never returns an empty slice, so the previous
	// `len(split) < 1` check was dead code; reject an empty input explicitly.
	if volumeMount == "" {
		return fmt.Errorf("No volume/path specified")
	}

	// validate 'SRC[:DEST]' substring
	split := strings.Split(volumeMount, ":")
	if len(split) > 3 {
		return fmt.Errorf("Invalid volume mount '%s': maximal 2 ':' allowed", volumeMount)
	}

	// we only have SRC specified -> DEST = SRC
	if len(split) == 1 {
		src = split[0]
		dest = src
	} else {
		src = split[0]
		dest = split[1]
	}

	// verify that the source exists: either as a named volume known to the
	// runtime, or as a file/directory on the host (the latter only warns,
	// preserving best-effort behavior)
	if src != "" {
		if err := verifyNamedVolume(runtime, src); err != nil {
			if _, err := os.Stat(src); err != nil {
				log.Warnf("Failed to stat file/directory/named volume that you're trying to mount: '%s' in '%s' -> Please make sure it exists", src, volumeMount)
			}
		}
	}

	// verify that the destination is an absolute path
	if !strings.HasPrefix(dest, "/") {
		return fmt.Errorf("Volume mount destination doesn't appear to be an absolute path: '%s' in '%s'", dest, volumeMount)
	}

	return nil
}
// verifyNamedVolume checks whether a named volume exists in the runtime
func verifyNamedVolume(runtime runtimes.Runtime, volumeName string) error {
	foundVolName, err := runtime.GetVolume(volumeName)
	if err != nil {
		return err
	}
	if foundVolName == "" {
		// FIX: the parameter used to be shadowed by the (empty) lookup result,
		// so this error message always printed an empty volume name
		return fmt.Errorf("Failed to find named volume '%s'", volumeName)
	}
	return nil
}

@ -1,14 +1,12 @@
FROM nginx:1.16.0-alpine
FROM nginx:1.19-alpine
ARG CONFD_REPO=iwilltry42/confd
ARG CONFD_VERSION=0.16.1
ARG OS=linux
ARG ARCH=amd64
RUN echo "Building for '${OS}/${ARCH}'..." \
&& apk -U --no-cache add curl ca-certificates\
&& mkdir -p /etc/confd \
&& curl -sLf "https://github.com/${CONFD_REPO}/releases/download/v${CONFD_VERSION}/confd-${CONFD_VERSION}-${OS}-${ARCH}" > /usr/bin/confd \
&& chmod +x /usr/bin/confd \
&& apk del curl
&& wget "https://github.com/${CONFD_REPO}/releases/download/v${CONFD_VERSION}/confd-${CONFD_VERSION}-${OS}-${ARCH}" -O /usr/bin/confd \
&& chmod +x /usr/bin/confd
COPY templates /etc/confd/templates/
COPY conf.d /etc/confd/conf.d/

@ -0,0 +1,46 @@
# Example config for `k3d cluster create` using the Simple config schema (v1alpha1)
apiVersion: k3d.io/v1alpha1
kind: Simple
name: test
servers: 1 # number of server nodes
agents: 2 # number of agent nodes
# expose the Kubernetes API on all host interfaces, port 6443
exposeAPI:
  hostIP: "0.0.0.0"
  port: "6443"
image: rancher/k3s:latest # node container image
# volume mounts, applied to the nodes selected by nodeFilters
volumes:
  - volume: /my/path:/some/path
    nodeFilters:
      - all
# port mappings, applied to the nodes selected by nodeFilters
ports:
  - port: 80:80
    nodeFilters:
      - loadbalancer
  - port: 0.0.0.0:443:443
    nodeFilters:
      - loadbalancer
# environment variables injected into the selected nodes
env:
  - envVar: bar=baz
    nodeFilters:
      - all
# container labels applied to the selected nodes
labels:
  - label: foo=bar
    nodeFilters:
      - server[0]
      - loadbalancer
# registry handling: create a new one, use none of the existing ones
registries:
  create: true
  use: []
options:
  k3d: # k3d-specific options
    wait: true
    timeout: "60s"
    disableLoadbalancer: false
    disableImageVolume: false
  k3s: # extra arguments passed through to k3s
    extraServerArgs:
      - --tls-san=127.0.0.1
    extraAgentArgs: []
  kubeconfig:
    updateDefaultKubeconfig: true
    switchCurrentContext: true

@ -68,7 +68,7 @@ check_url() {
check_clusters() {
[ -n "$EXE" ] || abort "EXE is not defined"
for c in "$@" ; do
$EXE kubeconfig merge "$c" --switch-context
$EXE kubeconfig merge "$c" --kubeconfig-switch-context
if kubectl cluster-info ; then
passed "cluster $c is reachable"
else
@ -94,7 +94,7 @@ check_cluster_count() {
check_multi_node() {
cluster=$1
expectedNodeCount=$2
$EXE kubeconfig merge "$cluster" --switch-context
$EXE kubeconfig merge "$cluster" --kubeconfig-switch-context
nodeCount=$(kubectl get nodes -o=custom-columns=NAME:.metadata.name --no-headers | wc -l)
if [[ $nodeCount == $expectedNodeCount ]]; then
passed "cluster $cluster has $expectedNodeCount nodes, as expected"
@ -163,4 +163,16 @@ wait_for_pod_exec() {
done
echo "Command '$2' in pod '$1' did NOT return successfully in $max_retries tries"
return 1
}
exec_in_node() {
  # Run a command inside a running container/node.
  # $1   = container/node name
  # $2.. = command and its arguments
  # FIX: previously only "$2" was forwarded, silently dropping any further
  # arguments; forwarding "$@" is backward compatible for existing callers.
  local node=$1
  shift
  docker exec "$node" "$@"
}
docker_assert_container_label() {
  # Assert that container $1 carries label $2 (given as an ERE matching a
  # full "key=value" line); exits non-zero via grep if the label is absent.
  local container=$1
  local label=$2
  local fmt='{{ range $k, $v := .Config.Labels }}{{ printf "%s=%s\n" $k $v }}{{ end }}'
  docker inspect --format "$fmt" "$container" | grep -E "^$label$"
}

@ -19,8 +19,10 @@ k3de2e=$(docker run -d \
-e EXE="$K3D_EXE" \
-e CI="true" \
-e LOG_LEVEL="$LOG_LEVEL" \
-e E2E_SKIP="$E2E_SKIP" \
-e E2E_INCLUDE="$E2E_INCLUDE" \
-e E2E_EXCLUDE="$E2E_EXCLUDE" \
-e E2E_EXTRA="$E2E_EXTRA" \
--add-host "k3d-registrytest-registry:127.0.0.1" \
--name "k3d-e2e-runner-$TIMESTAMP" \
"k3d:$K3D_IMAGE_TAG")
@ -45,7 +47,13 @@ until docker inspect "$k3de2e" | jq ".[0].State.Running" && docker logs "$k3de2e
done
# build helper container images
docker exec --workdir /src "$k3de2e" make build-helper-images
if [ -z "$E2E_HELPER_IMAGE_TAG" ]; then
docker exec --workdir /src "$k3de2e" make build-helper-images
# execute tests
docker exec "$k3de2e" /src/tests/runner.sh
else
# execute tests
docker exec -e "K3D_HELPER_IMAGE_TAG=$E2E_HELPER_IMAGE_TAG" "$k3de2e" /src/tests/runner.sh
fi
# execute tests
docker exec "$k3de2e" /src/tests/runner.sh

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save